author    Jeff Kirsher <jeffrey.t.kirsher@intel.com>    2011-05-13 05:51:01 -0400
committer Jeff Kirsher <jeffrey.t.kirsher@intel.com>    2011-08-11 05:41:47 -0400
commit    86387e1ac4fcaa45ff5578013a78593d1a0ba279 (patch)
tree      25c662fa8226419e73c72873888634fe1df04693 /drivers/net/ethernet/neterion
parent    93f7848b77bcf1108879defd32612422ae80d785 (diff)
s2io/vxge: Move the Exar drivers
Move the Exar drivers into drivers/net/ethernet/neterion/ and make the
necessary Kconfig and Makefile changes.

CC: Jon Mason <jdmason@kudzu.us>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/neterion')
-rw-r--r--  drivers/net/ethernet/neterion/Kconfig              |   54
-rw-r--r--  drivers/net/ethernet/neterion/Makefile             |    6
-rw-r--r--  drivers/net/ethernet/neterion/s2io-regs.h          |  958
-rw-r--r--  drivers/net/ethernet/neterion/s2io.c               | 8674
-rw-r--r--  drivers/net/ethernet/neterion/s2io.h               | 1148
-rw-r--r--  drivers/net/ethernet/neterion/vxge/Makefile        |    7
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-config.c   | 5123
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-config.h   | 2111
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-ethtool.c  | 1132
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-ethtool.h  |   67
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-main.c     | 4854
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-main.h     |  519
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-reg.h      | 4636
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-traffic.c  | 2514
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-traffic.h  | 2298
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-version.h  |   49
16 files changed, 34150 insertions(+), 0 deletions(-)
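
The diffstat above is limited to drivers/net/ethernet/neterion; the move also
has to be wired into the ethernet-level build files. A minimal sketch of that
wiring, assumed from standard kernel convention rather than shown in this
limited diffstat:

    # drivers/net/ethernet/Kconfig
    source "drivers/net/ethernet/neterion/Kconfig"

    # drivers/net/ethernet/Makefile
    obj-$(CONFIG_NET_VENDOR_EXAR) += neterion/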
diff --git a/drivers/net/ethernet/neterion/Kconfig b/drivers/net/ethernet/neterion/Kconfig
new file mode 100644
index 000000000000..3d98e62c2412
--- /dev/null
+++ b/drivers/net/ethernet/neterion/Kconfig
@@ -0,0 +1,54 @@
1#
2# Exar device configuration
3#
4
5config NET_VENDOR_EXAR
6 bool "Exar devices"
7 depends on PCI
8 ---help---
9 If you have a network (Ethernet) card belonging to this class, say
10 Y and read the Ethernet-HOWTO, available from
11 <http://www.tldp.org/docs.html#howto>.
12
13 Note that the answer to this question doesn't directly affect the
14 kernel: saying N will just cause the configurator to skip all
15 the questions about Exar cards. If you say Y, you will be asked for
16 your specific card in the following questions.
17
18if NET_VENDOR_EXAR
19
20config S2IO
21 tristate "Exar Xframe 10Gb Ethernet Adapter"
22 depends on PCI
23 ---help---
24 This driver supports Exar Corp's Xframe Series 10Gb Ethernet Adapters.
25
26 More specific information on configuring the driver is in
27 <file:Documentation/networking/s2io.txt>.
28
29 To compile this driver as a module, choose M here. The module
30 will be called s2io.
31
32config VXGE
33 tristate "Exar X3100 Series 10GbE PCIe Server Adapter"
34 depends on PCI && INET
35 ---help---
36 This driver supports Exar Corp's X3100 Series 10 GbE PCIe
37 I/O Virtualized Server Adapter.
38
39 More specific information on configuring the driver is in
40 <file:Documentation/networking/vxge.txt>.
41
42 To compile this driver as a module, choose M here. The module
43 will be called vxge.
44
45config VXGE_DEBUG_TRACE_ALL
46 bool "Enabling All Debug trace statements in driver"
47 default n
48 depends on VXGE
49 ---help---
50	  Say Y here if you want to enable all the debug trace statements in
51	  the vxge driver. By default only a few debug trace statements are
52 enabled.
53
54endif # NET_VENDOR_EXAR
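
With the options above enabled as modules, the resulting .config fragment
would look roughly like this (illustrative, assuming the debug trace option is
left at its default of n):

    CONFIG_NET_VENDOR_EXAR=y
    CONFIG_S2IO=m
    CONFIG_VXGE=m
    # CONFIG_VXGE_DEBUG_TRACE_ALL is not set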
diff --git a/drivers/net/ethernet/neterion/Makefile b/drivers/net/ethernet/neterion/Makefile
new file mode 100644
index 000000000000..70c8058a601a
--- /dev/null
+++ b/drivers/net/ethernet/neterion/Makefile
@@ -0,0 +1,6 @@
1#
2# Makefile for the Exar network device drivers.
3#
4
5obj-$(CONFIG_S2IO) += s2io.o
6obj-$(CONFIG_VXGE) += vxge/
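
The vxge/ subdirectory carries its own 7-line Makefile (listed in the diffstat
but not shown in this excerpt). A sketch of how such a Makefile typically
composes the vxge module from the sources listed above; the exact object list
is an assumption:

    # drivers/net/ethernet/neterion/vxge/Makefile (sketch)
    obj-$(CONFIG_VXGE) += vxge.o
    vxge-objs := vxge-config.o vxge-traffic.o vxge-ethtool.o vxge-main.o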
diff --git a/drivers/net/ethernet/neterion/s2io-regs.h b/drivers/net/ethernet/neterion/s2io-regs.h
new file mode 100644
index 000000000000..3688325c11f5
--- /dev/null
+++ b/drivers/net/ethernet/neterion/s2io-regs.h
@@ -0,0 +1,958 @@
1/************************************************************************
2 * regs.h: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2010 Exar Corp.
4
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
12 ************************************************************************/
13#ifndef _REGS_H
14#define _REGS_H
15
16#define TBD 0
17
18struct XENA_dev_config {
19/* Convention: mHAL_XXX is mask, vHAL_XXX is value */
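/* The s2BIT() and vBIT() helpers used throughout this file come from s2io.h
 * (listed in the diffstat but not shown in this excerpt). Based on that
 * header, s2BIT(n) selects bit n counted from the MSB of a 64-bit register,
 * and vBIT(val, loc, sz) places 'val' in an 'sz'-bit field starting 'loc'
 * bits from the MSB; e.g. vBIT(0xA5, 0, 8) sets the top byte to 0xA5.
 */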
20
21/* General Control-Status Registers */
22 u64 general_int_status;
23#define GEN_INTR_TXPIC s2BIT(0)
24#define GEN_INTR_TXDMA s2BIT(1)
25#define GEN_INTR_TXMAC s2BIT(2)
26#define GEN_INTR_TXXGXS s2BIT(3)
27#define GEN_INTR_TXTRAFFIC s2BIT(8)
28#define GEN_INTR_RXPIC s2BIT(32)
29#define GEN_INTR_RXDMA s2BIT(33)
30#define GEN_INTR_RXMAC s2BIT(34)
31#define GEN_INTR_MC s2BIT(35)
32#define GEN_INTR_RXXGXS s2BIT(36)
33#define GEN_INTR_RXTRAFFIC s2BIT(40)
34#define GEN_ERROR_INTR GEN_INTR_TXPIC | GEN_INTR_RXPIC | \
35 GEN_INTR_TXDMA | GEN_INTR_RXDMA | \
36 GEN_INTR_TXMAC | GEN_INTR_RXMAC | \
37 GEN_INTR_TXXGXS| GEN_INTR_RXXGXS| \
38 GEN_INTR_MC
39
40 u64 general_int_mask;
41
42 u8 unused0[0x100 - 0x10];
43
44 u64 sw_reset;
45/* XGXS must be removed from reset only once. */
46#define SW_RESET_XENA vBIT(0xA5,0,8)
47#define SW_RESET_FLASH vBIT(0xA5,8,8)
48#define SW_RESET_EOI vBIT(0xA5,16,8)
49#define SW_RESET_ALL (SW_RESET_XENA | \
50 SW_RESET_FLASH | \
51 SW_RESET_EOI)
52/* The SW_RESET register must read this value after a successful reset. */
53#define SW_RESET_RAW_VAL 0xA5000000
54
55
56 u64 adapter_status;
57#define ADAPTER_STATUS_TDMA_READY s2BIT(0)
58#define ADAPTER_STATUS_RDMA_READY s2BIT(1)
59#define ADAPTER_STATUS_PFC_READY s2BIT(2)
60#define ADAPTER_STATUS_TMAC_BUF_EMPTY s2BIT(3)
61#define ADAPTER_STATUS_PIC_QUIESCENT s2BIT(5)
62#define ADAPTER_STATUS_RMAC_REMOTE_FAULT s2BIT(6)
63#define ADAPTER_STATUS_RMAC_LOCAL_FAULT s2BIT(7)
64#define ADAPTER_STATUS_RMAC_PCC_IDLE vBIT(0xFF,8,8)
65#define ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE vBIT(0x0F,8,8)
66#define ADAPTER_STATUS_RC_PRC_QUIESCENT vBIT(0xFF,16,8)
67#define ADAPTER_STATUS_MC_DRAM_READY s2BIT(24)
68#define ADAPTER_STATUS_MC_QUEUES_READY s2BIT(25)
69#define ADAPTER_STATUS_RIC_RUNNING s2BIT(26)
70#define ADAPTER_STATUS_M_PLL_LOCK s2BIT(30)
71#define ADAPTER_STATUS_P_PLL_LOCK s2BIT(31)
72
73 u64 adapter_control;
74#define ADAPTER_CNTL_EN s2BIT(7)
75#define ADAPTER_EOI_TX_ON s2BIT(15)
76#define ADAPTER_LED_ON s2BIT(23)
77#define ADAPTER_UDPI(val) vBIT(val,36,4)
78#define ADAPTER_WAIT_INT s2BIT(48)
79#define ADAPTER_ECC_EN s2BIT(55)
80
81 u64 serr_source;
82#define SERR_SOURCE_PIC s2BIT(0)
83#define SERR_SOURCE_TXDMA s2BIT(1)
84#define SERR_SOURCE_RXDMA s2BIT(2)
85#define SERR_SOURCE_MAC s2BIT(3)
86#define SERR_SOURCE_MC s2BIT(4)
87#define SERR_SOURCE_XGXS s2BIT(5)
88#define SERR_SOURCE_ANY (SERR_SOURCE_PIC | \
89 SERR_SOURCE_TXDMA | \
90 SERR_SOURCE_RXDMA | \
91 SERR_SOURCE_MAC | \
92 SERR_SOURCE_MC | \
93 SERR_SOURCE_XGXS)
94
95 u64 pci_mode;
96#define GET_PCI_MODE(val) ((val & vBIT(0xF, 0, 4)) >> 60)
97#define PCI_MODE_PCI_33 0
98#define PCI_MODE_PCI_66 0x1
99#define PCI_MODE_PCIX_M1_66 0x2
100#define PCI_MODE_PCIX_M1_100 0x3
101#define PCI_MODE_PCIX_M1_133 0x4
102#define PCI_MODE_PCIX_M2_66 0x5
103#define PCI_MODE_PCIX_M2_100 0x6
104#define PCI_MODE_PCIX_M2_133 0x7
105#define PCI_MODE_UNSUPPORTED s2BIT(0)
106#define PCI_MODE_32_BITS s2BIT(8)
107#define PCI_MODE_UNKNOWN_MODE s2BIT(9)
108
109 u8 unused_0[0x800 - 0x128];
110
111/* PCI-X Controller registers */
112 u64 pic_int_status;
113 u64 pic_int_mask;
114#define PIC_INT_TX s2BIT(0)
115#define PIC_INT_FLSH s2BIT(1)
116#define PIC_INT_MDIO s2BIT(2)
117#define PIC_INT_IIC s2BIT(3)
118#define PIC_INT_GPIO s2BIT(4)
119#define PIC_INT_RX s2BIT(32)
120
121 u64 txpic_int_reg;
122 u64 txpic_int_mask;
123#define PCIX_INT_REG_ECC_SG_ERR s2BIT(0)
124#define PCIX_INT_REG_ECC_DB_ERR s2BIT(1)
125#define PCIX_INT_REG_FLASHR_R_FSM_ERR s2BIT(8)
126#define PCIX_INT_REG_FLASHR_W_FSM_ERR s2BIT(9)
127#define PCIX_INT_REG_INI_TX_FSM_SERR s2BIT(10)
128#define PCIX_INT_REG_INI_TXO_FSM_ERR s2BIT(11)
129#define PCIX_INT_REG_TRT_FSM_SERR s2BIT(13)
130#define PCIX_INT_REG_SRT_FSM_SERR s2BIT(14)
131#define PCIX_INT_REG_PIFR_FSM_SERR s2BIT(15)
132#define PCIX_INT_REG_WRC_TX_SEND_FSM_SERR s2BIT(21)
133#define PCIX_INT_REG_RRC_TX_REQ_FSM_SERR s2BIT(23)
134#define PCIX_INT_REG_INI_RX_FSM_SERR s2BIT(48)
135#define PCIX_INT_REG_RA_RX_FSM_SERR s2BIT(50)
136/*
137#define PCIX_INT_REG_WRC_RX_SEND_FSM_SERR s2BIT(52)
138#define PCIX_INT_REG_RRC_RX_REQ_FSM_SERR s2BIT(54)
139#define PCIX_INT_REG_RRC_RX_SPLIT_FSM_SERR s2BIT(58)
140*/
141 u64 txpic_alarms;
142 u64 rxpic_int_reg;
143 u64 rxpic_int_mask;
144 u64 rxpic_alarms;
145
146 u64 flsh_int_reg;
147 u64 flsh_int_mask;
148#define PIC_FLSH_INT_REG_CYCLE_FSM_ERR s2BIT(63)
149#define PIC_FLSH_INT_REG_ERR s2BIT(62)
150 u64 flash_alarms;
151
152 u64 mdio_int_reg;
153 u64 mdio_int_mask;
154#define MDIO_INT_REG_MDIO_BUS_ERR s2BIT(0)
155#define MDIO_INT_REG_DTX_BUS_ERR s2BIT(8)
156#define MDIO_INT_REG_LASI s2BIT(39)
157 u64 mdio_alarms;
158
159 u64 iic_int_reg;
160 u64 iic_int_mask;
161#define IIC_INT_REG_BUS_FSM_ERR s2BIT(4)
162#define IIC_INT_REG_BIT_FSM_ERR s2BIT(5)
163#define IIC_INT_REG_CYCLE_FSM_ERR s2BIT(6)
164#define IIC_INT_REG_REQ_FSM_ERR s2BIT(7)
165#define IIC_INT_REG_ACK_ERR s2BIT(8)
166 u64 iic_alarms;
167
168 u8 unused4[0x08];
169
170 u64 gpio_int_reg;
171#define GPIO_INT_REG_DP_ERR_INT s2BIT(0)
172#define GPIO_INT_REG_LINK_DOWN s2BIT(1)
173#define GPIO_INT_REG_LINK_UP s2BIT(2)
174 u64 gpio_int_mask;
175#define GPIO_INT_MASK_LINK_DOWN s2BIT(1)
176#define GPIO_INT_MASK_LINK_UP s2BIT(2)
177 u64 gpio_alarms;
178
179 u8 unused5[0x38];
180
181 u64 tx_traffic_int;
182#define TX_TRAFFIC_INT_n(n) s2BIT(n)
183 u64 tx_traffic_mask;
184
185 u64 rx_traffic_int;
186#define RX_TRAFFIC_INT_n(n) s2BIT(n)
187 u64 rx_traffic_mask;
188
189/* PIC Control registers */
190 u64 pic_control;
191#define PIC_CNTL_RX_ALARM_MAP_1 s2BIT(0)
192#define PIC_CNTL_SHARED_SPLITS(n) vBIT(n,11,5)
193
194 u64 swapper_ctrl;
195#define SWAPPER_CTRL_PIF_R_FE s2BIT(0)
196#define SWAPPER_CTRL_PIF_R_SE s2BIT(1)
197#define SWAPPER_CTRL_PIF_W_FE s2BIT(8)
198#define SWAPPER_CTRL_PIF_W_SE s2BIT(9)
199#define SWAPPER_CTRL_TXP_FE s2BIT(16)
200#define SWAPPER_CTRL_TXP_SE s2BIT(17)
201#define SWAPPER_CTRL_TXD_R_FE s2BIT(18)
202#define SWAPPER_CTRL_TXD_R_SE s2BIT(19)
203#define SWAPPER_CTRL_TXD_W_FE s2BIT(20)
204#define SWAPPER_CTRL_TXD_W_SE s2BIT(21)
205#define SWAPPER_CTRL_TXF_R_FE s2BIT(22)
206#define SWAPPER_CTRL_TXF_R_SE s2BIT(23)
207#define SWAPPER_CTRL_RXD_R_FE s2BIT(32)
208#define SWAPPER_CTRL_RXD_R_SE s2BIT(33)
209#define SWAPPER_CTRL_RXD_W_FE s2BIT(34)
210#define SWAPPER_CTRL_RXD_W_SE s2BIT(35)
211#define SWAPPER_CTRL_RXF_W_FE s2BIT(36)
212#define SWAPPER_CTRL_RXF_W_SE s2BIT(37)
213#define SWAPPER_CTRL_XMSI_FE s2BIT(40)
214#define SWAPPER_CTRL_XMSI_SE s2BIT(41)
215#define SWAPPER_CTRL_STATS_FE s2BIT(48)
216#define SWAPPER_CTRL_STATS_SE s2BIT(49)
217
218 u64 pif_rd_swapper_fb;
219#define IF_RD_SWAPPER_FB 0x0123456789ABCDEF
220
221 u64 scheduled_int_ctrl;
222#define SCHED_INT_CTRL_TIMER_EN s2BIT(0)
223#define SCHED_INT_CTRL_ONE_SHOT s2BIT(1)
224#define SCHED_INT_CTRL_INT2MSI(val) vBIT(val,10,6)
225#define SCHED_INT_PERIOD TBD
226
227 u64 txreqtimeout;
228#define TXREQTO_VAL(val) vBIT(val,0,32)
229#define TXREQTO_EN s2BIT(63)
230
231 u64 statsreqtimeout;
232#define STATREQTO_VAL(n) TBD
233#define STATREQTO_EN s2BIT(63)
234
235 u64 read_retry_delay;
236 u64 read_retry_acceleration;
237 u64 write_retry_delay;
238 u64 write_retry_acceleration;
239
240 u64 xmsi_control;
241 u64 xmsi_access;
242 u64 xmsi_address;
243 u64 xmsi_data;
244
245 u64 rx_mat;
246#define RX_MAT_SET(ring, msi) vBIT(msi, (8 * ring), 8)
247
248 u8 unused6[0x8];
249
250 u64 tx_mat0_n[0x8];
251#define TX_MAT_SET(fifo, msi) vBIT(msi, (8 * fifo), 8)
252
253 u64 xmsi_mask_reg;
254 u64 stat_byte_cnt;
255#define STAT_BC(n) vBIT(n,4,12)
256
257 /* Automated statistics collection */
258 u64 stat_cfg;
259#define STAT_CFG_STAT_EN s2BIT(0)
260#define STAT_CFG_ONE_SHOT_EN s2BIT(1)
261#define STAT_CFG_STAT_NS_EN s2BIT(8)
262#define STAT_CFG_STAT_RO s2BIT(9)
263#define STAT_TRSF_PER(n) TBD
264#define PER_SEC 0x208d5
265#define SET_UPDT_PERIOD(n) vBIT((PER_SEC*n),32,32)
266#define SET_UPDT_CLICKS(val) vBIT(val, 32, 32)
267
268 u64 stat_addr;
269
270 /* General Configuration */
271 u64 mdio_control;
272#define MDIO_MMD_INDX_ADDR(val) vBIT(val, 0, 16)
273#define MDIO_MMD_DEV_ADDR(val) vBIT(val, 19, 5)
274#define MDIO_MMS_PRT_ADDR(val) vBIT(val, 27, 5)
275#define MDIO_CTRL_START_TRANS(val) vBIT(val, 56, 4)
276#define MDIO_OP(val) vBIT(val, 60, 2)
277#define MDIO_OP_ADDR_TRANS 0x0
278#define MDIO_OP_WRITE_TRANS 0x1
279#define MDIO_OP_READ_POST_INC_TRANS 0x2
280#define MDIO_OP_READ_TRANS 0x3
281#define MDIO_MDIO_DATA(val) vBIT(val, 32, 16)
282
283 u64 dtx_control;
284
285 u64 i2c_control;
286#define I2C_CONTROL_DEV_ID(id) vBIT(id,1,3)
287#define I2C_CONTROL_ADDR(addr) vBIT(addr,5,11)
288#define I2C_CONTROL_BYTE_CNT(cnt) vBIT(cnt,22,2)
289#define I2C_CONTROL_READ s2BIT(24)
290#define I2C_CONTROL_NACK s2BIT(25)
291#define I2C_CONTROL_CNTL_START vBIT(0xE,28,4)
292#define I2C_CONTROL_CNTL_END(val) (val & vBIT(0x1,28,4))
293#define I2C_CONTROL_GET_DATA(val) (u32)(val & 0xFFFFFFFF)
294#define I2C_CONTROL_SET_DATA(val) vBIT(val,32,32)
295
296 u64 gpio_control;
297#define GPIO_CTRL_GPIO_0 s2BIT(8)
298 u64 misc_control;
299#define FAULT_BEHAVIOUR s2BIT(0)
300#define EXT_REQ_EN s2BIT(1)
301#define MISC_LINK_STABILITY_PRD(val) vBIT(val,29,3)
302
303 u8 unused7_1[0x230 - 0x208];
304
305 u64 pic_control2;
306 u64 ini_dperr_ctrl;
307
308 u64 wreq_split_mask;
309#define WREQ_SPLIT_MASK_SET_MASK(val) vBIT(val, 52, 12)
310
311 u8 unused7_2[0x800 - 0x248];
312
313/* TxDMA registers */
314 u64 txdma_int_status;
315 u64 txdma_int_mask;
316#define TXDMA_PFC_INT s2BIT(0)
317#define TXDMA_TDA_INT s2BIT(1)
318#define TXDMA_PCC_INT s2BIT(2)
319#define TXDMA_TTI_INT s2BIT(3)
320#define TXDMA_LSO_INT s2BIT(4)
321#define TXDMA_TPA_INT s2BIT(5)
322#define TXDMA_SM_INT s2BIT(6)
323 u64 pfc_err_reg;
324#define PFC_ECC_SG_ERR s2BIT(7)
325#define PFC_ECC_DB_ERR s2BIT(15)
326#define PFC_SM_ERR_ALARM s2BIT(23)
327#define PFC_MISC_0_ERR s2BIT(31)
328#define PFC_MISC_1_ERR s2BIT(32)
329#define PFC_PCIX_ERR s2BIT(39)
330 u64 pfc_err_mask;
331 u64 pfc_err_alarm;
332
333 u64 tda_err_reg;
334#define TDA_Fn_ECC_SG_ERR vBIT(0xff,0,8)
335#define TDA_Fn_ECC_DB_ERR vBIT(0xff,8,8)
336#define TDA_SM0_ERR_ALARM s2BIT(22)
337#define TDA_SM1_ERR_ALARM s2BIT(23)
338#define TDA_PCIX_ERR s2BIT(39)
339 u64 tda_err_mask;
340 u64 tda_err_alarm;
341
342 u64 pcc_err_reg;
343#define PCC_FB_ECC_SG_ERR vBIT(0xFF,0,8)
344#define PCC_TXB_ECC_SG_ERR vBIT(0xFF,8,8)
345#define PCC_FB_ECC_DB_ERR vBIT(0xFF,16, 8)
346#define PCC_TXB_ECC_DB_ERR vBIT(0xff,24,8)
347#define PCC_SM_ERR_ALARM vBIT(0xff,32,8)
348#define PCC_WR_ERR_ALARM vBIT(0xff,40,8)
349#define PCC_N_SERR vBIT(0xff,48,8)
350#define PCC_6_COF_OV_ERR s2BIT(56)
351#define PCC_7_COF_OV_ERR s2BIT(57)
352#define PCC_6_LSO_OV_ERR s2BIT(58)
353#define PCC_7_LSO_OV_ERR s2BIT(59)
354#define PCC_ENABLE_FOUR vBIT(0x0F,0,8)
355 u64 pcc_err_mask;
356 u64 pcc_err_alarm;
357
358 u64 tti_err_reg;
359#define TTI_ECC_SG_ERR s2BIT(7)
360#define TTI_ECC_DB_ERR s2BIT(15)
361#define TTI_SM_ERR_ALARM s2BIT(23)
362 u64 tti_err_mask;
363 u64 tti_err_alarm;
364
365 u64 lso_err_reg;
366#define LSO6_SEND_OFLOW s2BIT(12)
367#define LSO7_SEND_OFLOW s2BIT(13)
368#define LSO6_ABORT s2BIT(14)
369#define LSO7_ABORT s2BIT(15)
370#define LSO6_SM_ERR_ALARM s2BIT(22)
371#define LSO7_SM_ERR_ALARM s2BIT(23)
372 u64 lso_err_mask;
373 u64 lso_err_alarm;
374
375 u64 tpa_err_reg;
376#define TPA_TX_FRM_DROP s2BIT(7)
377#define TPA_SM_ERR_ALARM s2BIT(23)
378
379 u64 tpa_err_mask;
380 u64 tpa_err_alarm;
381
382 u64 sm_err_reg;
383#define SM_SM_ERR_ALARM s2BIT(15)
384 u64 sm_err_mask;
385 u64 sm_err_alarm;
386
387 u8 unused8[0x100 - 0xB8];
388
389/* TxDMA arbiter */
390 u64 tx_dma_wrap_stat;
391
392/* Tx FIFO controller */
393#define X_MAX_FIFOS 8
394#define X_FIFO_MAX_LEN 0x1FFF /*8191 */
395 u64 tx_fifo_partition_0;
396#define TX_FIFO_PARTITION_EN s2BIT(0)
397#define TX_FIFO_PARTITION_0_PRI(val) vBIT(val,5,3)
398#define TX_FIFO_PARTITION_0_LEN(val) vBIT(val,19,13)
399#define TX_FIFO_PARTITION_1_PRI(val) vBIT(val,37,3)
400#define TX_FIFO_PARTITION_1_LEN(val) vBIT(val,51,13 )
401
402 u64 tx_fifo_partition_1;
403#define TX_FIFO_PARTITION_2_PRI(val) vBIT(val,5,3)
404#define TX_FIFO_PARTITION_2_LEN(val) vBIT(val,19,13)
405#define TX_FIFO_PARTITION_3_PRI(val) vBIT(val,37,3)
406#define TX_FIFO_PARTITION_3_LEN(val) vBIT(val,51,13)
407
408 u64 tx_fifo_partition_2;
409#define TX_FIFO_PARTITION_4_PRI(val) vBIT(val,5,3)
410#define TX_FIFO_PARTITION_4_LEN(val) vBIT(val,19,13)
411#define TX_FIFO_PARTITION_5_PRI(val) vBIT(val,37,3)
412#define TX_FIFO_PARTITION_5_LEN(val) vBIT(val,51,13)
413
414 u64 tx_fifo_partition_3;
415#define TX_FIFO_PARTITION_6_PRI(val) vBIT(val,5,3)
416#define TX_FIFO_PARTITION_6_LEN(val) vBIT(val,19,13)
417#define TX_FIFO_PARTITION_7_PRI(val) vBIT(val,37,3)
418#define TX_FIFO_PARTITION_7_LEN(val) vBIT(val,51,13)
419
420#define TX_FIFO_PARTITION_PRI_0 0 /* highest */
421#define TX_FIFO_PARTITION_PRI_1 1
422#define TX_FIFO_PARTITION_PRI_2 2
423#define TX_FIFO_PARTITION_PRI_3 3
424#define TX_FIFO_PARTITION_PRI_4 4
425#define TX_FIFO_PARTITION_PRI_5 5
426#define TX_FIFO_PARTITION_PRI_6 6
427#define TX_FIFO_PARTITION_PRI_7 7 /* lowest */
428
429 u64 tx_w_round_robin_0;
430 u64 tx_w_round_robin_1;
431 u64 tx_w_round_robin_2;
432 u64 tx_w_round_robin_3;
433 u64 tx_w_round_robin_4;
434
435 u64 tti_command_mem;
436#define TTI_CMD_MEM_WE s2BIT(7)
437#define TTI_CMD_MEM_STROBE_NEW_CMD s2BIT(15)
438#define TTI_CMD_MEM_STROBE_BEING_EXECUTED s2BIT(15)
439#define TTI_CMD_MEM_OFFSET(n) vBIT(n,26,6)
440
441 u64 tti_data1_mem;
442#define TTI_DATA1_MEM_TX_TIMER_VAL(n) vBIT(n,6,26)
443#define TTI_DATA1_MEM_TX_TIMER_AC_CI(n) vBIT(n,38,2)
444#define TTI_DATA1_MEM_TX_TIMER_AC_EN s2BIT(38)
445#define TTI_DATA1_MEM_TX_TIMER_CI_EN s2BIT(39)
446#define TTI_DATA1_MEM_TX_URNG_A(n) vBIT(n,41,7)
447#define TTI_DATA1_MEM_TX_URNG_B(n) vBIT(n,49,7)
448#define TTI_DATA1_MEM_TX_URNG_C(n) vBIT(n,57,7)
449
450 u64 tti_data2_mem;
451#define TTI_DATA2_MEM_TX_UFC_A(n) vBIT(n,0,16)
452#define TTI_DATA2_MEM_TX_UFC_B(n) vBIT(n,16,16)
453#define TTI_DATA2_MEM_TX_UFC_C(n) vBIT(n,32,16)
454#define TTI_DATA2_MEM_TX_UFC_D(n) vBIT(n,48,16)
455
456/* Tx Protocol assist */
457 u64 tx_pa_cfg;
458#define TX_PA_CFG_IGNORE_FRM_ERR s2BIT(1)
459#define TX_PA_CFG_IGNORE_SNAP_OUI s2BIT(2)
460#define TX_PA_CFG_IGNORE_LLC_CTRL s2BIT(3)
461#define TX_PA_CFG_IGNORE_L2_ERR s2BIT(6)
462#define RX_PA_CFG_STRIP_VLAN_TAG s2BIT(15)
463
464/* Recent addition, used only for debug purposes. */
465 u64 pcc_enable;
466
467 u8 unused9[0x700 - 0x178];
468
469 u64 txdma_debug_ctrl;
470
471 u8 unused10[0x1800 - 0x1708];
472
473/* RxDMA Registers */
474 u64 rxdma_int_status;
475 u64 rxdma_int_mask;
476#define RXDMA_INT_RC_INT_M s2BIT(0)
477#define RXDMA_INT_RPA_INT_M s2BIT(1)
478#define RXDMA_INT_RDA_INT_M s2BIT(2)
479#define RXDMA_INT_RTI_INT_M s2BIT(3)
480
481 u64 rda_err_reg;
482#define RDA_RXDn_ECC_SG_ERR vBIT(0xFF,0,8)
483#define RDA_RXDn_ECC_DB_ERR vBIT(0xFF,8,8)
484#define RDA_FRM_ECC_SG_ERR s2BIT(23)
485#define RDA_FRM_ECC_DB_N_AERR s2BIT(31)
486#define RDA_SM1_ERR_ALARM s2BIT(38)
487#define RDA_SM0_ERR_ALARM s2BIT(39)
488#define RDA_MISC_ERR s2BIT(47)
489#define RDA_PCIX_ERR s2BIT(55)
490#define RDA_RXD_ECC_DB_SERR s2BIT(63)
491 u64 rda_err_mask;
492 u64 rda_err_alarm;
493
494 u64 rc_err_reg;
495#define RC_PRCn_ECC_SG_ERR vBIT(0xFF,0,8)
496#define RC_PRCn_ECC_DB_ERR vBIT(0xFF,8,8)
497#define RC_FTC_ECC_SG_ERR s2BIT(23)
498#define RC_FTC_ECC_DB_ERR s2BIT(31)
499#define RC_PRCn_SM_ERR_ALARM vBIT(0xFF,32,8)
500#define RC_FTC_SM_ERR_ALARM s2BIT(47)
501#define RC_RDA_FAIL_WR_Rn vBIT(0xFF,48,8)
502 u64 rc_err_mask;
503 u64 rc_err_alarm;
504
505 u64 prc_pcix_err_reg;
506#define PRC_PCI_AB_RD_Rn vBIT(0xFF,0,8)
507#define PRC_PCI_DP_RD_Rn vBIT(0xFF,8,8)
508#define PRC_PCI_AB_WR_Rn vBIT(0xFF,16,8)
509#define PRC_PCI_DP_WR_Rn vBIT(0xFF,24,8)
510#define PRC_PCI_AB_F_WR_Rn vBIT(0xFF,32,8)
511#define PRC_PCI_DP_F_WR_Rn vBIT(0xFF,40,8)
512 u64 prc_pcix_err_mask;
513 u64 prc_pcix_err_alarm;
514
515 u64 rpa_err_reg;
516#define RPA_ECC_SG_ERR s2BIT(7)
517#define RPA_ECC_DB_ERR s2BIT(15)
518#define RPA_FLUSH_REQUEST s2BIT(22)
519#define RPA_SM_ERR_ALARM s2BIT(23)
520#define RPA_CREDIT_ERR s2BIT(31)
521 u64 rpa_err_mask;
522 u64 rpa_err_alarm;
523
524 u64 rti_err_reg;
525#define RTI_ECC_SG_ERR s2BIT(7)
526#define RTI_ECC_DB_ERR s2BIT(15)
527#define RTI_SM_ERR_ALARM s2BIT(23)
528 u64 rti_err_mask;
529 u64 rti_err_alarm;
530
531 u8 unused11[0x100 - 0x88];
532
533/* DMA arbiter */
534 u64 rx_queue_priority;
535#define RX_QUEUE_0_PRIORITY(val) vBIT(val,5,3)
536#define RX_QUEUE_1_PRIORITY(val) vBIT(val,13,3)
537#define RX_QUEUE_2_PRIORITY(val) vBIT(val,21,3)
538#define RX_QUEUE_3_PRIORITY(val) vBIT(val,29,3)
539#define RX_QUEUE_4_PRIORITY(val) vBIT(val,37,3)
540#define RX_QUEUE_5_PRIORITY(val) vBIT(val,45,3)
541#define RX_QUEUE_6_PRIORITY(val) vBIT(val,53,3)
542#define RX_QUEUE_7_PRIORITY(val) vBIT(val,61,3)
543
544#define RX_QUEUE_PRI_0 0 /* highest */
545#define RX_QUEUE_PRI_1 1
546#define RX_QUEUE_PRI_2 2
547#define RX_QUEUE_PRI_3 3
548#define RX_QUEUE_PRI_4 4
549#define RX_QUEUE_PRI_5 5
550#define RX_QUEUE_PRI_6 6
551#define RX_QUEUE_PRI_7 7 /* lowest */
552
553 u64 rx_w_round_robin_0;
554 u64 rx_w_round_robin_1;
555 u64 rx_w_round_robin_2;
556 u64 rx_w_round_robin_3;
557 u64 rx_w_round_robin_4;
558
559 /* Per-ring controller regs */
560#define RX_MAX_RINGS 8
561#if 0
562#define RX_MAX_RINGS_SZ 0xFFFF /* 65536 */
563#define RX_MIN_RINGS_SZ 0x3F /* 63 */
564#endif
565 u64 prc_rxd0_n[RX_MAX_RINGS];
566 u64 prc_ctrl_n[RX_MAX_RINGS];
567#define PRC_CTRL_RC_ENABLED s2BIT(7)
568#define PRC_CTRL_RING_MODE (s2BIT(14)|s2BIT(15))
569#define PRC_CTRL_RING_MODE_1 vBIT(0,14,2)
570#define PRC_CTRL_RING_MODE_3 vBIT(1,14,2)
571#define PRC_CTRL_RING_MODE_5 vBIT(2,14,2)
572#define PRC_CTRL_RING_MODE_x vBIT(3,14,2)
573#define PRC_CTRL_NO_SNOOP (s2BIT(22)|s2BIT(23))
574#define PRC_CTRL_NO_SNOOP_DESC s2BIT(22)
575#define PRC_CTRL_NO_SNOOP_BUFF s2BIT(23)
576#define PRC_CTRL_BIMODAL_INTERRUPT s2BIT(37)
577#define PRC_CTRL_GROUP_READS s2BIT(38)
578#define PRC_CTRL_RXD_BACKOFF_INTERVAL(val) vBIT(val,40,24)
579
580 u64 prc_alarm_action;
581#define PRC_ALARM_ACTION_RR_R0_STOP s2BIT(3)
582#define PRC_ALARM_ACTION_RW_R0_STOP s2BIT(7)
583#define PRC_ALARM_ACTION_RR_R1_STOP s2BIT(11)
584#define PRC_ALARM_ACTION_RW_R1_STOP s2BIT(15)
585#define PRC_ALARM_ACTION_RR_R2_STOP s2BIT(19)
586#define PRC_ALARM_ACTION_RW_R2_STOP s2BIT(23)
587#define PRC_ALARM_ACTION_RR_R3_STOP s2BIT(27)
588#define PRC_ALARM_ACTION_RW_R3_STOP s2BIT(31)
589#define PRC_ALARM_ACTION_RR_R4_STOP s2BIT(35)
590#define PRC_ALARM_ACTION_RW_R4_STOP s2BIT(39)
591#define PRC_ALARM_ACTION_RR_R5_STOP s2BIT(43)
592#define PRC_ALARM_ACTION_RW_R5_STOP s2BIT(47)
593#define PRC_ALARM_ACTION_RR_R6_STOP s2BIT(51)
594#define PRC_ALARM_ACTION_RW_R6_STOP s2BIT(55)
595#define PRC_ALARM_ACTION_RR_R7_STOP s2BIT(59)
596#define PRC_ALARM_ACTION_RW_R7_STOP s2BIT(63)
597
598/* Receive traffic interrupts */
599 u64 rti_command_mem;
600#define RTI_CMD_MEM_WE s2BIT(7)
601#define RTI_CMD_MEM_STROBE s2BIT(15)
602#define RTI_CMD_MEM_STROBE_NEW_CMD s2BIT(15)
603#define RTI_CMD_MEM_STROBE_CMD_BEING_EXECUTED s2BIT(15)
604#define RTI_CMD_MEM_OFFSET(n) vBIT(n,29,3)
605
606 u64 rti_data1_mem;
607#define RTI_DATA1_MEM_RX_TIMER_VAL(n) vBIT(n,3,29)
608#define RTI_DATA1_MEM_RX_TIMER_AC_EN s2BIT(38)
609#define RTI_DATA1_MEM_RX_TIMER_CI_EN s2BIT(39)
610#define RTI_DATA1_MEM_RX_URNG_A(n) vBIT(n,41,7)
611#define RTI_DATA1_MEM_RX_URNG_B(n) vBIT(n,49,7)
612#define RTI_DATA1_MEM_RX_URNG_C(n) vBIT(n,57,7)
613
614 u64 rti_data2_mem;
615#define RTI_DATA2_MEM_RX_UFC_A(n) vBIT(n,0,16)
616#define RTI_DATA2_MEM_RX_UFC_B(n) vBIT(n,16,16)
617#define RTI_DATA2_MEM_RX_UFC_C(n) vBIT(n,32,16)
618#define RTI_DATA2_MEM_RX_UFC_D(n) vBIT(n,48,16)
619
620 u64 rx_pa_cfg;
621#define RX_PA_CFG_IGNORE_FRM_ERR s2BIT(1)
622#define RX_PA_CFG_IGNORE_SNAP_OUI s2BIT(2)
623#define RX_PA_CFG_IGNORE_LLC_CTRL s2BIT(3)
624#define RX_PA_CFG_IGNORE_L2_ERR s2BIT(6)
625
626 u64 unused_11_1;
627
628 u64 ring_bump_counter1;
629 u64 ring_bump_counter2;
630
631 u8 unused12[0x700 - 0x1F0];
632
633 u64 rxdma_debug_ctrl;
634
635 u8 unused13[0x2000 - 0x1f08];
636
637/* Media Access Controller Register */
638 u64 mac_int_status;
639 u64 mac_int_mask;
640#define MAC_INT_STATUS_TMAC_INT s2BIT(0)
641#define MAC_INT_STATUS_RMAC_INT s2BIT(1)
642
643 u64 mac_tmac_err_reg;
644#define TMAC_ECC_SG_ERR s2BIT(7)
645#define TMAC_ECC_DB_ERR s2BIT(15)
646#define TMAC_TX_BUF_OVRN s2BIT(23)
647#define TMAC_TX_CRI_ERR s2BIT(31)
648#define TMAC_TX_SM_ERR s2BIT(39)
649#define TMAC_DESC_ECC_SG_ERR s2BIT(47)
650#define TMAC_DESC_ECC_DB_ERR s2BIT(55)
651
652 u64 mac_tmac_err_mask;
653 u64 mac_tmac_err_alarm;
654
655 u64 mac_rmac_err_reg;
656#define RMAC_RX_BUFF_OVRN s2BIT(0)
657#define RMAC_FRM_RCVD_INT s2BIT(1)
658#define RMAC_UNUSED_INT s2BIT(2)
659#define RMAC_RTS_PNUM_ECC_SG_ERR s2BIT(5)
660#define RMAC_RTS_DS_ECC_SG_ERR s2BIT(6)
661#define RMAC_RD_BUF_ECC_SG_ERR s2BIT(7)
662#define RMAC_RTH_MAP_ECC_SG_ERR s2BIT(8)
663#define RMAC_RTH_SPDM_ECC_SG_ERR s2BIT(9)
664#define RMAC_RTS_VID_ECC_SG_ERR s2BIT(10)
665#define RMAC_DA_SHADOW_ECC_SG_ERR s2BIT(11)
666#define RMAC_RTS_PNUM_ECC_DB_ERR s2BIT(13)
667#define RMAC_RTS_DS_ECC_DB_ERR s2BIT(14)
668#define RMAC_RD_BUF_ECC_DB_ERR s2BIT(15)
669#define RMAC_RTH_MAP_ECC_DB_ERR s2BIT(16)
670#define RMAC_RTH_SPDM_ECC_DB_ERR s2BIT(17)
671#define RMAC_RTS_VID_ECC_DB_ERR s2BIT(18)
672#define RMAC_DA_SHADOW_ECC_DB_ERR s2BIT(19)
673#define RMAC_LINK_STATE_CHANGE_INT s2BIT(31)
674#define RMAC_RX_SM_ERR s2BIT(39)
675#define RMAC_SINGLE_ECC_ERR (s2BIT(5) | s2BIT(6) | s2BIT(7) |\
676 s2BIT(8) | s2BIT(9) | s2BIT(10)|\
677 s2BIT(11))
678#define RMAC_DOUBLE_ECC_ERR (s2BIT(13) | s2BIT(14) | s2BIT(15) |\
679 s2BIT(16) | s2BIT(17) | s2BIT(18)|\
680 s2BIT(19))
681 u64 mac_rmac_err_mask;
682 u64 mac_rmac_err_alarm;
683
684 u8 unused14[0x100 - 0x40];
685
686 u64 mac_cfg;
687#define MAC_CFG_TMAC_ENABLE s2BIT(0)
688#define MAC_CFG_RMAC_ENABLE s2BIT(1)
689#define MAC_CFG_LAN_NOT_WAN s2BIT(2)
690#define MAC_CFG_TMAC_LOOPBACK s2BIT(3)
691#define MAC_CFG_TMAC_APPEND_PAD s2BIT(4)
692#define MAC_CFG_RMAC_STRIP_FCS s2BIT(5)
693#define MAC_CFG_RMAC_STRIP_PAD s2BIT(6)
694#define MAC_CFG_RMAC_PROM_ENABLE s2BIT(7)
695#define MAC_RMAC_DISCARD_PFRM s2BIT(8)
696#define MAC_RMAC_BCAST_ENABLE s2BIT(9)
697#define MAC_RMAC_ALL_ADDR_ENABLE s2BIT(10)
698#define MAC_RMAC_INVLD_IPG_THR(val) vBIT(val,16,8)
699
700 u64 tmac_avg_ipg;
701#define TMAC_AVG_IPG(val) vBIT(val,0,8)
702
703 u64 rmac_max_pyld_len;
704#define RMAC_MAX_PYLD_LEN(val) vBIT(val,2,14)
705#define RMAC_MAX_PYLD_LEN_DEF vBIT(1500,2,14)
706#define RMAC_MAX_PYLD_LEN_JUMBO_DEF vBIT(9600,2,14)
707
708 u64 rmac_err_cfg;
709#define RMAC_ERR_FCS s2BIT(0)
710#define RMAC_ERR_FCS_ACCEPT s2BIT(1)
711#define RMAC_ERR_TOO_LONG s2BIT(1)
712#define RMAC_ERR_TOO_LONG_ACCEPT s2BIT(1)
713#define RMAC_ERR_RUNT s2BIT(2)
714#define RMAC_ERR_RUNT_ACCEPT s2BIT(2)
715#define RMAC_ERR_LEN_MISMATCH s2BIT(3)
716#define RMAC_ERR_LEN_MISMATCH_ACCEPT s2BIT(3)
717
718 u64 rmac_cfg_key;
719#define RMAC_CFG_KEY(val) vBIT(val,0,16)
720
721#define S2IO_MAC_ADDR_START_OFFSET 0
722
723#define S2IO_XENA_MAX_MC_ADDRESSES 64 /* multicast addresses */
724#define S2IO_HERC_MAX_MC_ADDRESSES 256
725
726#define S2IO_XENA_MAX_MAC_ADDRESSES 16
727#define S2IO_HERC_MAX_MAC_ADDRESSES 64
728
729#define S2IO_XENA_MC_ADDR_START_OFFSET 16
730#define S2IO_HERC_MC_ADDR_START_OFFSET 64
731
732 u64 rmac_addr_cmd_mem;
733#define RMAC_ADDR_CMD_MEM_WE s2BIT(7)
734#define RMAC_ADDR_CMD_MEM_RD 0
735#define RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD s2BIT(15)
736#define RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING s2BIT(15)
737#define RMAC_ADDR_CMD_MEM_OFFSET(n) vBIT(n,26,6)
738
739 u64 rmac_addr_data0_mem;
740#define RMAC_ADDR_DATA0_MEM_ADDR(n) vBIT(n,0,48)
741#define RMAC_ADDR_DATA0_MEM_USER s2BIT(48)
742
743 u64 rmac_addr_data1_mem;
744#define RMAC_ADDR_DATA1_MEM_MASK(n) vBIT(n,0,48)
745
746 u8 unused15[0x8];
747
748/*
749 u64 rmac_addr_cfg;
750#define RMAC_ADDR_UCASTn_EN(n) mBIT(0)_n(n)
751#define RMAC_ADDR_MCASTn_EN(n) mBIT(0)_n(n)
752#define RMAC_ADDR_BCAST_EN vBIT(0)_48
753#define RMAC_ADDR_ALL_ADDR_EN vBIT(0)_49
754*/
755 u64 tmac_ipg_cfg;
756
757 u64 rmac_pause_cfg;
758#define RMAC_PAUSE_GEN s2BIT(0)
759#define RMAC_PAUSE_GEN_ENABLE s2BIT(0)
760#define RMAC_PAUSE_RX s2BIT(1)
761#define RMAC_PAUSE_RX_ENABLE s2BIT(1)
762#define RMAC_PAUSE_HG_PTIME_DEF vBIT(0xFFFF,16,16)
763#define RMAC_PAUSE_HG_PTIME(val) vBIT(val,16,16)
764
765 u64 rmac_red_cfg;
766
767 u64 rmac_red_rate_q0q3;
768 u64 rmac_red_rate_q4q7;
769
770 u64 mac_link_util;
771#define MAC_TX_LINK_UTIL vBIT(0xFE,1,7)
772#define MAC_TX_LINK_UTIL_DISABLE vBIT(0xF, 8,4)
773#define MAC_TX_LINK_UTIL_VAL( n ) vBIT(n,8,4)
774#define MAC_RX_LINK_UTIL vBIT(0xFE,33,7)
775#define MAC_RX_LINK_UTIL_DISABLE vBIT(0xF,40,4)
776#define MAC_RX_LINK_UTIL_VAL( n ) vBIT(n,40,4)
777
778#define MAC_LINK_UTIL_DISABLE MAC_TX_LINK_UTIL_DISABLE | \
779 MAC_RX_LINK_UTIL_DISABLE
780
781 u64 rmac_invalid_ipg;
782
783/* rx traffic steering */
784#define MAC_RTS_FRM_LEN_SET(len) vBIT(len,2,14)
785 u64 rts_frm_len_n[8];
786
787 u64 rts_qos_steering;
788
789#define MAX_DIX_MAP 4
790 u64 rts_dix_map_n[MAX_DIX_MAP];
791#define RTS_DIX_MAP_ETYPE(val) vBIT(val,0,16)
792#define RTS_DIX_MAP_SCW(val) s2BIT(val,21)
793
794 u64 rts_q_alternates;
795 u64 rts_default_q;
796
797 u64 rts_ctrl;
798#define RTS_CTRL_IGNORE_SNAP_OUI s2BIT(2)
799#define RTS_CTRL_IGNORE_LLC_CTRL s2BIT(3)
800
801 u64 rts_pn_cam_ctrl;
802#define RTS_PN_CAM_CTRL_WE s2BIT(7)
803#define RTS_PN_CAM_CTRL_STROBE_NEW_CMD s2BIT(15)
804#define RTS_PN_CAM_CTRL_STROBE_BEING_EXECUTED s2BIT(15)
805#define RTS_PN_CAM_CTRL_OFFSET(n) vBIT(n,24,8)
806 u64 rts_pn_cam_data;
807#define RTS_PN_CAM_DATA_TCP_SELECT s2BIT(7)
808#define RTS_PN_CAM_DATA_PORT(val) vBIT(val,8,16)
809#define RTS_PN_CAM_DATA_SCW(val) vBIT(val,24,8)
810
811 u64 rts_ds_mem_ctrl;
812#define RTS_DS_MEM_CTRL_WE s2BIT(7)
813#define RTS_DS_MEM_CTRL_STROBE_NEW_CMD s2BIT(15)
814#define RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED s2BIT(15)
815#define RTS_DS_MEM_CTRL_OFFSET(n) vBIT(n,26,6)
816 u64 rts_ds_mem_data;
817#define RTS_DS_MEM_DATA(n) vBIT(n,0,8)
818
819 u8 unused16[0x700 - 0x220];
820
821 u64 mac_debug_ctrl;
822#define MAC_DBG_ACTIVITY_VALUE 0x411040400000000ULL
823
824 u8 unused17[0x2800 - 0x2708];
825
826/* memory controller registers */
827 u64 mc_int_status;
828#define MC_INT_STATUS_MC_INT s2BIT(0)
829 u64 mc_int_mask;
830#define MC_INT_MASK_MC_INT s2BIT(0)
831
832 u64 mc_err_reg;
833#define MC_ERR_REG_ECC_DB_ERR_L s2BIT(14)
834#define MC_ERR_REG_ECC_DB_ERR_U s2BIT(15)
835#define MC_ERR_REG_MIRI_ECC_DB_ERR_0 s2BIT(18)
836#define MC_ERR_REG_MIRI_ECC_DB_ERR_1 s2BIT(20)
837#define MC_ERR_REG_MIRI_CRI_ERR_0 s2BIT(22)
838#define MC_ERR_REG_MIRI_CRI_ERR_1 s2BIT(23)
839#define MC_ERR_REG_SM_ERR s2BIT(31)
840#define MC_ERR_REG_ECC_ALL_SNG (s2BIT(2) | s2BIT(3) | s2BIT(4) | s2BIT(5) |\
841 s2BIT(17) | s2BIT(19))
842#define MC_ERR_REG_ECC_ALL_DBL (s2BIT(10) | s2BIT(11) | s2BIT(12) |\
843 s2BIT(13) | s2BIT(18) | s2BIT(20))
844#define PLL_LOCK_N s2BIT(39)
845 u64 mc_err_mask;
846 u64 mc_err_alarm;
847
848 u8 unused18[0x100 - 0x28];
849
850/* MC configuration */
851 u64 rx_queue_cfg;
852#define RX_QUEUE_CFG_Q0_SZ(n) vBIT(n,0,8)
853#define RX_QUEUE_CFG_Q1_SZ(n) vBIT(n,8,8)
854#define RX_QUEUE_CFG_Q2_SZ(n) vBIT(n,16,8)
855#define RX_QUEUE_CFG_Q3_SZ(n) vBIT(n,24,8)
856#define RX_QUEUE_CFG_Q4_SZ(n) vBIT(n,32,8)
857#define RX_QUEUE_CFG_Q5_SZ(n) vBIT(n,40,8)
858#define RX_QUEUE_CFG_Q6_SZ(n) vBIT(n,48,8)
859#define RX_QUEUE_CFG_Q7_SZ(n) vBIT(n,56,8)
860
861 u64 mc_rldram_mrs;
862#define MC_RLDRAM_QUEUE_SIZE_ENABLE s2BIT(39)
863#define MC_RLDRAM_MRS_ENABLE s2BIT(47)
864
865 u64 mc_rldram_interleave;
866
867 u64 mc_pause_thresh_q0q3;
868 u64 mc_pause_thresh_q4q7;
869
870 u64 mc_red_thresh_q[8];
871
872 u8 unused19[0x200 - 0x168];
873 u64 mc_rldram_ref_per;
874 u8 unused20[0x220 - 0x208];
875 u64 mc_rldram_test_ctrl;
876#define MC_RLDRAM_TEST_MODE s2BIT(47)
877#define MC_RLDRAM_TEST_WRITE s2BIT(7)
878#define MC_RLDRAM_TEST_GO s2BIT(15)
879#define MC_RLDRAM_TEST_DONE s2BIT(23)
880#define MC_RLDRAM_TEST_PASS s2BIT(31)
881
882 u8 unused21[0x240 - 0x228];
883 u64 mc_rldram_test_add;
884 u8 unused22[0x260 - 0x248];
885 u64 mc_rldram_test_d0;
886 u8 unused23[0x280 - 0x268];
887 u64 mc_rldram_test_d1;
888 u8 unused24[0x300 - 0x288];
889 u64 mc_rldram_test_d2;
890
891 u8 unused24_1[0x360 - 0x308];
892 u64 mc_rldram_ctrl;
893#define MC_RLDRAM_ENABLE_ODT s2BIT(7)
894
895 u8 unused24_2[0x640 - 0x368];
896 u64 mc_rldram_ref_per_herc;
897#define MC_RLDRAM_SET_REF_PERIOD(val) vBIT(val, 0, 16)
898
899 u8 unused24_3[0x660 - 0x648];
900 u64 mc_rldram_mrs_herc;
901
902 u8 unused25[0x700 - 0x668];
903 u64 mc_debug_ctrl;
904
905 u8 unused26[0x3000 - 0x2f08];
906
907/* XGXS */
908 /* XGXS control registers */
909
910 u64 xgxs_int_status;
911#define XGXS_INT_STATUS_TXGXS s2BIT(0)
912#define XGXS_INT_STATUS_RXGXS s2BIT(1)
913 u64 xgxs_int_mask;
914#define XGXS_INT_MASK_TXGXS s2BIT(0)
915#define XGXS_INT_MASK_RXGXS s2BIT(1)
916
917 u64 xgxs_txgxs_err_reg;
918#define TXGXS_ECC_SG_ERR s2BIT(7)
919#define TXGXS_ECC_DB_ERR s2BIT(15)
920#define TXGXS_ESTORE_UFLOW s2BIT(31)
921#define TXGXS_TX_SM_ERR s2BIT(39)
922
923 u64 xgxs_txgxs_err_mask;
924 u64 xgxs_txgxs_err_alarm;
925
926 u64 xgxs_rxgxs_err_reg;
927#define RXGXS_ESTORE_OFLOW s2BIT(7)
928#define RXGXS_RX_SM_ERR s2BIT(39)
929 u64 xgxs_rxgxs_err_mask;
930 u64 xgxs_rxgxs_err_alarm;
931
932 u8 unused27[0x100 - 0x40];
933
934 u64 xgxs_cfg;
935 u64 xgxs_status;
936
937 u64 xgxs_cfg_key;
938 u64 xgxs_efifo_cfg; /* CHANGED */
939 u64 rxgxs_ber_0; /* CHANGED */
940 u64 rxgxs_ber_1; /* CHANGED */
941
942 u64 spi_control;
943#define SPI_CONTROL_KEY(key) vBIT(key,0,4)
944#define SPI_CONTROL_BYTECNT(cnt) vBIT(cnt,29,3)
945#define SPI_CONTROL_CMD(cmd) vBIT(cmd,32,8)
946#define SPI_CONTROL_ADDR(addr) vBIT(addr,40,24)
947#define SPI_CONTROL_SEL1 s2BIT(4)
948#define SPI_CONTROL_REQ s2BIT(7)
949#define SPI_CONTROL_NACK s2BIT(5)
950#define SPI_CONTROL_DONE s2BIT(6)
951 u64 spi_data;
952#define SPI_DATA_WRITE(data,len) vBIT(data,0,len)
953};
954
955#define XENA_REG_SPACE sizeof(struct XENA_dev_config)
956#define XENA_EEPROM_SPACE (0x01 << 11)
957
958#endif /* _REGS_H */
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
new file mode 100644
index 000000000000..277d48b0800a
--- /dev/null
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -0,0 +1,8674 @@
1/************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2010 Exar Corp.
4 *
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
12 *
13 * Credits:
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code parts that were
22 * deprecated and also for styling-related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
26 *
27 * The module loadable parameters supported by the driver, with a brief
28 * explanation of each variable:
29 *
30 * rx_ring_num : This can be used to program the number of receive rings used
31 * in the driver.
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35 * values are 1 and 2.
36 * tx_fifo_num: This defines the number of Tx FIFOs used in the driver.
37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
38 * Tx descriptors that can be associated with each corresponding FIFO.
39 * intr_type: This defines the type of interrupt. The values can be 0 (INTA)
40 * or 2 (MSI_X). The default value is 2 (MSI_X).
41 * lro_max_pkts: This parameter defines the maximum number of packets that can
42 * be aggregated into a single large packet.
43 * napi: This parameter is used to enable/disable NAPI (Rx polling).
44 * Possible values are '1' for enable and '0' for disable. Default is '1'.
45 * ufo: This parameter is used to enable/disable UDP Fragmentation Offload (UFO).
46 * Possible values are '1' for enable and '0' for disable. Default is '0'.
47 * vlan_tag_strip: This can be used to enable or disable VLAN tag stripping.
48 * Possible values are '1' for enable and '0' for disable.
49 * Default is '2', which means disable in promiscuous mode
50 * and enable in non-promiscuous mode.
51 * multiq: This parameter is used to enable/disable MULTIQUEUE support.
52 * Possible values are '1' for enable and '0' for disable. Default is '0'.
53 ************************************************************************/
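/* A hypothetical invocation setting a few of the parameters described above
 * (the parameter names match the S2IO_PARM_INT() declarations later in this
 * file; the values are purely illustrative):
 *
 *	modprobe s2io intr_type=2 napi=1 rx_ring_num=2 tx_fifo_num=4
 */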
54
55#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
56
57#include <linux/module.h>
58#include <linux/types.h>
59#include <linux/errno.h>
60#include <linux/ioport.h>
61#include <linux/pci.h>
62#include <linux/dma-mapping.h>
63#include <linux/kernel.h>
64#include <linux/netdevice.h>
65#include <linux/etherdevice.h>
66#include <linux/mdio.h>
67#include <linux/skbuff.h>
68#include <linux/init.h>
69#include <linux/delay.h>
70#include <linux/stddef.h>
71#include <linux/ioctl.h>
72#include <linux/timex.h>
73#include <linux/ethtool.h>
74#include <linux/workqueue.h>
75#include <linux/if_vlan.h>
76#include <linux/ip.h>
77#include <linux/tcp.h>
78#include <linux/uaccess.h>
79#include <linux/io.h>
80#include <linux/slab.h>
81#include <linux/prefetch.h>
82#include <net/tcp.h>
83
84#include <asm/system.h>
85#include <asm/div64.h>
86#include <asm/irq.h>
87
88/* local include */
89#include "s2io.h"
90#include "s2io-regs.h"
91
92#define DRV_VERSION "2.0.26.28"
93
94/* S2io Driver name & version. */
95static const char s2io_driver_name[] = "Neterion";
96static const char s2io_driver_version[] = DRV_VERSION;
97
98static const int rxd_size[2] = {32, 48};
99static const int rxd_count[2] = {127, 85};
100
101static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
102{
103 int ret;
104
105 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
106 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
107
108 return ret;
109}
110
111/*
112 * Cards with the following subsystem_ids have a link state indication
113 * problem: 600B, 600C, 600D, 640B, 640C and 640D.
114 * The macro below identifies these cards given the subsystem_id.
115 */
116#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
117 (dev_type == XFRAME_I_DEVICE) ? \
118 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
119 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
120
121#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
122 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
123
124static inline int is_s2io_card_up(const struct s2io_nic *sp)
125{
126 return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
127}
128
129/* Ethtool related variables and Macros. */
130static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
131 "Register test\t(offline)",
132 "Eeprom test\t(offline)",
133 "Link test\t(online)",
134 "RLDRAM test\t(offline)",
135 "BIST Test\t(offline)"
136};
137
138static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
139 {"tmac_frms"},
140 {"tmac_data_octets"},
141 {"tmac_drop_frms"},
142 {"tmac_mcst_frms"},
143 {"tmac_bcst_frms"},
144 {"tmac_pause_ctrl_frms"},
145 {"tmac_ttl_octets"},
146 {"tmac_ucst_frms"},
147 {"tmac_nucst_frms"},
148 {"tmac_any_err_frms"},
149 {"tmac_ttl_less_fb_octets"},
150 {"tmac_vld_ip_octets"},
151 {"tmac_vld_ip"},
152 {"tmac_drop_ip"},
153 {"tmac_icmp"},
154 {"tmac_rst_tcp"},
155 {"tmac_tcp"},
156 {"tmac_udp"},
157 {"rmac_vld_frms"},
158 {"rmac_data_octets"},
159 {"rmac_fcs_err_frms"},
160 {"rmac_drop_frms"},
161 {"rmac_vld_mcst_frms"},
162 {"rmac_vld_bcst_frms"},
163 {"rmac_in_rng_len_err_frms"},
164 {"rmac_out_rng_len_err_frms"},
165 {"rmac_long_frms"},
166 {"rmac_pause_ctrl_frms"},
167 {"rmac_unsup_ctrl_frms"},
168 {"rmac_ttl_octets"},
169 {"rmac_accepted_ucst_frms"},
170 {"rmac_accepted_nucst_frms"},
171 {"rmac_discarded_frms"},
172 {"rmac_drop_events"},
173 {"rmac_ttl_less_fb_octets"},
174 {"rmac_ttl_frms"},
175 {"rmac_usized_frms"},
176 {"rmac_osized_frms"},
177 {"rmac_frag_frms"},
178 {"rmac_jabber_frms"},
179 {"rmac_ttl_64_frms"},
180 {"rmac_ttl_65_127_frms"},
181 {"rmac_ttl_128_255_frms"},
182 {"rmac_ttl_256_511_frms"},
183 {"rmac_ttl_512_1023_frms"},
184 {"rmac_ttl_1024_1518_frms"},
185 {"rmac_ip"},
186 {"rmac_ip_octets"},
187 {"rmac_hdr_err_ip"},
188 {"rmac_drop_ip"},
189 {"rmac_icmp"},
190 {"rmac_tcp"},
191 {"rmac_udp"},
192 {"rmac_err_drp_udp"},
193 {"rmac_xgmii_err_sym"},
194 {"rmac_frms_q0"},
195 {"rmac_frms_q1"},
196 {"rmac_frms_q2"},
197 {"rmac_frms_q3"},
198 {"rmac_frms_q4"},
199 {"rmac_frms_q5"},
200 {"rmac_frms_q6"},
201 {"rmac_frms_q7"},
202 {"rmac_full_q0"},
203 {"rmac_full_q1"},
204 {"rmac_full_q2"},
205 {"rmac_full_q3"},
206 {"rmac_full_q4"},
207 {"rmac_full_q5"},
208 {"rmac_full_q6"},
209 {"rmac_full_q7"},
210 {"rmac_pause_cnt"},
211 {"rmac_xgmii_data_err_cnt"},
212 {"rmac_xgmii_ctrl_err_cnt"},
213 {"rmac_accepted_ip"},
214 {"rmac_err_tcp"},
215 {"rd_req_cnt"},
216 {"new_rd_req_cnt"},
217 {"new_rd_req_rtry_cnt"},
218 {"rd_rtry_cnt"},
219 {"wr_rtry_rd_ack_cnt"},
220 {"wr_req_cnt"},
221 {"new_wr_req_cnt"},
222 {"new_wr_req_rtry_cnt"},
223 {"wr_rtry_cnt"},
224 {"wr_disc_cnt"},
225 {"rd_rtry_wr_ack_cnt"},
226 {"txp_wr_cnt"},
227 {"txd_rd_cnt"},
228 {"txd_wr_cnt"},
229 {"rxd_rd_cnt"},
230 {"rxd_wr_cnt"},
231 {"txf_rd_cnt"},
232 {"rxf_wr_cnt"}
233};
234
235static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
236 {"rmac_ttl_1519_4095_frms"},
237 {"rmac_ttl_4096_8191_frms"},
238 {"rmac_ttl_8192_max_frms"},
239 {"rmac_ttl_gt_max_frms"},
240 {"rmac_osized_alt_frms"},
241 {"rmac_jabber_alt_frms"},
242 {"rmac_gt_max_alt_frms"},
243 {"rmac_vlan_frms"},
244 {"rmac_len_discard"},
245 {"rmac_fcs_discard"},
246 {"rmac_pf_discard"},
247 {"rmac_da_discard"},
248 {"rmac_red_discard"},
249 {"rmac_rts_discard"},
250 {"rmac_ingm_full_discard"},
251 {"link_fault_cnt"}
252};
253
254static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
255 {"\n DRIVER STATISTICS"},
256 {"single_bit_ecc_errs"},
257 {"double_bit_ecc_errs"},
258 {"parity_err_cnt"},
259 {"serious_err_cnt"},
260 {"soft_reset_cnt"},
261 {"fifo_full_cnt"},
262 {"ring_0_full_cnt"},
263 {"ring_1_full_cnt"},
264 {"ring_2_full_cnt"},
265 {"ring_3_full_cnt"},
266 {"ring_4_full_cnt"},
267 {"ring_5_full_cnt"},
268 {"ring_6_full_cnt"},
269 {"ring_7_full_cnt"},
270 {"alarm_transceiver_temp_high"},
271 {"alarm_transceiver_temp_low"},
272 {"alarm_laser_bias_current_high"},
273 {"alarm_laser_bias_current_low"},
274 {"alarm_laser_output_power_high"},
275 {"alarm_laser_output_power_low"},
276 {"warn_transceiver_temp_high"},
277 {"warn_transceiver_temp_low"},
278 {"warn_laser_bias_current_high"},
279 {"warn_laser_bias_current_low"},
280 {"warn_laser_output_power_high"},
281 {"warn_laser_output_power_low"},
282 {"lro_aggregated_pkts"},
283 {"lro_flush_both_count"},
284 {"lro_out_of_sequence_pkts"},
285 {"lro_flush_due_to_max_pkts"},
286 {"lro_avg_aggr_pkts"},
287 {"mem_alloc_fail_cnt"},
288 {"pci_map_fail_cnt"},
289 {"watchdog_timer_cnt"},
290 {"mem_allocated"},
291 {"mem_freed"},
292 {"link_up_cnt"},
293 {"link_down_cnt"},
294 {"link_up_time"},
295 {"link_down_time"},
296 {"tx_tcode_buf_abort_cnt"},
297 {"tx_tcode_desc_abort_cnt"},
298 {"tx_tcode_parity_err_cnt"},
299 {"tx_tcode_link_loss_cnt"},
300 {"tx_tcode_list_proc_err_cnt"},
301 {"rx_tcode_parity_err_cnt"},
302 {"rx_tcode_abort_cnt"},
303 {"rx_tcode_parity_abort_cnt"},
304 {"rx_tcode_rda_fail_cnt"},
305 {"rx_tcode_unkn_prot_cnt"},
306 {"rx_tcode_fcs_err_cnt"},
307 {"rx_tcode_buf_size_err_cnt"},
308 {"rx_tcode_rxd_corrupt_cnt"},
309 {"rx_tcode_unkn_err_cnt"},
310 {"tda_err_cnt"},
311 {"pfc_err_cnt"},
312 {"pcc_err_cnt"},
313 {"tti_err_cnt"},
314 {"tpa_err_cnt"},
315 {"sm_err_cnt"},
316 {"lso_err_cnt"},
317 {"mac_tmac_err_cnt"},
318 {"mac_rmac_err_cnt"},
319 {"xgxs_txgxs_err_cnt"},
320 {"xgxs_rxgxs_err_cnt"},
321 {"rc_err_cnt"},
322 {"prc_pcix_err_cnt"},
323 {"rpa_err_cnt"},
324 {"rda_err_cnt"},
325 {"rti_err_cnt"},
326 {"mc_err_cnt"}
327};
328
329#define S2IO_XENA_STAT_LEN ARRAY_SIZE(ethtool_xena_stats_keys)
330#define S2IO_ENHANCED_STAT_LEN ARRAY_SIZE(ethtool_enhanced_stats_keys)
331#define S2IO_DRIVER_STAT_LEN ARRAY_SIZE(ethtool_driver_stats_keys)
332
333#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
334#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)
335
336#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
337#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)
338
339#define S2IO_TEST_LEN ARRAY_SIZE(s2io_gstrings)
340#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
341
342#define S2IO_TIMER_CONF(timer, handle, arg, exp) \
343 init_timer(&timer); \
344 timer.function = handle; \
345 timer.data = (unsigned long)arg; \
346 mod_timer(&timer, (jiffies + exp)) \
347
348/* copy mac addr to def_mac_addr array */
349static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
350{
351 sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
352 sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
353 sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
354 sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
355 sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
356 sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
357}
358
359/*
360 * Constants to be programmed into the Xena's registers, to configure
361 * the XAUI.
362 */
363
364#define END_SIGN 0x0
365static const u64 herc_act_dtx_cfg[] = {
366 /* Set address */
367 0x8000051536750000ULL, 0x80000515367500E0ULL,
368 /* Write data */
369 0x8000051536750004ULL, 0x80000515367500E4ULL,
370 /* Set address */
371 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
372 /* Write data */
373 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
374 /* Set address */
375 0x801205150D440000ULL, 0x801205150D4400E0ULL,
376 /* Write data */
377 0x801205150D440004ULL, 0x801205150D4400E4ULL,
378 /* Set address */
379 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
380 /* Write data */
381 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
382 /* Done */
383 END_SIGN
384};
385
386static const u64 xena_dtx_cfg[] = {
387 /* Set address */
388 0x8000051500000000ULL, 0x80000515000000E0ULL,
389 /* Write data */
390 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
391 /* Set address */
392 0x8001051500000000ULL, 0x80010515000000E0ULL,
393 /* Write data */
394 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
395 /* Set address */
396 0x8002051500000000ULL, 0x80020515000000E0ULL,
397 /* Write data */
398 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
399 END_SIGN
400};
401
402/*
403 * Constants for fixing the MAC address problem seen mostly on
404 * Alpha machines.
405 */
406static const u64 fix_mac[] = {
407 0x0060000000000000ULL, 0x0060600000000000ULL,
408 0x0040600000000000ULL, 0x0000600000000000ULL,
409 0x0020600000000000ULL, 0x0060600000000000ULL,
410 0x0020600000000000ULL, 0x0060600000000000ULL,
411 0x0020600000000000ULL, 0x0060600000000000ULL,
412 0x0020600000000000ULL, 0x0060600000000000ULL,
413 0x0020600000000000ULL, 0x0060600000000000ULL,
414 0x0020600000000000ULL, 0x0060600000000000ULL,
415 0x0020600000000000ULL, 0x0060600000000000ULL,
416 0x0020600000000000ULL, 0x0060600000000000ULL,
417 0x0020600000000000ULL, 0x0060600000000000ULL,
418 0x0020600000000000ULL, 0x0060600000000000ULL,
419 0x0020600000000000ULL, 0x0000600000000000ULL,
420 0x0040600000000000ULL, 0x0060600000000000ULL,
421 END_SIGN
422};
423
424MODULE_LICENSE("GPL");
425MODULE_VERSION(DRV_VERSION);
426
427
428/* Module Loadable parameters. */
429S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
430S2IO_PARM_INT(rx_ring_num, 1);
431S2IO_PARM_INT(multiq, 0);
432S2IO_PARM_INT(rx_ring_mode, 1);
433S2IO_PARM_INT(use_continuous_tx_intrs, 1);
434S2IO_PARM_INT(rmac_pause_time, 0x100);
435S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
436S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
437S2IO_PARM_INT(shared_splits, 0);
438S2IO_PARM_INT(tmac_util_period, 5);
439S2IO_PARM_INT(rmac_util_period, 5);
440S2IO_PARM_INT(l3l4hdr_size, 128);
441/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
442S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
443/* Frequency of Rx desc syncs expressed as power of 2 */
444S2IO_PARM_INT(rxsync_frequency, 3);
445/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
446S2IO_PARM_INT(intr_type, 2);
447/* Large receive offload feature */
448
449/* Max pkts to be aggregated by LRO at one time. If not specified,
450 * aggregation happens until we hit max IP pkt size(64K)
451 */
452S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
453S2IO_PARM_INT(indicate_max_pkts, 0);
454
455S2IO_PARM_INT(napi, 1);
456S2IO_PARM_INT(ufo, 0);
457S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
458
459static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
460{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
461static unsigned int rx_ring_sz[MAX_RX_RINGS] =
462{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
463static unsigned int rts_frm_len[MAX_RX_RINGS] =
464{[0 ...(MAX_RX_RINGS - 1)] = 0 };
465
466module_param_array(tx_fifo_len, uint, NULL, 0);
467module_param_array(rx_ring_sz, uint, NULL, 0);
468module_param_array(rts_frm_len, uint, NULL, 0);
469
470/*
471 * S2IO device table.
472 * This table lists all the devices that this driver supports.
473 */
474static DEFINE_PCI_DEVICE_TABLE(s2io_tbl) = {
475 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
476 PCI_ANY_ID, PCI_ANY_ID},
477 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
478 PCI_ANY_ID, PCI_ANY_ID},
479 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
480 PCI_ANY_ID, PCI_ANY_ID},
481 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
482 PCI_ANY_ID, PCI_ANY_ID},
483 {0,}
484};
485
486MODULE_DEVICE_TABLE(pci, s2io_tbl);
487
488static struct pci_error_handlers s2io_err_handler = {
489 .error_detected = s2io_io_error_detected,
490 .slot_reset = s2io_io_slot_reset,
491 .resume = s2io_io_resume,
492};
493
494static struct pci_driver s2io_driver = {
495 .name = "S2IO",
496 .id_table = s2io_tbl,
497 .probe = s2io_init_nic,
498 .remove = __devexit_p(s2io_rem_nic),
499 .err_handler = &s2io_err_handler,
500};
501
502/* A helper macro used by both the init and free shared_mem functions. */
503#define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
504
505/* netqueue manipulation helper functions */
506static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
507{
508 if (!sp->config.multiq) {
509 int i;
510
511 for (i = 0; i < sp->config.tx_fifo_num; i++)
512 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
513 }
514 netif_tx_stop_all_queues(sp->dev);
515}
516
517static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
518{
519 if (!sp->config.multiq)
520 sp->mac_control.fifos[fifo_no].queue_state =
521 FIFO_QUEUE_STOP;
522
523 netif_tx_stop_all_queues(sp->dev);
524}
525
526static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
527{
528 if (!sp->config.multiq) {
529 int i;
530
531 for (i = 0; i < sp->config.tx_fifo_num; i++)
532 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
533 }
534 netif_tx_start_all_queues(sp->dev);
535}
536
537static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
538{
539 if (!sp->config.multiq)
540 sp->mac_control.fifos[fifo_no].queue_state =
541 FIFO_QUEUE_START;
542
543 netif_tx_start_all_queues(sp->dev);
544}
545
546static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
547{
548 if (!sp->config.multiq) {
549 int i;
550
551 for (i = 0; i < sp->config.tx_fifo_num; i++)
552 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
553 }
554 netif_tx_wake_all_queues(sp->dev);
555}
556
557static inline void s2io_wake_tx_queue(
558 struct fifo_info *fifo, int cnt, u8 multiq)
559{
560
561 if (multiq) {
562 if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
563 netif_wake_subqueue(fifo->dev, fifo->fifo_no);
564 } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
565 if (netif_queue_stopped(fifo->dev)) {
566 fifo->queue_state = FIFO_QUEUE_START;
567 netif_wake_queue(fifo->dev);
568 }
569 }
570}
571
572/**
573 * init_shared_mem - Allocation and Initialization of Memory
574 * @nic: Device private variable.
575 * Description: The function allocates all the memory areas shared
576 * between the NIC and the driver. This includes Tx descriptors,
577 * Rx descriptors and the statistics block.
578 */
579
580static int init_shared_mem(struct s2io_nic *nic)
581{
582 u32 size;
583 void *tmp_v_addr, *tmp_v_addr_next;
584 dma_addr_t tmp_p_addr, tmp_p_addr_next;
585 struct RxD_block *pre_rxd_blk = NULL;
586 int i, j, blk_cnt;
587 int lst_size, lst_per_page;
588 struct net_device *dev = nic->dev;
589 unsigned long tmp;
590 struct buffAdd *ba;
591 struct config_param *config = &nic->config;
592 struct mac_info *mac_control = &nic->mac_control;
593 unsigned long long mem_allocated = 0;
594
595 /* Allocation and initialization of TXDLs in FIFOs */
596 size = 0;
597 for (i = 0; i < config->tx_fifo_num; i++) {
598 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
599
600 size += tx_cfg->fifo_len;
601 }
602 if (size > MAX_AVAILABLE_TXDS) {
603 DBG_PRINT(ERR_DBG,
604 "Too many TxDs requested: %d, max supported: %d\n",
605 size, MAX_AVAILABLE_TXDS);
606 return -EINVAL;
607 }
608
609 size = 0;
610 for (i = 0; i < config->tx_fifo_num; i++) {
611 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
612
613 size = tx_cfg->fifo_len;
614 /*
615 * Legal values are from 2 to 8192
616 */
617 if (size < 2) {
618 DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
619 "Valid lengths are 2 through 8192\n",
620 i, size);
621 return -EINVAL;
622 }
623 }
624
625 lst_size = (sizeof(struct TxD) * config->max_txds);
626 lst_per_page = PAGE_SIZE / lst_size;
627
628 for (i = 0; i < config->tx_fifo_num; i++) {
629 struct fifo_info *fifo = &mac_control->fifos[i];
630 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
631 int fifo_len = tx_cfg->fifo_len;
632 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
633
634 fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
635 if (!fifo->list_info) {
636 DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
637 return -ENOMEM;
638 }
639 mem_allocated += list_holder_size;
640 }
641 for (i = 0; i < config->tx_fifo_num; i++) {
642 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
643 lst_per_page);
644 struct fifo_info *fifo = &mac_control->fifos[i];
645 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
646
647 fifo->tx_curr_put_info.offset = 0;
648 fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
649 fifo->tx_curr_get_info.offset = 0;
650 fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
651 fifo->fifo_no = i;
652 fifo->nic = nic;
653 fifo->max_txds = MAX_SKB_FRAGS + 2;
654 fifo->dev = dev;
655
656 for (j = 0; j < page_num; j++) {
657 int k = 0;
658 dma_addr_t tmp_p;
659 void *tmp_v;
660 tmp_v = pci_alloc_consistent(nic->pdev,
661 PAGE_SIZE, &tmp_p);
662 if (!tmp_v) {
663 DBG_PRINT(INFO_DBG,
664 "pci_alloc_consistent failed for TxDL\n");
665 return -ENOMEM;
666 }
667 			/* If we got a zero DMA address (can happen on
668 			 * certain platforms like PPC), reallocate.
669 			 * Store the virtual address of the page we don't want,
670 			 * to be freed later.
671 			 */
672 if (!tmp_p) {
673 mac_control->zerodma_virt_addr = tmp_v;
674 DBG_PRINT(INIT_DBG,
675 "%s: Zero DMA address for TxDL. "
676 "Virtual address %p\n",
677 dev->name, tmp_v);
678 tmp_v = pci_alloc_consistent(nic->pdev,
679 PAGE_SIZE, &tmp_p);
680 if (!tmp_v) {
681 DBG_PRINT(INFO_DBG,
682 "pci_alloc_consistent failed for TxDL\n");
683 return -ENOMEM;
684 }
685 mem_allocated += PAGE_SIZE;
686 }
687 while (k < lst_per_page) {
688 int l = (j * lst_per_page) + k;
689 if (l == tx_cfg->fifo_len)
690 break;
691 fifo->list_info[l].list_virt_addr =
692 tmp_v + (k * lst_size);
693 fifo->list_info[l].list_phy_addr =
694 tmp_p + (k * lst_size);
695 k++;
696 }
697 }
698 }
699
700 for (i = 0; i < config->tx_fifo_num; i++) {
701 struct fifo_info *fifo = &mac_control->fifos[i];
702 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
703
704 size = tx_cfg->fifo_len;
705 fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
706 if (!fifo->ufo_in_band_v)
707 return -ENOMEM;
708 mem_allocated += (size * sizeof(u64));
709 }
710
711 /* Allocation and initialization of RXDs in Rings */
712 size = 0;
713 for (i = 0; i < config->rx_ring_num; i++) {
714 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
715 struct ring_info *ring = &mac_control->rings[i];
716
717 if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
718 DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
719 "multiple of RxDs per Block\n",
720 dev->name, i);
721 return FAILURE;
722 }
723 size += rx_cfg->num_rxd;
724 ring->block_count = rx_cfg->num_rxd /
725 (rxd_count[nic->rxd_mode] + 1);
726 ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
727 }
728 if (nic->rxd_mode == RXD_MODE_1)
729 size = (size * (sizeof(struct RxD1)));
730 else
731 size = (size * (sizeof(struct RxD3)));
732
733 for (i = 0; i < config->rx_ring_num; i++) {
734 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
735 struct ring_info *ring = &mac_control->rings[i];
736
737 ring->rx_curr_get_info.block_index = 0;
738 ring->rx_curr_get_info.offset = 0;
739 ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
740 ring->rx_curr_put_info.block_index = 0;
741 ring->rx_curr_put_info.offset = 0;
742 ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
743 ring->nic = nic;
744 ring->ring_no = i;
745
746 blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
747 /* Allocating all the Rx blocks */
748 for (j = 0; j < blk_cnt; j++) {
749 struct rx_block_info *rx_blocks;
750 int l;
751
752 rx_blocks = &ring->rx_blocks[j];
753 size = SIZE_OF_BLOCK; /* size is always page size */
754 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
755 &tmp_p_addr);
756 if (tmp_v_addr == NULL) {
757 /*
758 * In case of failure, free_shared_mem()
759 * is called, which should free any
760 				 * memory that was allocated until the
761 * failure happened.
762 */
763 rx_blocks->block_virt_addr = tmp_v_addr;
764 return -ENOMEM;
765 }
766 mem_allocated += size;
767 memset(tmp_v_addr, 0, size);
768
769 size = sizeof(struct rxd_info) *
770 rxd_count[nic->rxd_mode];
771 rx_blocks->block_virt_addr = tmp_v_addr;
772 rx_blocks->block_dma_addr = tmp_p_addr;
773 rx_blocks->rxds = kmalloc(size, GFP_KERNEL);
774 if (!rx_blocks->rxds)
775 return -ENOMEM;
776 mem_allocated += size;
777 for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
778 rx_blocks->rxds[l].virt_addr =
779 rx_blocks->block_virt_addr +
780 (rxd_size[nic->rxd_mode] * l);
781 rx_blocks->rxds[l].dma_addr =
782 rx_blocks->block_dma_addr +
783 (rxd_size[nic->rxd_mode] * l);
784 }
785 }
786 /* Interlinking all Rx Blocks */
787 for (j = 0; j < blk_cnt; j++) {
788 int next = (j + 1) % blk_cnt;
789 tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
790 tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
791 tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
792 tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;
793
794 pre_rxd_blk = tmp_v_addr;
795 pre_rxd_blk->reserved_2_pNext_RxD_block =
796 (unsigned long)tmp_v_addr_next;
797 pre_rxd_blk->pNext_RxD_Blk_physical =
798 (u64)tmp_p_addr_next;
799 }
800 }
801 if (nic->rxd_mode == RXD_MODE_3B) {
802 /*
803 		 * Allocation of storage for the buffer addresses in 2BUFF mode,
804 		 * and of the buffers themselves.
805 */
806 for (i = 0; i < config->rx_ring_num; i++) {
807 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
808 struct ring_info *ring = &mac_control->rings[i];
809
810 blk_cnt = rx_cfg->num_rxd /
811 (rxd_count[nic->rxd_mode] + 1);
812 size = sizeof(struct buffAdd *) * blk_cnt;
813 ring->ba = kmalloc(size, GFP_KERNEL);
814 if (!ring->ba)
815 return -ENOMEM;
816 mem_allocated += size;
817 for (j = 0; j < blk_cnt; j++) {
818 int k = 0;
819
820 size = sizeof(struct buffAdd) *
821 (rxd_count[nic->rxd_mode] + 1);
822 ring->ba[j] = kmalloc(size, GFP_KERNEL);
823 if (!ring->ba[j])
824 return -ENOMEM;
825 mem_allocated += size;
826 while (k != rxd_count[nic->rxd_mode]) {
827 ba = &ring->ba[j][k];
828 size = BUF0_LEN + ALIGN_SIZE;
829 ba->ba_0_org = kmalloc(size, GFP_KERNEL);
830 if (!ba->ba_0_org)
831 return -ENOMEM;
832 mem_allocated += size;
833 tmp = (unsigned long)ba->ba_0_org;
834 tmp += ALIGN_SIZE;
835 tmp &= ~((unsigned long)ALIGN_SIZE);
836 ba->ba_0 = (void *)tmp;
837
838 size = BUF1_LEN + ALIGN_SIZE;
839 ba->ba_1_org = kmalloc(size, GFP_KERNEL);
840 if (!ba->ba_1_org)
841 return -ENOMEM;
842 mem_allocated += size;
843 tmp = (unsigned long)ba->ba_1_org;
844 tmp += ALIGN_SIZE;
845 tmp &= ~((unsigned long)ALIGN_SIZE);
846 ba->ba_1 = (void *)tmp;
847 k++;
848 }
849 }
850 }
851 }
852
853 /* Allocation and initialization of Statistics block */
854 size = sizeof(struct stat_block);
855 mac_control->stats_mem =
856 pci_alloc_consistent(nic->pdev, size,
857 &mac_control->stats_mem_phy);
858
859 if (!mac_control->stats_mem) {
860 /*
861 * In case of failure, free_shared_mem() is called, which
862 	 * should free any memory that was allocated until the
863 * failure happened.
864 */
865 return -ENOMEM;
866 }
867 mem_allocated += size;
868 mac_control->stats_mem_sz = size;
869
870 tmp_v_addr = mac_control->stats_mem;
871 mac_control->stats_info = tmp_v_addr;
872 memset(tmp_v_addr, 0, size);
873 DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
874 dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
875 mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
876 return SUCCESS;
877}
878
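/*
 * Illustrative sketch of the arithmetic used by init_shared_mem() above
 * (hypothetical numbers, not taken from the hardware documentation):
 * with lst_size = sizeof(struct TxD) * config->max_txds and
 * lst_per_page = PAGE_SIZE / lst_size, each FIFO needs
 * TXD_MEM_PAGE_CNT(fifo_len, lst_per_page) pages, assumed here to be
 * the usual round-up division:
 *
 *	pages = (fifo_len + lst_per_page - 1) / lst_per_page;
 *
 * e.g. 512-byte lists on a 4096-byte page give lst_per_page = 8, so a
 * 2048-entry FIFO would need 256 pages.  The ba_0/ba_1 pointers in
 * 2-buffer mode are aligned with the classic round-up idiom, assuming
 * ALIGN_SIZE has the form (2^n - 1):
 *
 *	aligned = (addr + ALIGN_SIZE) & ~(unsigned long)ALIGN_SIZE;
 */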
879/**
880 * free_shared_mem - Free the allocated Memory
881 * @nic: Device private variable.
882 * Description: This function is to free all memory locations allocated by
883 * the init_shared_mem() function and return it to the kernel.
884 */
885
886static void free_shared_mem(struct s2io_nic *nic)
887{
888 int i, j, blk_cnt, size;
889 void *tmp_v_addr;
890 dma_addr_t tmp_p_addr;
891 int lst_size, lst_per_page;
892 struct net_device *dev;
893 int page_num = 0;
894 struct config_param *config;
895 struct mac_info *mac_control;
896 struct stat_block *stats;
897 struct swStat *swstats;
898
899 if (!nic)
900 return;
901
902 dev = nic->dev;
903
904 config = &nic->config;
905 mac_control = &nic->mac_control;
906 stats = mac_control->stats_info;
907 swstats = &stats->sw_stat;
908
909 lst_size = sizeof(struct TxD) * config->max_txds;
910 lst_per_page = PAGE_SIZE / lst_size;
911
912 for (i = 0; i < config->tx_fifo_num; i++) {
913 struct fifo_info *fifo = &mac_control->fifos[i];
914 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
915
916 page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
917 for (j = 0; j < page_num; j++) {
918 int mem_blks = (j * lst_per_page);
919 struct list_info_hold *fli;
920
921 if (!fifo->list_info)
922 return;
923
924 fli = &fifo->list_info[mem_blks];
925 if (!fli->list_virt_addr)
926 break;
927 pci_free_consistent(nic->pdev, PAGE_SIZE,
928 fli->list_virt_addr,
929 fli->list_phy_addr);
930 swstats->mem_freed += PAGE_SIZE;
931 }
932 /* If we got a zero DMA address during allocation,
933 * free the page now
934 */
935 if (mac_control->zerodma_virt_addr) {
936 pci_free_consistent(nic->pdev, PAGE_SIZE,
937 mac_control->zerodma_virt_addr,
938 (dma_addr_t)0);
939 DBG_PRINT(INIT_DBG,
940 "%s: Freeing TxDL with zero DMA address. "
941 "Virtual address %p\n",
942 dev->name, mac_control->zerodma_virt_addr);
943 swstats->mem_freed += PAGE_SIZE;
944 }
945 kfree(fifo->list_info);
946 swstats->mem_freed += tx_cfg->fifo_len *
947 sizeof(struct list_info_hold);
948 }
949
950 size = SIZE_OF_BLOCK;
951 for (i = 0; i < config->rx_ring_num; i++) {
952 struct ring_info *ring = &mac_control->rings[i];
953
954 blk_cnt = ring->block_count;
955 for (j = 0; j < blk_cnt; j++) {
956 tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
957 tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
958 if (tmp_v_addr == NULL)
959 break;
960 pci_free_consistent(nic->pdev, size,
961 tmp_v_addr, tmp_p_addr);
962 swstats->mem_freed += size;
963 kfree(ring->rx_blocks[j].rxds);
964 swstats->mem_freed += sizeof(struct rxd_info) *
965 rxd_count[nic->rxd_mode];
966 }
967 }
968
969 if (nic->rxd_mode == RXD_MODE_3B) {
970 /* Freeing buffer storage addresses in 2BUFF mode. */
971 for (i = 0; i < config->rx_ring_num; i++) {
972 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
973 struct ring_info *ring = &mac_control->rings[i];
974
975 blk_cnt = rx_cfg->num_rxd /
976 (rxd_count[nic->rxd_mode] + 1);
977 for (j = 0; j < blk_cnt; j++) {
978 int k = 0;
979 if (!ring->ba[j])
980 continue;
981 while (k != rxd_count[nic->rxd_mode]) {
982 struct buffAdd *ba = &ring->ba[j][k];
983 kfree(ba->ba_0_org);
984 swstats->mem_freed +=
985 BUF0_LEN + ALIGN_SIZE;
986 kfree(ba->ba_1_org);
987 swstats->mem_freed +=
988 BUF1_LEN + ALIGN_SIZE;
989 k++;
990 }
991 kfree(ring->ba[j]);
992 swstats->mem_freed += sizeof(struct buffAdd) *
993 (rxd_count[nic->rxd_mode] + 1);
994 }
995 kfree(ring->ba);
996 swstats->mem_freed += sizeof(struct buffAdd *) *
997 blk_cnt;
998 }
999 }
1000
1001 for (i = 0; i < nic->config.tx_fifo_num; i++) {
1002 struct fifo_info *fifo = &mac_control->fifos[i];
1003 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
1004
1005 if (fifo->ufo_in_band_v) {
1006 swstats->mem_freed += tx_cfg->fifo_len *
1007 sizeof(u64);
1008 kfree(fifo->ufo_in_band_v);
1009 }
1010 }
1011
1012 if (mac_control->stats_mem) {
1013 swstats->mem_freed += mac_control->stats_mem_sz;
1014 pci_free_consistent(nic->pdev,
1015 mac_control->stats_mem_sz,
1016 mac_control->stats_mem,
1017 mac_control->stats_mem_phy);
1018 }
1019}
1020
1021/**
1022 * s2io_verify_pci_mode - Check which PCI/PCI-X mode the adapter is in
1023 */
1024
1025static int s2io_verify_pci_mode(struct s2io_nic *nic)
1026{
1027 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1028 register u64 val64 = 0;
1029 int mode;
1030
1031 val64 = readq(&bar0->pci_mode);
1032 mode = (u8)GET_PCI_MODE(val64);
1033
1034 if (val64 & PCI_MODE_UNKNOWN_MODE)
1035 return -1; /* Unknown PCI mode */
1036 return mode;
1037}
1038
1039#define NEC_VENID 0x1033
1040#define NEC_DEVID 0x0125
1041static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1042{
1043 struct pci_dev *tdev = NULL;
1044 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
1045 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1046 if (tdev->bus == s2io_pdev->bus->parent) {
1047 pci_dev_put(tdev);
1048 return 1;
1049 }
1050 }
1051 }
1052 return 0;
1053}
1054
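/*
 * Note on s2io_on_nec_bridge() above: pci_get_device() drops the
 * reference on the device passed in and takes one on the device it
 * returns, so the while loop walks the device list without leaking
 * references; the explicit pci_dev_put() is only needed on the
 * early-return path where the loop is abandoned.
 */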
1055static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
1056/**
1057 * s2io_print_pci_mode - Print and record the PCI/PCI-X bus mode and width
1058 */
1059static int s2io_print_pci_mode(struct s2io_nic *nic)
1060{
1061 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1062 register u64 val64 = 0;
1063 int mode;
1064 struct config_param *config = &nic->config;
1065 const char *pcimode;
1066
1067 val64 = readq(&bar0->pci_mode);
1068 mode = (u8)GET_PCI_MODE(val64);
1069
1070 if (val64 & PCI_MODE_UNKNOWN_MODE)
1071 return -1; /* Unknown PCI mode */
1072
1073 config->bus_speed = bus_speed[mode];
1074
1075 if (s2io_on_nec_bridge(nic->pdev)) {
1076 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1077 nic->dev->name);
1078 return mode;
1079 }
1080
1081 switch (mode) {
1082 case PCI_MODE_PCI_33:
1083 pcimode = "33MHz PCI bus";
1084 break;
1085 case PCI_MODE_PCI_66:
1086 pcimode = "66MHz PCI bus";
1087 break;
1088 case PCI_MODE_PCIX_M1_66:
1089 pcimode = "66MHz PCIX(M1) bus";
1090 break;
1091 case PCI_MODE_PCIX_M1_100:
1092 pcimode = "100MHz PCIX(M1) bus";
1093 break;
1094 case PCI_MODE_PCIX_M1_133:
1095 pcimode = "133MHz PCIX(M1) bus";
1096 break;
1097 case PCI_MODE_PCIX_M2_66:
1098 pcimode = "133MHz PCIX(M2) bus";
1099 break;
1100 case PCI_MODE_PCIX_M2_100:
1101 pcimode = "200MHz PCIX(M2) bus";
1102 break;
1103 case PCI_MODE_PCIX_M2_133:
1104 pcimode = "266MHz PCIX(M2) bus";
1105 break;
1106 default:
1107 pcimode = "unsupported bus!";
1108 mode = -1;
1109 }
1110
1111 DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
1112 nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);
1113
1114 return mode;
1115}
1116
1117/**
1118 * init_tti - Initialization of the transmit traffic interrupt scheme
1119 * @nic: device private variable
1120 * @link: link status (UP/DOWN) used to enable/disable continuous
1121 * transmit interrupts
1122 * Description: The function configures transmit traffic interrupts
1123 * Return Value: SUCCESS on success and
1124 * '-1' on failure
1125 */
1126
1127static int init_tti(struct s2io_nic *nic, int link)
1128{
1129 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1130 register u64 val64 = 0;
1131 int i;
1132 struct config_param *config = &nic->config;
1133
1134 for (i = 0; i < config->tx_fifo_num; i++) {
1135 /*
1136 * TTI Initialization. Default Tx timer gets us about
1137 * 250 interrupts per sec. Continuous interrupts are enabled
1138 * by default.
1139 */
1140 if (nic->device_type == XFRAME_II_DEVICE) {
1141 int count = (nic->config.bus_speed * 125)/2;
1142 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1143 } else
1144 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1145
1146 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1147 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1148 TTI_DATA1_MEM_TX_URNG_C(0x30) |
1149 TTI_DATA1_MEM_TX_TIMER_AC_EN;
1150 if (i == 0)
1151 if (use_continuous_tx_intrs && (link == LINK_UP))
1152 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1153 writeq(val64, &bar0->tti_data1_mem);
1154
1155 if (nic->config.intr_type == MSI_X) {
1156 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1157 TTI_DATA2_MEM_TX_UFC_B(0x100) |
1158 TTI_DATA2_MEM_TX_UFC_C(0x200) |
1159 TTI_DATA2_MEM_TX_UFC_D(0x300);
1160 } else {
1161 if ((nic->config.tx_steering_type ==
1162 TX_DEFAULT_STEERING) &&
1163 (config->tx_fifo_num > 1) &&
1164 (i >= nic->udp_fifo_idx) &&
1165 (i < (nic->udp_fifo_idx +
1166 nic->total_udp_fifos)))
1167 val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
1168 TTI_DATA2_MEM_TX_UFC_B(0x80) |
1169 TTI_DATA2_MEM_TX_UFC_C(0x100) |
1170 TTI_DATA2_MEM_TX_UFC_D(0x120);
1171 else
1172 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1173 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1174 TTI_DATA2_MEM_TX_UFC_C(0x40) |
1175 TTI_DATA2_MEM_TX_UFC_D(0x80);
1176 }
1177
1178 writeq(val64, &bar0->tti_data2_mem);
1179
1180 val64 = TTI_CMD_MEM_WE |
1181 TTI_CMD_MEM_STROBE_NEW_CMD |
1182 TTI_CMD_MEM_OFFSET(i);
1183 writeq(val64, &bar0->tti_command_mem);
1184
1185 if (wait_for_cmd_complete(&bar0->tti_command_mem,
1186 TTI_CMD_MEM_STROBE_NEW_CMD,
1187 S2IO_BIT_RESET) != SUCCESS)
1188 return FAILURE;
1189 }
1190
1191 return SUCCESS;
1192}
1193
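/*
 * Worked example for the Xframe II timer value used in init_tti() above
 * (illustrative only): count = bus_speed * 125 / 2, so with
 * config->bus_speed == 266 the timer is loaded with 266 * 125 / 2 = 16625.
 * How that maps to an interrupt rate depends on the TTI timer clock,
 * which is not spelled out here.
 */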
1194/**
1195 * init_nic - Initialization of hardware
1196 * @nic: device private variable
1197 * Description: The function sequentially configures every block
1198 * of the H/W from their reset values.
1199 * Return Value: SUCCESS on success and
1200 * '-1' on failure (endian settings incorrect).
1201 */
1202
1203static int init_nic(struct s2io_nic *nic)
1204{
1205 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1206 struct net_device *dev = nic->dev;
1207 register u64 val64 = 0;
1208 void __iomem *add;
1209 u32 time;
1210 int i, j;
1211 int dtx_cnt = 0;
1212 unsigned long long mem_share;
1213 int mem_size;
1214 struct config_param *config = &nic->config;
1215 struct mac_info *mac_control = &nic->mac_control;
1216
1217 	/* to set the swapper control on the card */
1218 if (s2io_set_swapper(nic)) {
1219 DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
1220 return -EIO;
1221 }
1222
1223 /*
1224 * Herc requires EOI to be removed from reset before XGXS, so..
1225 */
1226 if (nic->device_type & XFRAME_II_DEVICE) {
1227 val64 = 0xA500000000ULL;
1228 writeq(val64, &bar0->sw_reset);
1229 msleep(500);
1230 val64 = readq(&bar0->sw_reset);
1231 }
1232
1233 /* Remove XGXS from reset state */
1234 val64 = 0;
1235 writeq(val64, &bar0->sw_reset);
1236 msleep(500);
1237 val64 = readq(&bar0->sw_reset);
1238
1239 	/* Ensure that it's safe to access registers by checking that
1240 	 * the RIC_RUNNING bit is cleared. The check is valid only for Xframe II.
1241 	 */
1242 if (nic->device_type == XFRAME_II_DEVICE) {
1243 for (i = 0; i < 50; i++) {
1244 val64 = readq(&bar0->adapter_status);
1245 if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1246 break;
1247 msleep(10);
1248 }
1249 if (i == 50)
1250 return -ENODEV;
1251 }
1252
1253 /* Enable Receiving broadcasts */
1254 add = &bar0->mac_cfg;
1255 val64 = readq(&bar0->mac_cfg);
1256 val64 |= MAC_RMAC_BCAST_ENABLE;
1257 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1258 writel((u32)val64, add);
1259 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1260 writel((u32) (val64 >> 32), (add + 4));
1261
1262 /* Read registers in all blocks */
1263 val64 = readq(&bar0->mac_int_mask);
1264 val64 = readq(&bar0->mc_int_mask);
1265 val64 = readq(&bar0->xgxs_int_mask);
1266
1267 /* Set MTU */
1268 val64 = dev->mtu;
1269 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1270
1271 if (nic->device_type & XFRAME_II_DEVICE) {
1272 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1273 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1274 &bar0->dtx_control, UF);
1275 if (dtx_cnt & 0x1)
1276 msleep(1); /* Necessary!! */
1277 dtx_cnt++;
1278 }
1279 } else {
1280 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1281 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1282 &bar0->dtx_control, UF);
1283 val64 = readq(&bar0->dtx_control);
1284 dtx_cnt++;
1285 }
1286 }
1287
1288 /* Tx DMA Initialization */
1289 val64 = 0;
1290 writeq(val64, &bar0->tx_fifo_partition_0);
1291 writeq(val64, &bar0->tx_fifo_partition_1);
1292 writeq(val64, &bar0->tx_fifo_partition_2);
1293 writeq(val64, &bar0->tx_fifo_partition_3);
1294
1295 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1296 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
1297
1298 val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
1299 vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);
1300
1301 if (i == (config->tx_fifo_num - 1)) {
1302 if (i % 2 == 0)
1303 i++;
1304 }
1305
1306 switch (i) {
1307 case 1:
1308 writeq(val64, &bar0->tx_fifo_partition_0);
1309 val64 = 0;
1310 j = 0;
1311 break;
1312 case 3:
1313 writeq(val64, &bar0->tx_fifo_partition_1);
1314 val64 = 0;
1315 j = 0;
1316 break;
1317 case 5:
1318 writeq(val64, &bar0->tx_fifo_partition_2);
1319 val64 = 0;
1320 j = 0;
1321 break;
1322 case 7:
1323 writeq(val64, &bar0->tx_fifo_partition_3);
1324 val64 = 0;
1325 j = 0;
1326 break;
1327 default:
1328 j++;
1329 break;
1330 }
1331 }
1332
1333 /*
1334 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1335 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1336 */
1337 if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
1338 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1339
1340 val64 = readq(&bar0->tx_fifo_partition_0);
1341 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1342 &bar0->tx_fifo_partition_0, (unsigned long long)val64);
1343
1344 /*
1345 * Initialization of Tx_PA_CONFIG register to ignore packet
1346 * integrity checking.
1347 */
1348 val64 = readq(&bar0->tx_pa_cfg);
1349 val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
1350 TX_PA_CFG_IGNORE_SNAP_OUI |
1351 TX_PA_CFG_IGNORE_LLC_CTRL |
1352 TX_PA_CFG_IGNORE_L2_ERR;
1353 writeq(val64, &bar0->tx_pa_cfg);
1354
1355 	/* Rx DMA initialization. */
1356 val64 = 0;
1357 for (i = 0; i < config->rx_ring_num; i++) {
1358 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
1359
1360 val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
1361 }
1362 writeq(val64, &bar0->rx_queue_priority);
1363
1364 /*
1365 	 * Allocating an equal share of memory to all the
1366 	 * configured rings.
1367 */
1368 val64 = 0;
1369 if (nic->device_type & XFRAME_II_DEVICE)
1370 mem_size = 32;
1371 else
1372 mem_size = 64;
1373
1374 for (i = 0; i < config->rx_ring_num; i++) {
1375 switch (i) {
1376 case 0:
1377 mem_share = (mem_size / config->rx_ring_num +
1378 mem_size % config->rx_ring_num);
1379 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1380 continue;
1381 case 1:
1382 mem_share = (mem_size / config->rx_ring_num);
1383 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1384 continue;
1385 case 2:
1386 mem_share = (mem_size / config->rx_ring_num);
1387 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1388 continue;
1389 case 3:
1390 mem_share = (mem_size / config->rx_ring_num);
1391 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1392 continue;
1393 case 4:
1394 mem_share = (mem_size / config->rx_ring_num);
1395 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1396 continue;
1397 case 5:
1398 mem_share = (mem_size / config->rx_ring_num);
1399 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1400 continue;
1401 case 6:
1402 mem_share = (mem_size / config->rx_ring_num);
1403 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1404 continue;
1405 case 7:
1406 mem_share = (mem_size / config->rx_ring_num);
1407 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1408 continue;
1409 }
1410 }
1411 writeq(val64, &bar0->rx_queue_cfg);
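	/*
	 * Worked example (illustrative): with mem_size = 64 (Xframe I) and
	 * three configured rings, ring 0 gets 64 / 3 + 64 % 3 = 22 units
	 * and rings 1 and 2 get 64 / 3 = 21 each, so the whole share is
	 * consumed and any remainder goes to ring 0.
	 */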
1412
1413 /*
1414 * Filling Tx round robin registers
1415 * as per the number of FIFOs for equal scheduling priority
1416 */
1417 switch (config->tx_fifo_num) {
1418 case 1:
1419 val64 = 0x0;
1420 writeq(val64, &bar0->tx_w_round_robin_0);
1421 writeq(val64, &bar0->tx_w_round_robin_1);
1422 writeq(val64, &bar0->tx_w_round_robin_2);
1423 writeq(val64, &bar0->tx_w_round_robin_3);
1424 writeq(val64, &bar0->tx_w_round_robin_4);
1425 break;
1426 case 2:
1427 val64 = 0x0001000100010001ULL;
1428 writeq(val64, &bar0->tx_w_round_robin_0);
1429 writeq(val64, &bar0->tx_w_round_robin_1);
1430 writeq(val64, &bar0->tx_w_round_robin_2);
1431 writeq(val64, &bar0->tx_w_round_robin_3);
1432 val64 = 0x0001000100000000ULL;
1433 writeq(val64, &bar0->tx_w_round_robin_4);
1434 break;
1435 case 3:
1436 val64 = 0x0001020001020001ULL;
1437 writeq(val64, &bar0->tx_w_round_robin_0);
1438 val64 = 0x0200010200010200ULL;
1439 writeq(val64, &bar0->tx_w_round_robin_1);
1440 val64 = 0x0102000102000102ULL;
1441 writeq(val64, &bar0->tx_w_round_robin_2);
1442 val64 = 0x0001020001020001ULL;
1443 writeq(val64, &bar0->tx_w_round_robin_3);
1444 val64 = 0x0200010200000000ULL;
1445 writeq(val64, &bar0->tx_w_round_robin_4);
1446 break;
1447 case 4:
1448 val64 = 0x0001020300010203ULL;
1449 writeq(val64, &bar0->tx_w_round_robin_0);
1450 writeq(val64, &bar0->tx_w_round_robin_1);
1451 writeq(val64, &bar0->tx_w_round_robin_2);
1452 writeq(val64, &bar0->tx_w_round_robin_3);
1453 val64 = 0x0001020300000000ULL;
1454 writeq(val64, &bar0->tx_w_round_robin_4);
1455 break;
1456 case 5:
1457 val64 = 0x0001020304000102ULL;
1458 writeq(val64, &bar0->tx_w_round_robin_0);
1459 val64 = 0x0304000102030400ULL;
1460 writeq(val64, &bar0->tx_w_round_robin_1);
1461 val64 = 0x0102030400010203ULL;
1462 writeq(val64, &bar0->tx_w_round_robin_2);
1463 val64 = 0x0400010203040001ULL;
1464 writeq(val64, &bar0->tx_w_round_robin_3);
1465 val64 = 0x0203040000000000ULL;
1466 writeq(val64, &bar0->tx_w_round_robin_4);
1467 break;
1468 case 6:
1469 val64 = 0x0001020304050001ULL;
1470 writeq(val64, &bar0->tx_w_round_robin_0);
1471 val64 = 0x0203040500010203ULL;
1472 writeq(val64, &bar0->tx_w_round_robin_1);
1473 val64 = 0x0405000102030405ULL;
1474 writeq(val64, &bar0->tx_w_round_robin_2);
1475 val64 = 0x0001020304050001ULL;
1476 writeq(val64, &bar0->tx_w_round_robin_3);
1477 val64 = 0x0203040500000000ULL;
1478 writeq(val64, &bar0->tx_w_round_robin_4);
1479 break;
1480 case 7:
1481 val64 = 0x0001020304050600ULL;
1482 writeq(val64, &bar0->tx_w_round_robin_0);
1483 val64 = 0x0102030405060001ULL;
1484 writeq(val64, &bar0->tx_w_round_robin_1);
1485 val64 = 0x0203040506000102ULL;
1486 writeq(val64, &bar0->tx_w_round_robin_2);
1487 val64 = 0x0304050600010203ULL;
1488 writeq(val64, &bar0->tx_w_round_robin_3);
1489 val64 = 0x0405060000000000ULL;
1490 writeq(val64, &bar0->tx_w_round_robin_4);
1491 break;
1492 case 8:
1493 val64 = 0x0001020304050607ULL;
1494 writeq(val64, &bar0->tx_w_round_robin_0);
1495 writeq(val64, &bar0->tx_w_round_robin_1);
1496 writeq(val64, &bar0->tx_w_round_robin_2);
1497 writeq(val64, &bar0->tx_w_round_robin_3);
1498 val64 = 0x0001020300000000ULL;
1499 writeq(val64, &bar0->tx_w_round_robin_4);
1500 break;
1501 }
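	/*
	 * The tx_w_round_robin_* constants above appear to pack eight
	 * one-byte FIFO numbers per register, the five registers together
	 * defining the order in which FIFOs are served; the values simply
	 * repeat 0..(tx_fifo_num - 1), e.g. bytes 00 01 02 00 01 02 ... for
	 * three FIFOs.  (Inferred from the pattern, not from the hardware
	 * manual.)
	 */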
1502
1503 /* Enable all configured Tx FIFO partitions */
1504 val64 = readq(&bar0->tx_fifo_partition_0);
1505 val64 |= (TX_FIFO_PARTITION_EN);
1506 writeq(val64, &bar0->tx_fifo_partition_0);
1507
1508 /* Filling the Rx round robin registers as per the
1509 * number of Rings and steering based on QoS with
1510 * equal priority.
1511 */
1512 switch (config->rx_ring_num) {
1513 case 1:
1514 val64 = 0x0;
1515 writeq(val64, &bar0->rx_w_round_robin_0);
1516 writeq(val64, &bar0->rx_w_round_robin_1);
1517 writeq(val64, &bar0->rx_w_round_robin_2);
1518 writeq(val64, &bar0->rx_w_round_robin_3);
1519 writeq(val64, &bar0->rx_w_round_robin_4);
1520
1521 val64 = 0x8080808080808080ULL;
1522 writeq(val64, &bar0->rts_qos_steering);
1523 break;
1524 case 2:
1525 val64 = 0x0001000100010001ULL;
1526 writeq(val64, &bar0->rx_w_round_robin_0);
1527 writeq(val64, &bar0->rx_w_round_robin_1);
1528 writeq(val64, &bar0->rx_w_round_robin_2);
1529 writeq(val64, &bar0->rx_w_round_robin_3);
1530 val64 = 0x0001000100000000ULL;
1531 writeq(val64, &bar0->rx_w_round_robin_4);
1532
1533 val64 = 0x8080808040404040ULL;
1534 writeq(val64, &bar0->rts_qos_steering);
1535 break;
1536 case 3:
1537 val64 = 0x0001020001020001ULL;
1538 writeq(val64, &bar0->rx_w_round_robin_0);
1539 val64 = 0x0200010200010200ULL;
1540 writeq(val64, &bar0->rx_w_round_robin_1);
1541 val64 = 0x0102000102000102ULL;
1542 writeq(val64, &bar0->rx_w_round_robin_2);
1543 val64 = 0x0001020001020001ULL;
1544 writeq(val64, &bar0->rx_w_round_robin_3);
1545 val64 = 0x0200010200000000ULL;
1546 writeq(val64, &bar0->rx_w_round_robin_4);
1547
1548 val64 = 0x8080804040402020ULL;
1549 writeq(val64, &bar0->rts_qos_steering);
1550 break;
1551 case 4:
1552 val64 = 0x0001020300010203ULL;
1553 writeq(val64, &bar0->rx_w_round_robin_0);
1554 writeq(val64, &bar0->rx_w_round_robin_1);
1555 writeq(val64, &bar0->rx_w_round_robin_2);
1556 writeq(val64, &bar0->rx_w_round_robin_3);
1557 val64 = 0x0001020300000000ULL;
1558 writeq(val64, &bar0->rx_w_round_robin_4);
1559
1560 val64 = 0x8080404020201010ULL;
1561 writeq(val64, &bar0->rts_qos_steering);
1562 break;
1563 case 5:
1564 val64 = 0x0001020304000102ULL;
1565 writeq(val64, &bar0->rx_w_round_robin_0);
1566 val64 = 0x0304000102030400ULL;
1567 writeq(val64, &bar0->rx_w_round_robin_1);
1568 val64 = 0x0102030400010203ULL;
1569 writeq(val64, &bar0->rx_w_round_robin_2);
1570 val64 = 0x0400010203040001ULL;
1571 writeq(val64, &bar0->rx_w_round_robin_3);
1572 val64 = 0x0203040000000000ULL;
1573 writeq(val64, &bar0->rx_w_round_robin_4);
1574
1575 val64 = 0x8080404020201008ULL;
1576 writeq(val64, &bar0->rts_qos_steering);
1577 break;
1578 case 6:
1579 val64 = 0x0001020304050001ULL;
1580 writeq(val64, &bar0->rx_w_round_robin_0);
1581 val64 = 0x0203040500010203ULL;
1582 writeq(val64, &bar0->rx_w_round_robin_1);
1583 val64 = 0x0405000102030405ULL;
1584 writeq(val64, &bar0->rx_w_round_robin_2);
1585 val64 = 0x0001020304050001ULL;
1586 writeq(val64, &bar0->rx_w_round_robin_3);
1587 val64 = 0x0203040500000000ULL;
1588 writeq(val64, &bar0->rx_w_round_robin_4);
1589
1590 val64 = 0x8080404020100804ULL;
1591 writeq(val64, &bar0->rts_qos_steering);
1592 break;
1593 case 7:
1594 val64 = 0x0001020304050600ULL;
1595 writeq(val64, &bar0->rx_w_round_robin_0);
1596 val64 = 0x0102030405060001ULL;
1597 writeq(val64, &bar0->rx_w_round_robin_1);
1598 val64 = 0x0203040506000102ULL;
1599 writeq(val64, &bar0->rx_w_round_robin_2);
1600 val64 = 0x0304050600010203ULL;
1601 writeq(val64, &bar0->rx_w_round_robin_3);
1602 val64 = 0x0405060000000000ULL;
1603 writeq(val64, &bar0->rx_w_round_robin_4);
1604
1605 val64 = 0x8080402010080402ULL;
1606 writeq(val64, &bar0->rts_qos_steering);
1607 break;
1608 case 8:
1609 val64 = 0x0001020304050607ULL;
1610 writeq(val64, &bar0->rx_w_round_robin_0);
1611 writeq(val64, &bar0->rx_w_round_robin_1);
1612 writeq(val64, &bar0->rx_w_round_robin_2);
1613 writeq(val64, &bar0->rx_w_round_robin_3);
1614 val64 = 0x0001020300000000ULL;
1615 writeq(val64, &bar0->rx_w_round_robin_4);
1616
1617 val64 = 0x8040201008040201ULL;
1618 writeq(val64, &bar0->rts_qos_steering);
1619 break;
1620 }
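	/*
	 * The rts_qos_steering constants above look like per-priority ring
	 * bitmaps, one byte per QoS level with bit 7 standing for ring 0;
	 * e.g. 0x8080808040404040 maps the first four levels to ring 0 and
	 * the last four to ring 1.  (Inferred from the pattern of values,
	 * not from the hardware manual.)
	 */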
1621
1622 /* UDP Fix */
1623 val64 = 0;
1624 for (i = 0; i < 8; i++)
1625 writeq(val64, &bar0->rts_frm_len_n[i]);
1626
1627 /* Set the default rts frame length for the rings configured */
1628 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1629 for (i = 0 ; i < config->rx_ring_num ; i++)
1630 writeq(val64, &bar0->rts_frm_len_n[i]);
1631
1632 /* Set the frame length for the configured rings
1633 * desired by the user
1634 */
1635 for (i = 0; i < config->rx_ring_num; i++) {
1636 		/* If rts_frm_len[i] == 0 then it is assumed that the user has
1637 		 * not specified frame length steering.
1638 * If the user provides the frame length then program
1639 * the rts_frm_len register for those values or else
1640 * leave it as it is.
1641 */
1642 if (rts_frm_len[i] != 0) {
1643 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1644 &bar0->rts_frm_len_n[i]);
1645 }
1646 }
1647
1648 /* Disable differentiated services steering logic */
1649 for (i = 0; i < 64; i++) {
1650 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1651 DBG_PRINT(ERR_DBG,
1652 "%s: rts_ds_steer failed on codepoint %d\n",
1653 dev->name, i);
1654 return -ENODEV;
1655 }
1656 }
1657
1658 /* Program statistics memory */
1659 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1660
1661 if (nic->device_type == XFRAME_II_DEVICE) {
1662 val64 = STAT_BC(0x320);
1663 writeq(val64, &bar0->stat_byte_cnt);
1664 }
1665
1666 /*
1667 * Initializing the sampling rate for the device to calculate the
1668 * bandwidth utilization.
1669 */
1670 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1671 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1672 writeq(val64, &bar0->mac_link_util);
1673
1674 /*
1675 * Initializing the Transmit and Receive Traffic Interrupt
1676 * Scheme.
1677 */
1678
1679 /* Initialize TTI */
1680 if (SUCCESS != init_tti(nic, nic->last_link_state))
1681 return -ENODEV;
1682
1683 /* RTI Initialization */
1684 if (nic->device_type == XFRAME_II_DEVICE) {
1685 /*
1686 		 * Programmed to generate approximately 500 interrupts
1687 		 * per second
1688 */
1689 int count = (nic->config.bus_speed * 125)/4;
1690 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1691 } else
1692 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1693 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1694 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1695 RTI_DATA1_MEM_RX_URNG_C(0x30) |
1696 RTI_DATA1_MEM_RX_TIMER_AC_EN;
1697
1698 writeq(val64, &bar0->rti_data1_mem);
1699
1700 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1701 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1702 if (nic->config.intr_type == MSI_X)
1703 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
1704 RTI_DATA2_MEM_RX_UFC_D(0x40));
1705 else
1706 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
1707 RTI_DATA2_MEM_RX_UFC_D(0x80));
1708 writeq(val64, &bar0->rti_data2_mem);
1709
1710 for (i = 0; i < config->rx_ring_num; i++) {
1711 val64 = RTI_CMD_MEM_WE |
1712 RTI_CMD_MEM_STROBE_NEW_CMD |
1713 RTI_CMD_MEM_OFFSET(i);
1714 writeq(val64, &bar0->rti_command_mem);
1715
1716 /*
1717 * Once the operation completes, the Strobe bit of the
1718 * command register will be reset. We poll for this
1719 * particular condition. We wait for a maximum of 500ms
1720 * for the operation to complete, if it's not complete
1721 * by then we return error.
1722 */
1723 time = 0;
1724 while (true) {
1725 val64 = readq(&bar0->rti_command_mem);
1726 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1727 break;
1728
1729 if (time > 10) {
1730 DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
1731 dev->name);
1732 return -ENODEV;
1733 }
1734 time++;
1735 msleep(50);
1736 }
1737 }
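	/*
	 * The poll above allows up to eleven msleep(50) calls before bailing
	 * out, i.e. roughly the 500 ms budget mentioned in the comment
	 * preceding the loop.
	 */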
1738
1739 /*
1740 * Initializing proper values as Pause threshold into all
1741 * the 8 Queues on Rx side.
1742 */
1743 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1744 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1745
1746 /* Disable RMAC PAD STRIPPING */
1747 add = &bar0->mac_cfg;
1748 val64 = readq(&bar0->mac_cfg);
1749 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1750 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1751 writel((u32) (val64), add);
1752 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1753 writel((u32) (val64 >> 32), (add + 4));
1754 val64 = readq(&bar0->mac_cfg);
1755
1756 /* Enable FCS stripping by adapter */
1757 add = &bar0->mac_cfg;
1758 val64 = readq(&bar0->mac_cfg);
1759 val64 |= MAC_CFG_RMAC_STRIP_FCS;
1760 if (nic->device_type == XFRAME_II_DEVICE)
1761 writeq(val64, &bar0->mac_cfg);
1762 else {
1763 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1764 writel((u32) (val64), add);
1765 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1766 writel((u32) (val64 >> 32), (add + 4));
1767 }
1768
1769 /*
1770 * Set the time value to be inserted in the pause frame
1771 * generated by xena.
1772 */
1773 val64 = readq(&bar0->rmac_pause_cfg);
1774 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1775 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1776 writeq(val64, &bar0->rmac_pause_cfg);
1777
1778 /*
1779 	 * Set the threshold limit for generating a pause frame.
1780 	 * If the amount of data in any queue exceeds the ratio
1781 	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256,
1782 	 * a pause frame is generated.
1783 */
1784 val64 = 0;
1785 for (i = 0; i < 4; i++) {
1786 val64 |= (((u64)0xFF00 |
1787 nic->mac_control.mc_pause_threshold_q0q3)
1788 << (i * 2 * 8));
1789 }
1790 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1791
1792 val64 = 0;
1793 for (i = 0; i < 4; i++) {
1794 val64 |= (((u64)0xFF00 |
1795 nic->mac_control.mc_pause_threshold_q4q7)
1796 << (i * 2 * 8));
1797 }
1798 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1799
1800 /*
1801 	 * TxDMA will stop issuing read requests if the number of read
1802 	 * splits exceeds the limit set by shared_splits.
1803 */
1804 val64 = readq(&bar0->pic_control);
1805 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1806 writeq(val64, &bar0->pic_control);
1807
1808 if (nic->config.bus_speed == 266) {
1809 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1810 writeq(0x0, &bar0->read_retry_delay);
1811 writeq(0x0, &bar0->write_retry_delay);
1812 }
1813
1814 /*
1815 * Programming the Herc to split every write transaction
1816 * that does not start on an ADB to reduce disconnects.
1817 */
1818 if (nic->device_type == XFRAME_II_DEVICE) {
1819 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1820 MISC_LINK_STABILITY_PRD(3);
1821 writeq(val64, &bar0->misc_control);
1822 val64 = readq(&bar0->pic_control2);
1823 val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1824 writeq(val64, &bar0->pic_control2);
1825 }
1826 if (strstr(nic->product_name, "CX4")) {
1827 val64 = TMAC_AVG_IPG(0x17);
1828 writeq(val64, &bar0->tmac_avg_ipg);
1829 }
1830
1831 return SUCCESS;
1832}
1833#define LINK_UP_DOWN_INTERRUPT 1
1834#define MAC_RMAC_ERR_TIMER 2
1835
1836static int s2io_link_fault_indication(struct s2io_nic *nic)
1837{
1838 if (nic->device_type == XFRAME_II_DEVICE)
1839 return LINK_UP_DOWN_INTERRUPT;
1840 else
1841 return MAC_RMAC_ERR_TIMER;
1842}
1843
1844/**
1845 * do_s2io_write_bits - update alarm bits in alarm register
1846 * @value: alarm bits
1847 * @flag: ENABLE_INTRS or DISABLE_INTRS
1848 * @addr: address value
1849 * Description: update alarm bits in alarm register
1850 * Return Value:
1851 * NONE.
1852 */
1853static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1854{
1855 u64 temp64;
1856
1857 temp64 = readq(addr);
1858
1859 if (flag == ENABLE_INTRS)
1860 temp64 &= ~((u64)value);
1861 else
1862 temp64 |= ((u64)value);
1863 writeq(temp64, addr);
1864}
1865
1866static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
1867{
1868 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1869 register u64 gen_int_mask = 0;
1870 u64 interruptible;
1871
1872 writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
1873 if (mask & TX_DMA_INTR) {
1874 gen_int_mask |= TXDMA_INT_M;
1875
1876 do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
1877 TXDMA_PCC_INT | TXDMA_TTI_INT |
1878 TXDMA_LSO_INT | TXDMA_TPA_INT |
1879 TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
1880
1881 do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
1882 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1883 PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1884 &bar0->pfc_err_mask);
1885
1886 do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
1887 TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1888 TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
1889
1890 do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
1891 PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1892 PCC_N_SERR | PCC_6_COF_OV_ERR |
1893 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1894 PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1895 PCC_TXB_ECC_SG_ERR,
1896 flag, &bar0->pcc_err_mask);
1897
1898 do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
1899 TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
1900
1901 do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
1902 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1903 LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1904 flag, &bar0->lso_err_mask);
1905
1906 do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
1907 flag, &bar0->tpa_err_mask);
1908
1909 do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
1910 }
1911
1912 if (mask & TX_MAC_INTR) {
1913 gen_int_mask |= TXMAC_INT_M;
1914 do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
1915 &bar0->mac_int_mask);
1916 do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
1917 TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1918 TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1919 flag, &bar0->mac_tmac_err_mask);
1920 }
1921
1922 if (mask & TX_XGXS_INTR) {
1923 gen_int_mask |= TXXGXS_INT_M;
1924 do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
1925 &bar0->xgxs_int_mask);
1926 do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
1927 TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1928 flag, &bar0->xgxs_txgxs_err_mask);
1929 }
1930
1931 if (mask & RX_DMA_INTR) {
1932 gen_int_mask |= RXDMA_INT_M;
1933 do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
1934 RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1935 flag, &bar0->rxdma_int_mask);
1936 do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
1937 RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1938 RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1939 RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
1940 do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
1941 PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1942 PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
1943 &bar0->prc_pcix_err_mask);
1944 do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
1945 RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
1946 &bar0->rpa_err_mask);
1947 do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
1948 RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
1949 RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
1950 RDA_FRM_ECC_SG_ERR |
1951 RDA_MISC_ERR|RDA_PCIX_ERR,
1952 flag, &bar0->rda_err_mask);
1953 do_s2io_write_bits(RTI_SM_ERR_ALARM |
1954 RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
1955 flag, &bar0->rti_err_mask);
1956 }
1957
1958 if (mask & RX_MAC_INTR) {
1959 gen_int_mask |= RXMAC_INT_M;
1960 do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
1961 &bar0->mac_int_mask);
1962 interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
1963 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
1964 RMAC_DOUBLE_ECC_ERR);
1965 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
1966 interruptible |= RMAC_LINK_STATE_CHANGE_INT;
1967 do_s2io_write_bits(interruptible,
1968 flag, &bar0->mac_rmac_err_mask);
1969 }
1970
1971 if (mask & RX_XGXS_INTR) {
1972 gen_int_mask |= RXXGXS_INT_M;
1973 do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
1974 &bar0->xgxs_int_mask);
1975 do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
1976 &bar0->xgxs_rxgxs_err_mask);
1977 }
1978
1979 if (mask & MC_INTR) {
1980 gen_int_mask |= MC_INT_M;
1981 do_s2io_write_bits(MC_INT_MASK_MC_INT,
1982 flag, &bar0->mc_int_mask);
1983 do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
1984 MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
1985 &bar0->mc_err_mask);
1986 }
1987 nic->general_int_mask = gen_int_mask;
1988
1989 /* Remove this line when alarm interrupts are enabled */
1990 nic->general_int_mask = 0;
1991}
1992
1993/**
1994 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1995 * @nic: device private variable
1996 * @mask: A mask indicating which Intr block must be modified
1997 * @flag: A flag indicating whether to enable or disable the Intrs.
1998 * Description: This function will either disable or enable the interrupts
1999 * depending on the flag argument. The mask argument can be used to
2000 * enable/disable any Intr block.
2001 * Return Value: NONE.
2002 */
2003
2004static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
2005{
2006 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2007 register u64 temp64 = 0, intr_mask = 0;
2008
2009 intr_mask = nic->general_int_mask;
2010
2011 /* Top level interrupt classification */
2012 /* PIC Interrupts */
2013 if (mask & TX_PIC_INTR) {
2014 /* Enable PIC Intrs in the general intr mask register */
2015 intr_mask |= TXPIC_INT_M;
2016 if (flag == ENABLE_INTRS) {
2017 /*
2018 * If Hercules adapter enable GPIO otherwise
2019 * disable all PCIX, Flash, MDIO, IIC and GPIO
2020 * interrupts for now.
2021 * TODO
2022 */
2023 if (s2io_link_fault_indication(nic) ==
2024 LINK_UP_DOWN_INTERRUPT) {
2025 do_s2io_write_bits(PIC_INT_GPIO, flag,
2026 &bar0->pic_int_mask);
2027 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2028 &bar0->gpio_int_mask);
2029 } else
2030 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2031 } else if (flag == DISABLE_INTRS) {
2032 /*
2033 * Disable PIC Intrs in the general
2034 * intr mask register
2035 */
2036 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2037 }
2038 }
2039
2040 /* Tx traffic interrupts */
2041 if (mask & TX_TRAFFIC_INTR) {
2042 intr_mask |= TXTRAFFIC_INT_M;
2043 if (flag == ENABLE_INTRS) {
2044 /*
2045 * Enable all the Tx side interrupts
2046 * writing 0 Enables all 64 TX interrupt levels
2047 */
2048 writeq(0x0, &bar0->tx_traffic_mask);
2049 } else if (flag == DISABLE_INTRS) {
2050 /*
2051 * Disable Tx Traffic Intrs in the general intr mask
2052 * register.
2053 */
2054 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
2055 }
2056 }
2057
2058 /* Rx traffic interrupts */
2059 if (mask & RX_TRAFFIC_INTR) {
2060 intr_mask |= RXTRAFFIC_INT_M;
2061 if (flag == ENABLE_INTRS) {
2062 /* writing 0 Enables all 8 RX interrupt levels */
2063 writeq(0x0, &bar0->rx_traffic_mask);
2064 } else if (flag == DISABLE_INTRS) {
2065 /*
2066 * Disable Rx Traffic Intrs in the general intr mask
2067 * register.
2068 */
2069 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
2070 }
2071 }
2072
2073 temp64 = readq(&bar0->general_int_mask);
2074 if (flag == ENABLE_INTRS)
2075 temp64 &= ~((u64)intr_mask);
2076 else
2077 temp64 = DISABLE_ALL_INTRS;
2078 writeq(temp64, &bar0->general_int_mask);
2079
2080 nic->general_int_mask = readq(&bar0->general_int_mask);
2081}
2082
2083/**
2084 * verify_pcc_quiescent - Checks for PCC quiescent state
2085 * Return: 1 if PCC is quiescent
2086 * 0 if PCC is not quiescent
2087 */
2088static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2089{
2090 int ret = 0, herc;
2091 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2092 u64 val64 = readq(&bar0->adapter_status);
2093
2094 herc = (sp->device_type == XFRAME_II_DEVICE);
2095
2096 if (flag == false) {
2097 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2098 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2099 ret = 1;
2100 } else {
2101 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2102 ret = 1;
2103 }
2104 } else {
2105 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2106 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2107 ADAPTER_STATUS_RMAC_PCC_IDLE))
2108 ret = 1;
2109 } else {
2110 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2111 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2112 ret = 1;
2113 }
2114 }
2115
2116 return ret;
2117}
2118/**
2119 * verify_xena_quiescence - Checks whether the H/W is ready
2120 * Description: Returns whether the H/W is ready to go or not. Depending
2121 * on whether the adapter enable bit was written or not, the comparison
2122 * differs, and the calling function passes an input flag to
2123 * indicate this.
2124 * Return: 1 if Xena is quiescent
2125 * 0 if Xena is not quiescent
2126 */
2127
2128static int verify_xena_quiescence(struct s2io_nic *sp)
2129{
2130 int mode;
2131 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2132 u64 val64 = readq(&bar0->adapter_status);
2133 mode = s2io_verify_pci_mode(sp);
2134
2135 if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2136 DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
2137 return 0;
2138 }
2139 if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2140 DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
2141 return 0;
2142 }
2143 if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2144 DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
2145 return 0;
2146 }
2147 if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2148 DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
2149 return 0;
2150 }
2151 if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2152 DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
2153 return 0;
2154 }
2155 if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2156 DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
2157 return 0;
2158 }
2159 if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2160 DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
2161 return 0;
2162 }
2163 if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2164 DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
2165 return 0;
2166 }
2167
2168 /*
2169 * In PCI 33 mode, the P_PLL is not used, and therefore,
2170 	 * the P_PLL_LOCK bit in the adapter_status register will
2171 * not be asserted.
2172 */
2173 if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2174 sp->device_type == XFRAME_II_DEVICE &&
2175 mode != PCI_MODE_PCI_33) {
2176 DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
2177 return 0;
2178 }
2179 if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2180 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2181 DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
2182 return 0;
2183 }
2184 return 1;
2185}
2186
2187/**
2188 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
2189 * @sp: Pointer to device specific structure
2190 * Description:
2191 * New procedure to work around MAC address read problems on Alpha platforms
2192 *
2193 */
2194
2195static void fix_mac_address(struct s2io_nic *sp)
2196{
2197 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2198 int i = 0;
2199
2200 while (fix_mac[i] != END_SIGN) {
2201 writeq(fix_mac[i++], &bar0->gpio_control);
2202 udelay(10);
2203 (void) readq(&bar0->gpio_control);
2204 }
2205}
2206
2207/**
2208 * start_nic - Turns the device on
2209 * @nic : device private variable.
2210 * Description:
2211 * This function actually turns the device on. Before this function is
2212 * called, all registers are configured from their reset states
2213 * and shared memory is allocated but the NIC is still quiescent. On
2214 * calling this function, the device interrupts are cleared and the NIC is
2215 * literally switched on by writing into the adapter control register.
2216 * Return Value:
2217 * SUCCESS on success and -1 on failure.
2218 */
2219
2220static int start_nic(struct s2io_nic *nic)
2221{
2222 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2223 struct net_device *dev = nic->dev;
2224 register u64 val64 = 0;
2225 u16 subid, i;
2226 struct config_param *config = &nic->config;
2227 struct mac_info *mac_control = &nic->mac_control;
2228
2229 /* PRC Initialization and configuration */
2230 for (i = 0; i < config->rx_ring_num; i++) {
2231 struct ring_info *ring = &mac_control->rings[i];
2232
2233 writeq((u64)ring->rx_blocks[0].block_dma_addr,
2234 &bar0->prc_rxd0_n[i]);
2235
2236 val64 = readq(&bar0->prc_ctrl_n[i]);
2237 if (nic->rxd_mode == RXD_MODE_1)
2238 val64 |= PRC_CTRL_RC_ENABLED;
2239 else
2240 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2241 if (nic->device_type == XFRAME_II_DEVICE)
2242 val64 |= PRC_CTRL_GROUP_READS;
2243 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2244 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2245 writeq(val64, &bar0->prc_ctrl_n[i]);
2246 }
2247
2248 if (nic->rxd_mode == RXD_MODE_3B) {
2249 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2250 val64 = readq(&bar0->rx_pa_cfg);
2251 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2252 writeq(val64, &bar0->rx_pa_cfg);
2253 }
2254
2255 if (vlan_tag_strip == 0) {
2256 val64 = readq(&bar0->rx_pa_cfg);
2257 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2258 writeq(val64, &bar0->rx_pa_cfg);
2259 nic->vlan_strip_flag = 0;
2260 }
2261
2262 /*
2263 	 * Enabling MC-RLDRAM. After enabling the device, we wait
2264 * for around 100ms, which is approximately the time required
2265 * for the device to be ready for operation.
2266 */
2267 val64 = readq(&bar0->mc_rldram_mrs);
2268 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2269 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2270 val64 = readq(&bar0->mc_rldram_mrs);
2271
2272 msleep(100); /* Delay by around 100 ms. */
2273
2274 /* Enabling ECC Protection. */
2275 val64 = readq(&bar0->adapter_control);
2276 val64 &= ~ADAPTER_ECC_EN;
2277 writeq(val64, &bar0->adapter_control);
2278
2279 /*
2280 * Verify if the device is ready to be enabled, if so enable
2281 * it.
2282 */
2283 val64 = readq(&bar0->adapter_status);
2284 if (!verify_xena_quiescence(nic)) {
2285 DBG_PRINT(ERR_DBG, "%s: device is not ready, "
2286 "Adapter status reads: 0x%llx\n",
2287 dev->name, (unsigned long long)val64);
2288 return FAILURE;
2289 }
2290
2291 /*
2292 * With some switches, link might be already up at this point.
2293 * Because of this weird behavior, when we enable laser,
2294 * we may not get link. We need to handle this. We cannot
2295 * figure out which switch is misbehaving. So we are forced to
2296 * make a global change.
2297 */
2298
2299 /* Enabling Laser. */
2300 val64 = readq(&bar0->adapter_control);
2301 val64 |= ADAPTER_EOI_TX_ON;
2302 writeq(val64, &bar0->adapter_control);
2303
2304 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2305 /*
2306 		 * We don't see link state interrupts initially on some switches,
2307 		 * so schedule the link state task here directly.
2308 */
2309 schedule_work(&nic->set_link_task);
2310 }
2311 /* SXE-002: Initialize link and activity LED */
2312 subid = nic->pdev->subsystem_device;
2313 if (((subid & 0xFF) >= 0x07) &&
2314 (nic->device_type == XFRAME_I_DEVICE)) {
2315 val64 = readq(&bar0->gpio_control);
2316 val64 |= 0x0000800000000000ULL;
2317 writeq(val64, &bar0->gpio_control);
2318 val64 = 0x0411040400000000ULL;
2319 writeq(val64, (void __iomem *)bar0 + 0x2700);
2320 }
2321
2322 return SUCCESS;
2323}
2324/**
2325 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2326 */
2327static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
2328 struct TxD *txdlp, int get_off)
2329{
2330 struct s2io_nic *nic = fifo_data->nic;
2331 struct sk_buff *skb;
2332 struct TxD *txds;
2333 u16 j, frg_cnt;
2334
2335 txds = txdlp;
2336 if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2337 pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
2338 sizeof(u64), PCI_DMA_TODEVICE);
2339 txds++;
2340 }
2341
2342 skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
2343 if (!skb) {
2344 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2345 return NULL;
2346 }
2347 pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
2348 skb_headlen(skb), PCI_DMA_TODEVICE);
2349 frg_cnt = skb_shinfo(skb)->nr_frags;
2350 if (frg_cnt) {
2351 txds++;
2352 for (j = 0; j < frg_cnt; j++, txds++) {
2353 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2354 if (!txds->Buffer_Pointer)
2355 break;
2356 pci_unmap_page(nic->pdev,
2357 (dma_addr_t)txds->Buffer_Pointer,
2358 frag->size, PCI_DMA_TODEVICE);
2359 }
2360 }
2361 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2362 return skb;
2363}
2364
2365/**
2366 * free_tx_buffers - Free all queued Tx buffers
2367 * @nic : device private variable.
2368 * Description:
2369 * Free all queued Tx buffers.
2370 * Return Value: void
2371 */
2372
2373static void free_tx_buffers(struct s2io_nic *nic)
2374{
2375 struct net_device *dev = nic->dev;
2376 struct sk_buff *skb;
2377 struct TxD *txdp;
2378 int i, j;
2379 int cnt = 0;
2380 struct config_param *config = &nic->config;
2381 struct mac_info *mac_control = &nic->mac_control;
2382 struct stat_block *stats = mac_control->stats_info;
2383 struct swStat *swstats = &stats->sw_stat;
2384
2385 for (i = 0; i < config->tx_fifo_num; i++) {
2386 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
2387 struct fifo_info *fifo = &mac_control->fifos[i];
2388 unsigned long flags;
2389
2390 spin_lock_irqsave(&fifo->tx_lock, flags);
2391 for (j = 0; j < tx_cfg->fifo_len; j++) {
2392 txdp = fifo->list_info[j].list_virt_addr;
2393 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2394 if (skb) {
2395 swstats->mem_freed += skb->truesize;
2396 dev_kfree_skb(skb);
2397 cnt++;
2398 }
2399 }
2400 DBG_PRINT(INTR_DBG,
2401 "%s: forcibly freeing %d skbs on FIFO%d\n",
2402 dev->name, cnt, i);
2403 fifo->tx_curr_get_info.offset = 0;
2404 fifo->tx_curr_put_info.offset = 0;
2405 spin_unlock_irqrestore(&fifo->tx_lock, flags);
2406 }
2407}
2408
2409/**
2410 * stop_nic - To stop the nic
2411 * @nic: device private variable.
2412 * Description:
2413 * This function does exactly the opposite of what the start_nic()
2414 * function does. This function is called to stop the device.
2415 * Return Value:
2416 * void.
2417 */
2418
2419static void stop_nic(struct s2io_nic *nic)
2420{
2421 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2422 register u64 val64 = 0;
2423 u16 interruptible;
2424
2425 /* Disable all interrupts */
2426 en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2427 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2428 interruptible |= TX_PIC_INTR;
2429 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2430
2431 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2432 val64 = readq(&bar0->adapter_control);
2433 val64 &= ~(ADAPTER_CNTL_EN);
2434 writeq(val64, &bar0->adapter_control);
2435}
2436
2437/**
2438 * fill_rx_buffers - Allocates the Rx side skbs
2439 * @ring: per-ring structure
2440 * @from_card_up: If this is true, we will map the buffer to get
2441 * the dma address for buf0 and buf1 to give it to the card.
2442 * Else we will sync the already mapped buffer to give it to the card.
2443 * Description:
2444 * The function allocates Rx side skbs and puts the physical
2445 * address of these buffers into the RxD buffer pointers, so that the NIC
2446 * can DMA the received frame into these locations.
2447 * The NIC supports 3 receive modes, viz
2448 * 1. single buffer,
2449 * 2. three buffer and
2450 * 3. five buffer modes.
2451 * Each mode defines how many fragments the received frame will be split
2452 * up into by the NIC. The frame is split into L3 header, L4 Header,
2453 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2454 * is split into 3 fragments. As of now only single buffer mode is
2455 * supported.
2456 * Return Value:
2457 * SUCCESS on success or an appropriate -ve value on failure.
2458 */
2459static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
2460 int from_card_up)
2461{
2462 struct sk_buff *skb;
2463 struct RxD_t *rxdp;
2464 int off, size, block_no, block_no1;
2465 u32 alloc_tab = 0;
2466 u32 alloc_cnt;
2467 u64 tmp;
2468 struct buffAdd *ba;
2469 struct RxD_t *first_rxdp = NULL;
2470 u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2471 int rxd_index = 0;
2472 struct RxD1 *rxdp1;
2473 struct RxD3 *rxdp3;
2474 struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;
2475
2476 alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
2477
2478 block_no1 = ring->rx_curr_get_info.block_index;
2479 while (alloc_tab < alloc_cnt) {
2480 block_no = ring->rx_curr_put_info.block_index;
2481
2482 off = ring->rx_curr_put_info.offset;
2483
2484 rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
2485
2486 rxd_index = off + 1;
2487 if (block_no)
2488 rxd_index += (block_no * ring->rxd_count);
2489
2490 if ((block_no == block_no1) &&
2491 (off == ring->rx_curr_get_info.offset) &&
2492 (rxdp->Host_Control)) {
2493 DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
2494 ring->dev->name);
2495 goto end;
2496 }
2497 if (off && (off == ring->rxd_count)) {
2498 ring->rx_curr_put_info.block_index++;
2499 if (ring->rx_curr_put_info.block_index ==
2500 ring->block_count)
2501 ring->rx_curr_put_info.block_index = 0;
2502 block_no = ring->rx_curr_put_info.block_index;
2503 off = 0;
2504 ring->rx_curr_put_info.offset = off;
2505 rxdp = ring->rx_blocks[block_no].block_virt_addr;
2506 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2507 ring->dev->name, rxdp);
2508
2509 }
2510
2511 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2512 ((ring->rxd_mode == RXD_MODE_3B) &&
2513 (rxdp->Control_2 & s2BIT(0)))) {
2514 ring->rx_curr_put_info.offset = off;
2515 goto end;
2516 }
2517 /* calculate size of skb based on ring mode */
2518 size = ring->mtu +
2519 HEADER_ETHERNET_II_802_3_SIZE +
2520 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2521 if (ring->rxd_mode == RXD_MODE_1)
2522 size += NET_IP_ALIGN;
2523 else
2524 size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2525
2526 /* allocate skb */
2527 skb = dev_alloc_skb(size);
2528 if (!skb) {
2529 DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
2530 ring->dev->name);
2531 if (first_rxdp) {
2532 wmb();
2533 first_rxdp->Control_1 |= RXD_OWN_XENA;
2534 }
2535 swstats->mem_alloc_fail_cnt++;
2536
2537 return -ENOMEM ;
2538 }
2539 swstats->mem_allocated += skb->truesize;
2540
2541 if (ring->rxd_mode == RXD_MODE_1) {
2542 /* 1 buffer mode - normal operation mode */
2543 rxdp1 = (struct RxD1 *)rxdp;
2544 memset(rxdp, 0, sizeof(struct RxD1));
2545 skb_reserve(skb, NET_IP_ALIGN);
2546 rxdp1->Buffer0_ptr =
2547 pci_map_single(ring->pdev, skb->data,
2548 size - NET_IP_ALIGN,
2549 PCI_DMA_FROMDEVICE);
2550 if (pci_dma_mapping_error(nic->pdev,
2551 rxdp1->Buffer0_ptr))
2552 goto pci_map_failed;
2553
2554 rxdp->Control_2 =
2555 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2556 rxdp->Host_Control = (unsigned long)skb;
2557 } else if (ring->rxd_mode == RXD_MODE_3B) {
2558 /*
2559 * 2 buffer mode -
2560 * 2 buffer mode provides 128
2561 * byte aligned receive buffers.
2562 */
2563
2564 rxdp3 = (struct RxD3 *)rxdp;
2565 /* save buffer pointers to avoid frequent dma mapping */
2566 Buffer0_ptr = rxdp3->Buffer0_ptr;
2567 Buffer1_ptr = rxdp3->Buffer1_ptr;
2568 memset(rxdp, 0, sizeof(struct RxD3));
2569 /* restore the buffer pointers for dma sync*/
2570 rxdp3->Buffer0_ptr = Buffer0_ptr;
2571 rxdp3->Buffer1_ptr = Buffer1_ptr;
2572
2573 ba = &ring->ba[block_no][off];
2574 skb_reserve(skb, BUF0_LEN);
2575 tmp = (u64)(unsigned long)skb->data;
2576 tmp += ALIGN_SIZE;
2577 tmp &= ~ALIGN_SIZE;
2578 skb->data = (void *) (unsigned long)tmp;
2579 skb_reset_tail_pointer(skb);
2580
2581 if (from_card_up) {
2582 rxdp3->Buffer0_ptr =
2583 pci_map_single(ring->pdev, ba->ba_0,
2584 BUF0_LEN,
2585 PCI_DMA_FROMDEVICE);
2586 if (pci_dma_mapping_error(nic->pdev,
2587 rxdp3->Buffer0_ptr))
2588 goto pci_map_failed;
2589 } else
2590 pci_dma_sync_single_for_device(ring->pdev,
2591 (dma_addr_t)rxdp3->Buffer0_ptr,
2592 BUF0_LEN,
2593 PCI_DMA_FROMDEVICE);
2594
2595 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2596 if (ring->rxd_mode == RXD_MODE_3B) {
2597 /* Two buffer mode */
2598
2599 /*
2600 * Buffer2 will have L3/L4 header plus
2601 * L4 payload
2602 */
2603 rxdp3->Buffer2_ptr = pci_map_single(ring->pdev,
2604 skb->data,
2605 ring->mtu + 4,
2606 PCI_DMA_FROMDEVICE);
2607
2608 if (pci_dma_mapping_error(nic->pdev,
2609 rxdp3->Buffer2_ptr))
2610 goto pci_map_failed;
2611
2612 if (from_card_up) {
2613 rxdp3->Buffer1_ptr =
2614 pci_map_single(ring->pdev,
2615 ba->ba_1,
2616 BUF1_LEN,
2617 PCI_DMA_FROMDEVICE);
2618
2619 if (pci_dma_mapping_error(nic->pdev,
2620 rxdp3->Buffer1_ptr)) {
2621 pci_unmap_single(ring->pdev,
2622 (dma_addr_t)(unsigned long)
2623 skb->data,
2624 ring->mtu + 4,
2625 PCI_DMA_FROMDEVICE);
2626 goto pci_map_failed;
2627 }
2628 }
2629 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2630 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2631 (ring->mtu + 4);
2632 }
2633 rxdp->Control_2 |= s2BIT(0);
2634 rxdp->Host_Control = (unsigned long) (skb);
2635 }
2636 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2637 rxdp->Control_1 |= RXD_OWN_XENA;
2638 off++;
2639 if (off == (ring->rxd_count + 1))
2640 off = 0;
2641 ring->rx_curr_put_info.offset = off;
2642
2643 rxdp->Control_2 |= SET_RXD_MARKER;
2644 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2645 if (first_rxdp) {
2646 wmb();
2647 first_rxdp->Control_1 |= RXD_OWN_XENA;
2648 }
2649 first_rxdp = rxdp;
2650 }
2651 ring->rx_bufs_left += 1;
2652 alloc_tab++;
2653 }
2654
2655end:
2656 /* Transfer ownership of first descriptor to adapter just before
2657 * exiting. Before that, use memory barrier so that ownership
2658 * and other fields are seen by adapter correctly.
2659 */
2660 if (first_rxdp) {
2661 wmb();
2662 first_rxdp->Control_1 |= RXD_OWN_XENA;
2663 }
2664
2665 return SUCCESS;
2666
2667pci_map_failed:
2668 swstats->pci_map_fail_cnt++;
2669 swstats->mem_freed += skb->truesize;
2670 dev_kfree_skb_irq(skb);
2671 return -ENOMEM;
2672}
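/*
 * A note on the ownership hand-off above (a sketch, assuming the module
 * default rxsync_frequency of 3, i.e. batches of 8 RxDs): descriptors
 * inside a batch get RXD_OWN_XENA as soon as they are filled, but the
 * first descriptor of each batch is held back in first_rxdp and is only
 * handed to the adapter, after a wmb(), when the next batch starts or
 * when the function exits.  The adapter therefore never sees the head
 * of a batch before the rest of that batch is fully initialised.
 */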
2673
2674static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2675{
2676 struct net_device *dev = sp->dev;
2677 int j;
2678 struct sk_buff *skb;
2679 struct RxD_t *rxdp;
2680 struct RxD1 *rxdp1;
2681 struct RxD3 *rxdp3;
2682 struct mac_info *mac_control = &sp->mac_control;
2683 struct stat_block *stats = mac_control->stats_info;
2684 struct swStat *swstats = &stats->sw_stat;
2685
2686 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2687 rxdp = mac_control->rings[ring_no].
2688 rx_blocks[blk].rxds[j].virt_addr;
2689 skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2690 if (!skb)
2691 continue;
2692 if (sp->rxd_mode == RXD_MODE_1) {
2693 rxdp1 = (struct RxD1 *)rxdp;
2694 pci_unmap_single(sp->pdev,
2695 (dma_addr_t)rxdp1->Buffer0_ptr,
2696 dev->mtu +
2697 HEADER_ETHERNET_II_802_3_SIZE +
2698 HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
2699 PCI_DMA_FROMDEVICE);
2700 memset(rxdp, 0, sizeof(struct RxD1));
2701 } else if (sp->rxd_mode == RXD_MODE_3B) {
2702 rxdp3 = (struct RxD3 *)rxdp;
2703 pci_unmap_single(sp->pdev,
2704 (dma_addr_t)rxdp3->Buffer0_ptr,
2705 BUF0_LEN,
2706 PCI_DMA_FROMDEVICE);
2707 pci_unmap_single(sp->pdev,
2708 (dma_addr_t)rxdp3->Buffer1_ptr,
2709 BUF1_LEN,
2710 PCI_DMA_FROMDEVICE);
2711 pci_unmap_single(sp->pdev,
2712 (dma_addr_t)rxdp3->Buffer2_ptr,
2713 dev->mtu + 4,
2714 PCI_DMA_FROMDEVICE);
2715 memset(rxdp, 0, sizeof(struct RxD3));
2716 }
2717 swstats->mem_freed += skb->truesize;
2718 dev_kfree_skb(skb);
2719 mac_control->rings[ring_no].rx_bufs_left -= 1;
2720 }
2721}
2722
2723/**
2724 * free_rx_buffers - Frees all Rx buffers
2725 * @sp: device private variable.
2726 * Description:
2727 * This function will free all Rx buffers allocated by host.
2728 * Return Value:
2729 * NONE.
2730 */
2731
2732static void free_rx_buffers(struct s2io_nic *sp)
2733{
2734 struct net_device *dev = sp->dev;
2735 int i, blk = 0, buf_cnt = 0;
2736 struct config_param *config = &sp->config;
2737 struct mac_info *mac_control = &sp->mac_control;
2738
2739 for (i = 0; i < config->rx_ring_num; i++) {
2740 struct ring_info *ring = &mac_control->rings[i];
2741
2742 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2743 free_rxd_blk(sp, i, blk);
2744
2745 ring->rx_curr_put_info.block_index = 0;
2746 ring->rx_curr_get_info.block_index = 0;
2747 ring->rx_curr_put_info.offset = 0;
2748 ring->rx_curr_get_info.offset = 0;
2749 ring->rx_bufs_left = 0;
2750 DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
2751 dev->name, buf_cnt, i);
2752 }
2753}
2754
2755static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
2756{
2757 if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2758 DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
2759 ring->dev->name);
2760 }
2761 return 0;
2762}
2763
2764/**
2765 * s2io_poll_msix - Rx interrupt handler for NAPI support
2766 * @napi : pointer to the napi structure.
2767 * @budget : The number of packets that were budgeted to be processed
2768 * during one pass through the "poll" function.
2769 * Description:
2770 * Comes into picture only if NAPI support has been incorporated. It does
2771 * the same thing that rx_intr_handler does, but not in an interrupt context,
2772 * and it will process only a given number of packets.
2773 * Return value:
2774 * The number of packets processed during this poll pass.
2775 */
2776
2777static int s2io_poll_msix(struct napi_struct *napi, int budget)
2778{
2779 struct ring_info *ring = container_of(napi, struct ring_info, napi);
2780 struct net_device *dev = ring->dev;
2781 int pkts_processed = 0;
2782 u8 __iomem *addr = NULL;
2783 u8 val8 = 0;
2784 struct s2io_nic *nic = netdev_priv(dev);
2785 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2786 int budget_org = budget;
2787
2788 if (unlikely(!is_s2io_card_up(nic)))
2789 return 0;
2790
2791 pkts_processed = rx_intr_handler(ring, budget);
2792 s2io_chk_rx_buffers(nic, ring);
2793
2794 if (pkts_processed < budget_org) {
2795 napi_complete(napi);
2796 /*Re Enable MSI-Rx Vector*/
2797 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
2798 addr += 7 - ring->ring_no;
2799 val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2800 writeb(val8, addr);
2801 val8 = readb(addr);
2802 }
2803 return pkts_processed;
2804}
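/*
 * Rough sketch of the per-ring MSI-X masking used above and in
 * s2io_msix_ring_handle(): each ring owns one byte of xmsi_mask_reg at
 * offset (7 - ring_no).  The interrupt handler writes 0x7f (ring 0) or
 * 0xff (other rings) to that byte before scheduling NAPI, and the poll
 * routine re-enables the vector here by writing 0x3f or 0xbf; the
 * readb() that follows is presumably there to flush the posted write.
 */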
2805
2806static int s2io_poll_inta(struct napi_struct *napi, int budget)
2807{
2808 struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2809 int pkts_processed = 0;
2810 int ring_pkts_processed, i;
2811 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2812 int budget_org = budget;
2813 struct config_param *config = &nic->config;
2814 struct mac_info *mac_control = &nic->mac_control;
2815
2816 if (unlikely(!is_s2io_card_up(nic)))
2817 return 0;
2818
2819 for (i = 0; i < config->rx_ring_num; i++) {
2820 struct ring_info *ring = &mac_control->rings[i];
2821 ring_pkts_processed = rx_intr_handler(ring, budget);
2822 s2io_chk_rx_buffers(nic, ring);
2823 pkts_processed += ring_pkts_processed;
2824 budget -= ring_pkts_processed;
2825 if (budget <= 0)
2826 break;
2827 }
2828 if (pkts_processed < budget_org) {
2829 napi_complete(napi);
2830 /* Re enable the Rx interrupts for the ring */
2831 writeq(0, &bar0->rx_traffic_mask);
2832 readl(&bar0->rx_traffic_mask);
2833 }
2834 return pkts_processed;
2835}
2836
2837#ifdef CONFIG_NET_POLL_CONTROLLER
2838/**
2839 * s2io_netpoll - netpoll event handler entry point
2840 * @dev : pointer to the device structure.
2841 * Description:
2842 * This function will be called by upper layer to check for events on the
2843 * interface in situations where interrupts are disabled. It is used for
2844 * specific in-kernel networking tasks, such as remote consoles and kernel
2845 * debugging over the network (for example, netdump in Red Hat).
2846 */
2847static void s2io_netpoll(struct net_device *dev)
2848{
2849 struct s2io_nic *nic = netdev_priv(dev);
2850 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2851 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2852 int i;
2853 struct config_param *config = &nic->config;
2854 struct mac_info *mac_control = &nic->mac_control;
2855
2856 if (pci_channel_offline(nic->pdev))
2857 return;
2858
2859 disable_irq(dev->irq);
2860
2861 writeq(val64, &bar0->rx_traffic_int);
2862 writeq(val64, &bar0->tx_traffic_int);
2863
2864 /* we need to free up the transmitted skbs, or else netpoll will
2865 * run out of skbs and eventually the netpoll application (such
2866 * as netdump) will fail.
2867 */
2868 for (i = 0; i < config->tx_fifo_num; i++)
2869 tx_intr_handler(&mac_control->fifos[i]);
2870
2871 /* check for received packet and indicate up to network */
2872 for (i = 0; i < config->rx_ring_num; i++) {
2873 struct ring_info *ring = &mac_control->rings[i];
2874
2875 rx_intr_handler(ring, 0);
2876 }
2877
2878 for (i = 0; i < config->rx_ring_num; i++) {
2879 struct ring_info *ring = &mac_control->rings[i];
2880
2881 if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2882 DBG_PRINT(INFO_DBG,
2883 "%s: Out of memory in Rx Netpoll!!\n",
2884 dev->name);
2885 break;
2886 }
2887 }
2888 enable_irq(dev->irq);
2889}
2890#endif
2891
2892/**
2893 * rx_intr_handler - Rx interrupt handler
2894 * @ring_info: per ring structure.
2895 * @budget: budget for napi processing.
2896 * Description:
2897 * If the interrupt is because of a received frame or if the
2898 * receive ring contains fresh, as yet unprocessed frames, this function is
2899 * called. It picks out the RxD at which the last Rx processing had
2900 * stopped, sends the skb to the OSM's Rx handler and then increments
2901 * the offset.
2902 * Return Value:
2903 * No. of napi packets processed.
2904 */
2905static int rx_intr_handler(struct ring_info *ring_data, int budget)
2906{
2907 int get_block, put_block;
2908 struct rx_curr_get_info get_info, put_info;
2909 struct RxD_t *rxdp;
2910 struct sk_buff *skb;
2911 int pkt_cnt = 0, napi_pkts = 0;
2912 int i;
2913 struct RxD1 *rxdp1;
2914 struct RxD3 *rxdp3;
2915
2916 get_info = ring_data->rx_curr_get_info;
2917 get_block = get_info.block_index;
2918 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2919 put_block = put_info.block_index;
2920 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2921
2922 while (RXD_IS_UP2DT(rxdp)) {
2923 /*
2924 * If we are next to the put index then it's a
2925 * FIFO full condition
2926 */
2927 if ((get_block == put_block) &&
2928 (get_info.offset + 1) == put_info.offset) {
2929 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
2930 ring_data->dev->name);
2931 break;
2932 }
2933 skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2934 if (skb == NULL) {
2935 DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
2936 ring_data->dev->name);
2937 return 0;
2938 }
2939 if (ring_data->rxd_mode == RXD_MODE_1) {
2940 rxdp1 = (struct RxD1 *)rxdp;
2941 pci_unmap_single(ring_data->pdev, (dma_addr_t)
2942 rxdp1->Buffer0_ptr,
2943 ring_data->mtu +
2944 HEADER_ETHERNET_II_802_3_SIZE +
2945 HEADER_802_2_SIZE +
2946 HEADER_SNAP_SIZE,
2947 PCI_DMA_FROMDEVICE);
2948 } else if (ring_data->rxd_mode == RXD_MODE_3B) {
2949 rxdp3 = (struct RxD3 *)rxdp;
2950 pci_dma_sync_single_for_cpu(ring_data->pdev,
2951 (dma_addr_t)rxdp3->Buffer0_ptr,
2952 BUF0_LEN,
2953 PCI_DMA_FROMDEVICE);
2954 pci_unmap_single(ring_data->pdev,
2955 (dma_addr_t)rxdp3->Buffer2_ptr,
2956 ring_data->mtu + 4,
2957 PCI_DMA_FROMDEVICE);
2958 }
2959 prefetch(skb->data);
2960 rx_osm_handler(ring_data, rxdp);
2961 get_info.offset++;
2962 ring_data->rx_curr_get_info.offset = get_info.offset;
2963 rxdp = ring_data->rx_blocks[get_block].
2964 rxds[get_info.offset].virt_addr;
2965 if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
2966 get_info.offset = 0;
2967 ring_data->rx_curr_get_info.offset = get_info.offset;
2968 get_block++;
2969 if (get_block == ring_data->block_count)
2970 get_block = 0;
2971 ring_data->rx_curr_get_info.block_index = get_block;
2972 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2973 }
2974
2975 if (ring_data->nic->config.napi) {
2976 budget--;
2977 napi_pkts++;
2978 if (!budget)
2979 break;
2980 }
2981 pkt_cnt++;
2982 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2983 break;
2984 }
2985 if (ring_data->lro) {
2986 /* Clear all LRO sessions before exiting */
2987 for (i = 0; i < MAX_LRO_SESSIONS; i++) {
2988 struct lro *lro = &ring_data->lro0_n[i];
2989 if (lro->in_use) {
2990 update_L3L4_header(ring_data->nic, lro);
2991 queue_rx_frame(lro->parent, lro->vlan_tag);
2992 clear_lro_session(lro);
2993 }
2994 }
2995 }
2996 return napi_pkts;
2997}
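/*
 * The walk above is driven purely by the get/put pointers kept in
 * rx_curr_get_info and rx_curr_put_info: the offset runs through the
 * RxDs of one block, wraps to the next block at rxd_count, and the
 * block index wraps at block_count.  Processing stops when the next
 * descriptor is still owned by the adapter, when get would catch up
 * with put, or (under NAPI) when the budget is exhausted.
 */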
2998
2999/**
3000 * tx_intr_handler - Transmit interrupt handler
3001 * @fifo_data : per-fifo data structure
3002 * Description:
3003 * If an interrupt was raised to indicate DMA complete of the
3004 * Tx packet, this function is called. It identifies the last TxD
3005 * whose buffer was freed and frees all skbs whose data has already been
3006 * DMA'ed into the NIC's internal memory.
3007 * Return Value:
3008 * NONE
3009 */
3010
3011static void tx_intr_handler(struct fifo_info *fifo_data)
3012{
3013 struct s2io_nic *nic = fifo_data->nic;
3014 struct tx_curr_get_info get_info, put_info;
3015 struct sk_buff *skb = NULL;
3016 struct TxD *txdlp;
3017 int pkt_cnt = 0;
3018 unsigned long flags = 0;
3019 u8 err_mask;
3020 struct stat_block *stats = nic->mac_control.stats_info;
3021 struct swStat *swstats = &stats->sw_stat;
3022
3023 if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
3024 return;
3025
3026 get_info = fifo_data->tx_curr_get_info;
3027 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
3028 txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
3029 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
3030 (get_info.offset != put_info.offset) &&
3031 (txdlp->Host_Control)) {
3032 /* Check for TxD errors */
3033 if (txdlp->Control_1 & TXD_T_CODE) {
3034 unsigned long long err;
3035 err = txdlp->Control_1 & TXD_T_CODE;
3036 if (err & 0x1) {
3037 swstats->parity_err_cnt++;
3038 }
3039
3040 /* update t_code statistics */
3041 err_mask = err >> 48;
3042 switch (err_mask) {
3043 case 2:
3044 swstats->tx_buf_abort_cnt++;
3045 break;
3046
3047 case 3:
3048 swstats->tx_desc_abort_cnt++;
3049 break;
3050
3051 case 7:
3052 swstats->tx_parity_err_cnt++;
3053 break;
3054
3055 case 10:
3056 swstats->tx_link_loss_cnt++;
3057 break;
3058
3059 case 15:
3060 swstats->tx_list_proc_err_cnt++;
3061 break;
3062 }
3063 }
3064
3065 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
3066 if (skb == NULL) {
3067 spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3068 DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
3069 __func__);
3070 return;
3071 }
3072 pkt_cnt++;
3073
3074 /* Updating the statistics block */
3075 swstats->mem_freed += skb->truesize;
3076 dev_kfree_skb_irq(skb);
3077
3078 get_info.offset++;
3079 if (get_info.offset == get_info.fifo_len + 1)
3080 get_info.offset = 0;
3081 txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
3082 fifo_data->tx_curr_get_info.offset = get_info.offset;
3083 }
3084
3085 s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
3086
3087 spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3088}
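/*
 * tx_intr_handler() uses spin_trylock_irqsave() rather than blocking on
 * the fifo lock: it can be invoked both from the completion interrupt
 * and directly from s2io_xmit() (when MSI-X is in use), so if the lock
 * is contended the completions are simply left for the next invocation
 * instead of spinning in interrupt context.
 */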
3089
3090/**
3091 * s2io_mdio_write - Function to write into the MDIO registers
3092 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3093 * @addr : address value
3094 * @value : data value
3095 * @dev : pointer to net_device structure
3096 * Description:
3097 * This function is used to write values to the MDIO registers.
3098 * Return value: NONE
3099 */
3100static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
3101 struct net_device *dev)
3102{
3103 u64 val64;
3104 struct s2io_nic *sp = netdev_priv(dev);
3105 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3106
3107 /* address transaction */
3108 val64 = MDIO_MMD_INDX_ADDR(addr) |
3109 MDIO_MMD_DEV_ADDR(mmd_type) |
3110 MDIO_MMS_PRT_ADDR(0x0);
3111 writeq(val64, &bar0->mdio_control);
3112 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3113 writeq(val64, &bar0->mdio_control);
3114 udelay(100);
3115
3116 /* Data transaction */
3117 val64 = MDIO_MMD_INDX_ADDR(addr) |
3118 MDIO_MMD_DEV_ADDR(mmd_type) |
3119 MDIO_MMS_PRT_ADDR(0x0) |
3120 MDIO_MDIO_DATA(value) |
3121 MDIO_OP(MDIO_OP_WRITE_TRANS);
3122 writeq(val64, &bar0->mdio_control);
3123 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3124 writeq(val64, &bar0->mdio_control);
3125 udelay(100);
3126
3127 val64 = MDIO_MMD_INDX_ADDR(addr) |
3128 MDIO_MMD_DEV_ADDR(mmd_type) |
3129 MDIO_MMS_PRT_ADDR(0x0) |
3130 MDIO_OP(MDIO_OP_READ_TRANS);
3131 writeq(val64, &bar0->mdio_control);
3132 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3133 writeq(val64, &bar0->mdio_control);
3134 udelay(100);
3135}
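/*
 * Usage sketch (mirroring s2io_updt_xpak_counter() below, values only
 * illustrative): loading the XPAK DOM register in the PMA/PMD MMD is a
 * single call,
 *
 *	s2io_mdio_write(MDIO_MMD_PMAPMD, 0xA100, 0x0, dev);
 *
 * which the helper turns into an address transaction, a write-data
 * transaction and a read-back, each started via
 * MDIO_CTRL_START_TRANS(0xE) and followed by a ~100us settling delay
 * for the serial MDIO bus.
 */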
3136
3137/**
3138 * s2io_mdio_read - Function to read from the MDIO registers
3139 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3140 * @addr : address value
3141 * @dev : pointer to net_device structure
3142 * Description:
3143 * This function is used to read values from the MDIO registers.
3144 * Return value: the 16-bit register value read over MDIO.
3145 */
3146static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3147{
3148 u64 val64 = 0x0;
3149 u64 rval64 = 0x0;
3150 struct s2io_nic *sp = netdev_priv(dev);
3151 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3152
3153 /* address transaction */
3154 val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
3155 | MDIO_MMD_DEV_ADDR(mmd_type)
3156 | MDIO_MMS_PRT_ADDR(0x0));
3157 writeq(val64, &bar0->mdio_control);
3158 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3159 writeq(val64, &bar0->mdio_control);
3160 udelay(100);
3161
3162 /* Data transaction */
3163 val64 = MDIO_MMD_INDX_ADDR(addr) |
3164 MDIO_MMD_DEV_ADDR(mmd_type) |
3165 MDIO_MMS_PRT_ADDR(0x0) |
3166 MDIO_OP(MDIO_OP_READ_TRANS);
3167 writeq(val64, &bar0->mdio_control);
3168 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3169 writeq(val64, &bar0->mdio_control);
3170 udelay(100);
3171
3172 /* Read the value from regs */
3173 rval64 = readq(&bar0->mdio_control);
3174 rval64 = rval64 & 0xFFFF0000;
3175 rval64 = rval64 >> 16;
3176 return rval64;
3177}
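/*
 * The 16-bit register value comes back in bits 16..31 of mdio_control
 * and is shifted down, so callers receive a plain 16-bit value in a
 * u64.  For example (see s2io_updt_xpak_counter() below):
 *
 *	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, MDIO_CTRL1, dev);
 *
 * returns the PMA/PMD control register, which is expected to read back
 * as MDIO_CTRL1_SPEED10G when the XPAK/PMA is reachable and healthy.
 */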
3178
3179/**
3180 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
3181 * @counter : counter value to be updated
3182 * @flag : flag to indicate the status
3183 * @type : counter type
3184 * Description:
3185 * This function checks the status of the XPAK counters and updates them.
3186 * Return value: NONE
3187 */
3188
3189static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index,
3190 u16 flag, u16 type)
3191{
3192 u64 mask = 0x3;
3193 u64 val64;
3194 int i;
3195 for (i = 0; i < index; i++)
3196 mask = mask << 0x2;
3197
3198 if (flag > 0) {
3199 *counter = *counter + 1;
3200 val64 = *regs_stat & mask;
3201 val64 = val64 >> (index * 0x2);
3202 val64 = val64 + 1;
3203 if (val64 == 3) {
3204 switch (type) {
3205 case 1:
3206 DBG_PRINT(ERR_DBG,
3207 "Take Xframe NIC out of service.\n");
3208 DBG_PRINT(ERR_DBG,
3209"Excessive temperatures may result in premature transceiver failure.\n");
3210 break;
3211 case 2:
3212 DBG_PRINT(ERR_DBG,
3213 "Take Xframe NIC out of service.\n");
3214 DBG_PRINT(ERR_DBG,
3215"Excessive bias currents may indicate imminent laser diode failure.\n");
3216 break;
3217 case 3:
3218 DBG_PRINT(ERR_DBG,
3219 "Take Xframe NIC out of service.\n");
3220 DBG_PRINT(ERR_DBG,
3221"Excessive laser output power may saturate far-end receiver.\n");
3222 break;
3223 default:
3224 DBG_PRINT(ERR_DBG,
3225 "Incorrect XPAK Alarm type\n");
3226 }
3227 val64 = 0x0;
3228 }
3229 val64 = val64 << (index * 0x2);
3230 *regs_stat = (*regs_stat & (~mask)) | (val64);
3231
3232 } else {
3233 *regs_stat = *regs_stat & (~mask);
3234 }
3235}
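/*
 * Layout of xpak_regs_stat as used above (a sketch based on the index
 * values passed in by s2io_updt_xpak_counter() below): each alarm owns
 * a 2-bit saturating counter at bit offset (2 * index): temperature at
 * bits 1:0, laser bias current at bits 5:4 and laser output power at
 * bits 9:8.  Three consecutive samples with the alarm flag set trip
 * the warning messages and reset the field; a clear flag clears it.
 */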
3236
3237/**
3238 * s2io_updt_xpak_counter - Function to update the xpak counters
3239 * @dev : pointer to net_device struct
3240 * Description:
3241 * This function updates the status of the XPAK counters.
3242 * Return value: NONE
3243 */
3244static void s2io_updt_xpak_counter(struct net_device *dev)
3245{
3246 u16 flag = 0x0;
3247 u16 type = 0x0;
3248 u16 val16 = 0x0;
3249 u64 val64 = 0x0;
3250 u64 addr = 0x0;
3251
3252 struct s2io_nic *sp = netdev_priv(dev);
3253 struct stat_block *stats = sp->mac_control.stats_info;
3254 struct xpakStat *xstats = &stats->xpak_stat;
3255
3256 /* Check the communication with the MDIO slave */
3257 addr = MDIO_CTRL1;
3258 val64 = 0x0;
3259 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3260 if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
3261 DBG_PRINT(ERR_DBG,
3262 "ERR: MDIO slave access failed - Returned %llx\n",
3263 (unsigned long long)val64);
3264 return;
3265 }
3266
3267 /* Check for the expected value of control reg 1 */
3268 if (val64 != MDIO_CTRL1_SPEED10G) {
3269 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
3270 "Returned: %llx- Expected: 0x%x\n",
3271 (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
3272 return;
3273 }
3274
3275 /* Loading the DOM register to MDIO register */
3276 addr = 0xA100;
3277 s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
3278 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3279
3280 /* Reading the Alarm flags */
3281 addr = 0xA070;
3282 val64 = 0x0;
3283 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3284
3285 flag = CHECKBIT(val64, 0x7);
3286 type = 1;
3287 s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
3288 &xstats->xpak_regs_stat,
3289 0x0, flag, type);
3290
3291 if (CHECKBIT(val64, 0x6))
3292 xstats->alarm_transceiver_temp_low++;
3293
3294 flag = CHECKBIT(val64, 0x3);
3295 type = 2;
3296 s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
3297 &xstats->xpak_regs_stat,
3298 0x2, flag, type);
3299
3300 if (CHECKBIT(val64, 0x2))
3301 xstats->alarm_laser_bias_current_low++;
3302
3303 flag = CHECKBIT(val64, 0x1);
3304 type = 3;
3305 s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
3306 &xstats->xpak_regs_stat,
3307 0x4, flag, type);
3308
3309 if (CHECKBIT(val64, 0x0))
3310 xstats->alarm_laser_output_power_low++;
3311
3312 /* Reading the Warning flags */
3313 addr = 0xA074;
3314 val64 = 0x0;
3315 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3316
3317 if (CHECKBIT(val64, 0x7))
3318 xstats->warn_transceiver_temp_high++;
3319
3320 if (CHECKBIT(val64, 0x6))
3321 xstats->warn_transceiver_temp_low++;
3322
3323 if (CHECKBIT(val64, 0x3))
3324 xstats->warn_laser_bias_current_high++;
3325
3326 if (CHECKBIT(val64, 0x2))
3327 xstats->warn_laser_bias_current_low++;
3328
3329 if (CHECKBIT(val64, 0x1))
3330 xstats->warn_laser_output_power_high++;
3331
3332 if (CHECKBIT(val64, 0x0))
3333 xstats->warn_laser_output_power_low++;
3334}
3335
3336/**
3337 * wait_for_cmd_complete - waits for a command to complete.
3338 * @addr : address of the register to poll, @busy_bit : the bit to test,
3339 * @bit_state : the state (S2IO_BIT_SET/S2IO_BIT_RESET) to wait for.
3340 * Description: Function that waits for a command to Write into RMAC
3341 * ADDR DATA registers to be completed and returns either success or
3342 * error depending on whether the command was complete or not.
3343 * Return value:
3344 * SUCCESS on success and FAILURE on failure.
3345 */
3346
3347static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3348 int bit_state)
3349{
3350 int ret = FAILURE, cnt = 0, delay = 1;
3351 u64 val64;
3352
3353 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3354 return FAILURE;
3355
3356 do {
3357 val64 = readq(addr);
3358 if (bit_state == S2IO_BIT_RESET) {
3359 if (!(val64 & busy_bit)) {
3360 ret = SUCCESS;
3361 break;
3362 }
3363 } else {
3364 if (val64 & busy_bit) {
3365 ret = SUCCESS;
3366 break;
3367 }
3368 }
3369
3370 if (in_interrupt())
3371 mdelay(delay);
3372 else
3373 msleep(delay);
3374
3375 if (++cnt >= 10)
3376 delay = 50;
3377 } while (cnt < 20);
3378 return ret;
3379}
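/*
 * Polling schedule used above: the register is sampled roughly every
 * 1ms (mdelay() in interrupt context, msleep() otherwise) for the
 * first ten iterations and every 50ms after that, for at most twenty
 * iterations, i.e. on the order of half a second before giving up and
 * returning FAILURE.
 */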
3380/*
3381 * check_pci_device_id - Checks if the device id is supported
3382 * @id : device id
3383 * Description: Function to check if the pci device id is supported by driver.
3384 * Return value: Actual device id if supported else PCI_ANY_ID
3385 */
3386static u16 check_pci_device_id(u16 id)
3387{
3388 switch (id) {
3389 case PCI_DEVICE_ID_HERC_WIN:
3390 case PCI_DEVICE_ID_HERC_UNI:
3391 return XFRAME_II_DEVICE;
3392 case PCI_DEVICE_ID_S2IO_UNI:
3393 case PCI_DEVICE_ID_S2IO_WIN:
3394 return XFRAME_I_DEVICE;
3395 default:
3396 return PCI_ANY_ID;
3397 }
3398}
3399
3400/**
3401 * s2io_reset - Resets the card.
3402 * @sp : private member of the device structure.
3403 * Description: Function to Reset the card. This function then also
3404 * restores the previously saved PCI configuration space registers as
3405 * the card reset also resets the configuration space.
3406 * Return value:
3407 * void.
3408 */
3409
3410static void s2io_reset(struct s2io_nic *sp)
3411{
3412 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3413 u64 val64;
3414 u16 subid, pci_cmd;
3415 int i;
3416 u16 val16;
3417 unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3418 unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3419 struct stat_block *stats;
3420 struct swStat *swstats;
3421
3422 DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
3423 __func__, pci_name(sp->pdev));
3424
3425 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3426 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3427
3428 val64 = SW_RESET_ALL;
3429 writeq(val64, &bar0->sw_reset);
3430 if (strstr(sp->product_name, "CX4"))
3431 msleep(750);
3432 msleep(250);
3433 for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3434
3435 /* Restore the PCI state saved during initialization. */
3436 pci_restore_state(sp->pdev);
3437 pci_save_state(sp->pdev);
3438 pci_read_config_word(sp->pdev, 0x2, &val16);
3439 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3440 break;
3441 msleep(200);
3442 }
3443
3444 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
3445 DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);
3446
3447 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3448
3449 s2io_init_pci(sp);
3450
3451 /* Set swapper to enable I/O register access */
3452 s2io_set_swapper(sp);
3453
3454 /* restore mac_addr entries */
3455 do_s2io_restore_unicast_mc(sp);
3456
3457 /* Restore the MSIX table entries from local variables */
3458 restore_xmsi_data(sp);
3459
3460 /* Clear certain PCI/PCI-X fields after reset */
3461 if (sp->device_type == XFRAME_II_DEVICE) {
3462 /* Clear "detected parity error" bit */
3463 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3464
3465 /* Clearing PCIX Ecc status register */
3466 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3467
3468 /* Clearing PCI_STATUS error reflected here */
3469 writeq(s2BIT(62), &bar0->txpic_int_reg);
3470 }
3471
3472 /* Reset device statistics maintained by OS */
3473 memset(&sp->stats, 0, sizeof(struct net_device_stats));
3474
3475 stats = sp->mac_control.stats_info;
3476 swstats = &stats->sw_stat;
3477
3478 /* save link up/down time/cnt, reset/memory/watchdog cnt */
3479 up_cnt = swstats->link_up_cnt;
3480 down_cnt = swstats->link_down_cnt;
3481 up_time = swstats->link_up_time;
3482 down_time = swstats->link_down_time;
3483 reset_cnt = swstats->soft_reset_cnt;
3484 mem_alloc_cnt = swstats->mem_allocated;
3485 mem_free_cnt = swstats->mem_freed;
3486 watchdog_cnt = swstats->watchdog_timer_cnt;
3487
3488 memset(stats, 0, sizeof(struct stat_block));
3489
3490 /* restore link up/down time/cnt, reset/memory/watchdog cnt */
3491 swstats->link_up_cnt = up_cnt;
3492 swstats->link_down_cnt = down_cnt;
3493 swstats->link_up_time = up_time;
3494 swstats->link_down_time = down_time;
3495 swstats->soft_reset_cnt = reset_cnt;
3496 swstats->mem_allocated = mem_alloc_cnt;
3497 swstats->mem_freed = mem_free_cnt;
3498 swstats->watchdog_timer_cnt = watchdog_cnt;
3499
3500 /* SXE-002: Configure link and activity LED to turn it off */
3501 subid = sp->pdev->subsystem_device;
3502 if (((subid & 0xFF) >= 0x07) &&
3503 (sp->device_type == XFRAME_I_DEVICE)) {
3504 val64 = readq(&bar0->gpio_control);
3505 val64 |= 0x0000800000000000ULL;
3506 writeq(val64, &bar0->gpio_control);
3507 val64 = 0x0411040400000000ULL;
3508 writeq(val64, (void __iomem *)bar0 + 0x2700);
3509 }
3510
3511 /*
3512 * Clear spurious ECC interrupts that would have occurred on
3513 * XFRAME II cards after reset.
3514 */
3515 if (sp->device_type == XFRAME_II_DEVICE) {
3516 val64 = readq(&bar0->pcc_err_reg);
3517 writeq(val64, &bar0->pcc_err_reg);
3518 }
3519
3520 sp->device_enabled_once = false;
3521}
3522
3523/**
3524 * s2io_set_swapper - to set the swapper control on the card
3525 * @sp : private member of the device structure,
3526 * pointer to the s2io_nic structure.
3527 * Description: Function to set the swapper control on the card
3528 * correctly depending on the 'endianness' of the system.
3529 * Return value:
3530 * SUCCESS on success and FAILURE on failure.
3531 */
3532
3533static int s2io_set_swapper(struct s2io_nic *sp)
3534{
3535 struct net_device *dev = sp->dev;
3536 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3537 u64 val64, valt, valr;
3538
3539 /*
3540 * Set proper endian settings and verify the same by reading
3541 * the PIF Feed-back register.
3542 */
3543
3544 val64 = readq(&bar0->pif_rd_swapper_fb);
3545 if (val64 != 0x0123456789ABCDEFULL) {
3546 int i = 0;
3547 static const u64 value[] = {
3548 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
3549 0x8100008181000081ULL, /* FE=1, SE=0 */
3550 0x4200004242000042ULL, /* FE=0, SE=1 */
3551 0 /* FE=0, SE=0 */
3552 };
3553
3554 while (i < 4) {
3555 writeq(value[i], &bar0->swapper_ctrl);
3556 val64 = readq(&bar0->pif_rd_swapper_fb);
3557 if (val64 == 0x0123456789ABCDEFULL)
3558 break;
3559 i++;
3560 }
3561 if (i == 4) {
3562 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
3563 "feedback read %llx\n",
3564 dev->name, (unsigned long long)val64);
3565 return FAILURE;
3566 }
3567 valr = value[i];
3568 } else {
3569 valr = readq(&bar0->swapper_ctrl);
3570 }
3571
3572 valt = 0x0123456789ABCDEFULL;
3573 writeq(valt, &bar0->xmsi_address);
3574 val64 = readq(&bar0->xmsi_address);
3575
3576 if (val64 != valt) {
3577 int i = 0;
3578 static const u64 value[] = {
3579 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
3580 0x0081810000818100ULL, /* FE=1, SE=0 */
3581 0x0042420000424200ULL, /* FE=0, SE=1 */
3582 0 /* FE=0, SE=0 */
3583 };
3584
3585 while (i < 4) {
3586 writeq((value[i] | valr), &bar0->swapper_ctrl);
3587 writeq(valt, &bar0->xmsi_address);
3588 val64 = readq(&bar0->xmsi_address);
3589 if (val64 == valt)
3590 break;
3591 i++;
3592 }
3593 if (i == 4) {
3594 unsigned long long x = val64;
3595 DBG_PRINT(ERR_DBG,
3596 "Write failed, Xmsi_addr reads:0x%llx\n", x);
3597 return FAILURE;
3598 }
3599 }
3600 val64 = readq(&bar0->swapper_ctrl);
3601 val64 &= 0xFFFF000000000000ULL;
3602
3603#ifdef __BIG_ENDIAN
3604 /*
3605 * The device by default set to a big endian format, so a
3606 * big endian driver need not set anything.
3607 */
3608 val64 |= (SWAPPER_CTRL_TXP_FE |
3609 SWAPPER_CTRL_TXP_SE |
3610 SWAPPER_CTRL_TXD_R_FE |
3611 SWAPPER_CTRL_TXD_W_FE |
3612 SWAPPER_CTRL_TXF_R_FE |
3613 SWAPPER_CTRL_RXD_R_FE |
3614 SWAPPER_CTRL_RXD_W_FE |
3615 SWAPPER_CTRL_RXF_W_FE |
3616 SWAPPER_CTRL_XMSI_FE |
3617 SWAPPER_CTRL_STATS_FE |
3618 SWAPPER_CTRL_STATS_SE);
3619 if (sp->config.intr_type == INTA)
3620 val64 |= SWAPPER_CTRL_XMSI_SE;
3621 writeq(val64, &bar0->swapper_ctrl);
3622#else
3623 /*
3624 * Initially we enable all bits to make it accessible by the
3625 * driver, then we selectively enable only those bits that
3626 * we want to set.
3627 */
3628 val64 |= (SWAPPER_CTRL_TXP_FE |
3629 SWAPPER_CTRL_TXP_SE |
3630 SWAPPER_CTRL_TXD_R_FE |
3631 SWAPPER_CTRL_TXD_R_SE |
3632 SWAPPER_CTRL_TXD_W_FE |
3633 SWAPPER_CTRL_TXD_W_SE |
3634 SWAPPER_CTRL_TXF_R_FE |
3635 SWAPPER_CTRL_RXD_R_FE |
3636 SWAPPER_CTRL_RXD_R_SE |
3637 SWAPPER_CTRL_RXD_W_FE |
3638 SWAPPER_CTRL_RXD_W_SE |
3639 SWAPPER_CTRL_RXF_W_FE |
3640 SWAPPER_CTRL_XMSI_FE |
3641 SWAPPER_CTRL_STATS_FE |
3642 SWAPPER_CTRL_STATS_SE);
3643 if (sp->config.intr_type == INTA)
3644 val64 |= SWAPPER_CTRL_XMSI_SE;
3645 writeq(val64, &bar0->swapper_ctrl);
3646#endif
3647 val64 = readq(&bar0->swapper_ctrl);
3648
3649 /*
3650 * Verifying if endian settings are accurate by reading a
3651 * feedback register.
3652 */
3653 val64 = readq(&bar0->pif_rd_swapper_fb);
3654 if (val64 != 0x0123456789ABCDEFULL) {
3655 /* Endian settings are incorrect, calls for another dekko. */
3656 DBG_PRINT(ERR_DBG,
3657 "%s: Endian settings are wrong, feedback read %llx\n",
3658 dev->name, (unsigned long long)val64);
3659 return FAILURE;
3660 }
3661
3662 return SUCCESS;
3663}
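/*
 * The swapper verification above relies on two known patterns: the
 * read path is correct when pif_rd_swapper_fb reads back the constant
 * 0x0123456789ABCDEF, and the write path is correct when that same
 * pattern, written to xmsi_address, can be read back unchanged.  The
 * loops simply try the four FE/SE combinations listed in value[] until
 * one of them yields the expected pattern.
 */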
3664
3665static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3666{
3667 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3668 u64 val64;
3669 int ret = 0, cnt = 0;
3670
3671 do {
3672 val64 = readq(&bar0->xmsi_access);
3673 if (!(val64 & s2BIT(15)))
3674 break;
3675 mdelay(1);
3676 cnt++;
3677 } while (cnt < 5);
3678 if (cnt == 5) {
3679 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3680 ret = 1;
3681 }
3682
3683 return ret;
3684}
3685
3686static void restore_xmsi_data(struct s2io_nic *nic)
3687{
3688 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3689 u64 val64;
3690 int i, msix_index;
3691
3692 if (nic->device_type == XFRAME_I_DEVICE)
3693 return;
3694
3695 for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3696 msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3697 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3698 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3699 val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3700 writeq(val64, &bar0->xmsi_access);
3701 if (wait_for_msix_trans(nic, msix_index)) {
3702 DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3703 __func__, msix_index);
3704 continue;
3705 }
3706 }
3707}
3708
3709static void store_xmsi_data(struct s2io_nic *nic)
3710{
3711 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3712 u64 val64, addr, data;
3713 int i, msix_index;
3714
3715 if (nic->device_type == XFRAME_I_DEVICE)
3716 return;
3717
3718 /* Store and display */
3719 for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3720 msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3721 val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3722 writeq(val64, &bar0->xmsi_access);
3723 if (wait_for_msix_trans(nic, msix_index)) {
3724 DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3725 __func__, msix_index);
3726 continue;
3727 }
3728 addr = readq(&bar0->xmsi_address);
3729 data = readq(&bar0->xmsi_data);
3730 if (addr && data) {
3731 nic->msix_info[i].addr = addr;
3732 nic->msix_info[i].data = data;
3733 }
3734 }
3735}
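/*
 * Note on the msix_index arithmetic in store_xmsi_data() and
 * restore_xmsi_data(): vector 0 is the alarm vector and maps to XMSI
 * index 0, while ring vector i maps to XMSI index (i - 1) * 8 + 1.
 * This appears to match the entry numbers programmed in
 * s2io_enable_msi_x() below and the spacing of 8 used when filling
 * rx_mat with RX_MAT_SET().
 */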
3736
3737static int s2io_enable_msi_x(struct s2io_nic *nic)
3738{
3739 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3740 u64 rx_mat;
3741 u16 msi_control; /* Temp variable */
3742 int ret, i, j, msix_indx = 1;
3743 int size;
3744 struct stat_block *stats = nic->mac_control.stats_info;
3745 struct swStat *swstats = &stats->sw_stat;
3746
3747 size = nic->num_entries * sizeof(struct msix_entry);
3748 nic->entries = kzalloc(size, GFP_KERNEL);
3749 if (!nic->entries) {
3750 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3751 __func__);
3752 swstats->mem_alloc_fail_cnt++;
3753 return -ENOMEM;
3754 }
3755 swstats->mem_allocated += size;
3756
3757 size = nic->num_entries * sizeof(struct s2io_msix_entry);
3758 nic->s2io_entries = kzalloc(size, GFP_KERNEL);
3759 if (!nic->s2io_entries) {
3760 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3761 __func__);
3762 swstats->mem_alloc_fail_cnt++;
3763 kfree(nic->entries);
3764 swstats->mem_freed
3765 += (nic->num_entries * sizeof(struct msix_entry));
3766 return -ENOMEM;
3767 }
3768 swstats->mem_allocated += size;
3769
3770 nic->entries[0].entry = 0;
3771 nic->s2io_entries[0].entry = 0;
3772 nic->s2io_entries[0].in_use = MSIX_FLG;
3773 nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3774 nic->s2io_entries[0].arg = &nic->mac_control.fifos;
3775
3776 for (i = 1; i < nic->num_entries; i++) {
3777 nic->entries[i].entry = ((i - 1) * 8) + 1;
3778 nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
3779 nic->s2io_entries[i].arg = NULL;
3780 nic->s2io_entries[i].in_use = 0;
3781 }
3782
3783 rx_mat = readq(&bar0->rx_mat);
3784 for (j = 0; j < nic->config.rx_ring_num; j++) {
3785 rx_mat |= RX_MAT_SET(j, msix_indx);
3786 nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3787 nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3788 nic->s2io_entries[j+1].in_use = MSIX_FLG;
3789 msix_indx += 8;
3790 }
3791 writeq(rx_mat, &bar0->rx_mat);
3792 readq(&bar0->rx_mat);
3793
3794 ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
3795 /* We fail init if error or we get less vectors than min required */
3796 if (ret) {
3797 DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
3798 kfree(nic->entries);
3799 swstats->mem_freed += nic->num_entries *
3800 sizeof(struct msix_entry);
3801 kfree(nic->s2io_entries);
3802 swstats->mem_freed += nic->num_entries *
3803 sizeof(struct s2io_msix_entry);
3804 nic->entries = NULL;
3805 nic->s2io_entries = NULL;
3806 return -ENOMEM;
3807 }
3808
3809 /*
3810 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3811 * in the herc NIC. (Temp change, needs to be removed later)
3812 */
3813 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3814 msi_control |= 0x1; /* Enable MSI */
3815 pci_write_config_word(nic->pdev, 0x42, msi_control);
3816
3817 return 0;
3818}
3819
3820/* Handle software interrupt used during MSI(X) test */
3821static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3822{
3823 struct s2io_nic *sp = dev_id;
3824
3825 sp->msi_detected = 1;
3826 wake_up(&sp->msi_wait);
3827
3828 return IRQ_HANDLED;
3829}
3830
3831 /* Test interrupt path by forcing a software IRQ */
3832static int s2io_test_msi(struct s2io_nic *sp)
3833{
3834 struct pci_dev *pdev = sp->pdev;
3835 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3836 int err;
3837 u64 val64, saved64;
3838
3839 err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3840 sp->name, sp);
3841 if (err) {
3842 DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3843 sp->dev->name, pci_name(pdev), pdev->irq);
3844 return err;
3845 }
3846
3847 init_waitqueue_head(&sp->msi_wait);
3848 sp->msi_detected = 0;
3849
3850 saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3851 val64 |= SCHED_INT_CTRL_ONE_SHOT;
3852 val64 |= SCHED_INT_CTRL_TIMER_EN;
3853 val64 |= SCHED_INT_CTRL_INT2MSI(1);
3854 writeq(val64, &bar0->scheduled_int_ctrl);
3855
3856 wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3857
3858 if (!sp->msi_detected) {
3859 /* MSI(X) test failed, go back to INTx mode */
3860 DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
3861 "using MSI(X) during test\n",
3862 sp->dev->name, pci_name(pdev));
3863
3864 err = -EOPNOTSUPP;
3865 }
3866
3867 free_irq(sp->entries[1].vector, sp);
3868
3869 writeq(saved64, &bar0->scheduled_int_ctrl);
3870
3871 return err;
3872}
3873
3874static void remove_msix_isr(struct s2io_nic *sp)
3875{
3876 int i;
3877 u16 msi_control;
3878
3879 for (i = 0; i < sp->num_entries; i++) {
3880 if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
3881 int vector = sp->entries[i].vector;
3882 void *arg = sp->s2io_entries[i].arg;
3883 free_irq(vector, arg);
3884 }
3885 }
3886
3887 kfree(sp->entries);
3888 kfree(sp->s2io_entries);
3889 sp->entries = NULL;
3890 sp->s2io_entries = NULL;
3891
3892 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3893 msi_control &= 0xFFFE; /* Disable MSI */
3894 pci_write_config_word(sp->pdev, 0x42, msi_control);
3895
3896 pci_disable_msix(sp->pdev);
3897}
3898
3899static void remove_inta_isr(struct s2io_nic *sp)
3900{
3901 struct net_device *dev = sp->dev;
3902
3903 free_irq(sp->pdev->irq, dev);
3904}
3905
3906/* ********************************************************* *
3907 * Functions defined below concern the OS part of the driver *
3908 * ********************************************************* */
3909
3910/**
3911 * s2io_open - open entry point of the driver
3912 * @dev : pointer to the device structure.
3913 * Description:
3914 * This function is the open entry point of the driver. It mainly calls a
3915 * function to allocate Rx buffers and inserts them into the buffer
3916 * descriptors and then enables the Rx part of the NIC.
3917 * Return value:
3918 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3919 * file on failure.
3920 */
3921
3922static int s2io_open(struct net_device *dev)
3923{
3924 struct s2io_nic *sp = netdev_priv(dev);
3925 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
3926 int err = 0;
3927
3928 /*
3929 * Make sure you have link off by default every time
3930 * Nic is initialized
3931 */
3932 netif_carrier_off(dev);
3933 sp->last_link_state = 0;
3934
3935 /* Initialize H/W and enable interrupts */
3936 err = s2io_card_up(sp);
3937 if (err) {
3938 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3939 dev->name);
3940 goto hw_init_failed;
3941 }
3942
3943 if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
3944 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3945 s2io_card_down(sp);
3946 err = -ENODEV;
3947 goto hw_init_failed;
3948 }
3949 s2io_start_all_tx_queue(sp);
3950 return 0;
3951
3952hw_init_failed:
3953 if (sp->config.intr_type == MSI_X) {
3954 if (sp->entries) {
3955 kfree(sp->entries);
3956 swstats->mem_freed += sp->num_entries *
3957 sizeof(struct msix_entry);
3958 }
3959 if (sp->s2io_entries) {
3960 kfree(sp->s2io_entries);
3961 swstats->mem_freed += sp->num_entries *
3962 sizeof(struct s2io_msix_entry);
3963 }
3964 }
3965 return err;
3966}
3967
3968/**
3969 * s2io_close -close entry point of the driver
3970 * @dev : device pointer.
3971 * Description:
3972 * This is the stop entry point of the driver. It needs to undo exactly
3973 * whatever was done by the open entry point, thus it's usually referred to
3974 * as the close function. Among other things this function mainly stops the
3975 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3976 * Return value:
3977 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3978 * file on failure.
3979 */
3980
3981static int s2io_close(struct net_device *dev)
3982{
3983 struct s2io_nic *sp = netdev_priv(dev);
3984 struct config_param *config = &sp->config;
3985 u64 tmp64;
3986 int offset;
3987
3988 /* Return if the device is already closed *
3989 * Can happen when s2io_card_up failed in change_mtu *
3990 */
3991 if (!is_s2io_card_up(sp))
3992 return 0;
3993
3994 s2io_stop_all_tx_queue(sp);
3995 /* delete all populated mac entries */
3996 for (offset = 1; offset < config->max_mc_addr; offset++) {
3997 tmp64 = do_s2io_read_unicast_mc(sp, offset);
3998 if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
3999 do_s2io_delete_unicast_mc(sp, tmp64);
4000 }
4001
4002 s2io_card_down(sp);
4003
4004 return 0;
4005}
4006
4007/**
4008 * s2io_xmit - Tx entry point of the driver
4009 * @skb : the socket buffer containing the Tx data.
4010 * @dev : device pointer.
4011 * Description :
4012 * This function is the Tx entry point of the driver. S2IO NIC supports
4013 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
4014 * NOTE: when the device can't queue the pkt, just the trans_start variable
4015 * will not be updated.
4016 * Return value:
4017 * NETDEV_TX_OK on success; NETDEV_TX_BUSY or NETDEV_TX_LOCKED otherwise.
4018 */
4019
4020static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4021{
4022 struct s2io_nic *sp = netdev_priv(dev);
4023 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4024 register u64 val64;
4025 struct TxD *txdp;
4026 struct TxFIFO_element __iomem *tx_fifo;
4027 unsigned long flags = 0;
4028 u16 vlan_tag = 0;
4029 struct fifo_info *fifo = NULL;
4030 int do_spin_lock = 1;
4031 int offload_type;
4032 int enable_per_list_interrupt = 0;
4033 struct config_param *config = &sp->config;
4034 struct mac_info *mac_control = &sp->mac_control;
4035 struct stat_block *stats = mac_control->stats_info;
4036 struct swStat *swstats = &stats->sw_stat;
4037
4038 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
4039
4040 if (unlikely(skb->len <= 0)) {
4041 DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
4042 dev_kfree_skb_any(skb);
4043 return NETDEV_TX_OK;
4044 }
4045
4046 if (!is_s2io_card_up(sp)) {
4047 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4048 dev->name);
4049 dev_kfree_skb(skb);
4050 return NETDEV_TX_OK;
4051 }
4052
4053 queue = 0;
4054 if (vlan_tx_tag_present(skb))
4055 vlan_tag = vlan_tx_tag_get(skb);
4056 if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4057 if (skb->protocol == htons(ETH_P_IP)) {
4058 struct iphdr *ip;
4059 struct tcphdr *th;
4060 ip = ip_hdr(skb);
4061
4062 if (!ip_is_fragment(ip)) {
4063 th = (struct tcphdr *)(((unsigned char *)ip) +
4064 ip->ihl*4);
4065
4066 if (ip->protocol == IPPROTO_TCP) {
4067 queue_len = sp->total_tcp_fifos;
4068 queue = (ntohs(th->source) +
4069 ntohs(th->dest)) &
4070 sp->fifo_selector[queue_len - 1];
4071 if (queue >= queue_len)
4072 queue = queue_len - 1;
4073 } else if (ip->protocol == IPPROTO_UDP) {
4074 queue_len = sp->total_udp_fifos;
4075 queue = (ntohs(th->source) +
4076 ntohs(th->dest)) &
4077 sp->fifo_selector[queue_len - 1];
4078 if (queue >= queue_len)
4079 queue = queue_len - 1;
4080 queue += sp->udp_fifo_idx;
4081 if (skb->len > 1024)
4082 enable_per_list_interrupt = 1;
4083 do_spin_lock = 0;
4084 }
4085 }
4086 }
4087 } else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
4088 /* get fifo number based on skb->priority value */
4089 queue = config->fifo_mapping
4090 [skb->priority & (MAX_TX_FIFOS - 1)];
4091 fifo = &mac_control->fifos[queue];
4092
4093 if (do_spin_lock)
4094 spin_lock_irqsave(&fifo->tx_lock, flags);
4095 else {
4096 if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
4097 return NETDEV_TX_LOCKED;
4098 }
4099
4100 if (sp->config.multiq) {
4101 if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
4102 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4103 return NETDEV_TX_BUSY;
4104 }
4105 } else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
4106 if (netif_queue_stopped(dev)) {
4107 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4108 return NETDEV_TX_BUSY;
4109 }
4110 }
4111
4112 put_off = (u16)fifo->tx_curr_put_info.offset;
4113 get_off = (u16)fifo->tx_curr_get_info.offset;
4114 txdp = fifo->list_info[put_off].list_virt_addr;
4115
4116 queue_len = fifo->tx_curr_put_info.fifo_len + 1;
4117 /* Avoid "put" pointer going beyond "get" pointer */
4118 if (txdp->Host_Control ||
4119 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4120 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4121 s2io_stop_tx_queue(sp, fifo->fifo_no);
4122 dev_kfree_skb(skb);
4123 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4124 return NETDEV_TX_OK;
4125 }
4126
4127 offload_type = s2io_offload_type(skb);
4128 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4129 txdp->Control_1 |= TXD_TCP_LSO_EN;
4130 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4131 }
4132 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4133 txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
4134 TXD_TX_CKO_TCP_EN |
4135 TXD_TX_CKO_UDP_EN);
4136 }
4137 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4138 txdp->Control_1 |= TXD_LIST_OWN_XENA;
4139 txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
4140 if (enable_per_list_interrupt)
4141 if (put_off & (queue_len >> 5))
4142 txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
4143 if (vlan_tag) {
4144 txdp->Control_2 |= TXD_VLAN_ENABLE;
4145 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4146 }
4147
4148 frg_len = skb_headlen(skb);
4149 if (offload_type == SKB_GSO_UDP) {
4150 int ufo_size;
4151
4152 ufo_size = s2io_udp_mss(skb);
4153 ufo_size &= ~7;
4154 txdp->Control_1 |= TXD_UFO_EN;
4155 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
4156 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
4157#ifdef __BIG_ENDIAN
4158 /* both variants do cpu_to_be64(be32_to_cpu(...)) */
4159 fifo->ufo_in_band_v[put_off] =
4160 (__force u64)skb_shinfo(skb)->ip6_frag_id;
4161#else
4162 fifo->ufo_in_band_v[put_off] =
4163 (__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
4164#endif
4165 txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
4166 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
4167 fifo->ufo_in_band_v,
4168 sizeof(u64),
4169 PCI_DMA_TODEVICE);
4170 if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
4171 goto pci_map_failed;
4172 txdp++;
4173 }
4174
4175 txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data,
4176 frg_len, PCI_DMA_TODEVICE);
4177 if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
4178 goto pci_map_failed;
4179
4180 txdp->Host_Control = (unsigned long)skb;
4181 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4182 if (offload_type == SKB_GSO_UDP)
4183 txdp->Control_1 |= TXD_UFO_EN;
4184
4185 frg_cnt = skb_shinfo(skb)->nr_frags;
4186 /* For fragmented SKB. */
4187 for (i = 0; i < frg_cnt; i++) {
4188 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4189 /* A '0' length fragment will be ignored */
4190 if (!frag->size)
4191 continue;
4192 txdp++;
4193 txdp->Buffer_Pointer = (u64)pci_map_page(sp->pdev, frag->page,
4194 frag->page_offset,
4195 frag->size,
4196 PCI_DMA_TODEVICE);
4197 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
4198 if (offload_type == SKB_GSO_UDP)
4199 txdp->Control_1 |= TXD_UFO_EN;
4200 }
4201 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4202
4203 if (offload_type == SKB_GSO_UDP)
4204 frg_cnt++; /* as Txd0 was used for inband header */
4205
4206 tx_fifo = mac_control->tx_FIFO_start[queue];
4207 val64 = fifo->list_info[put_off].list_phy_addr;
4208 writeq(val64, &tx_fifo->TxDL_Pointer);
4209
4210 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4211 TX_FIFO_LAST_LIST);
4212 if (offload_type)
4213 val64 |= TX_FIFO_SPECIAL_FUNC;
4214
4215 writeq(val64, &tx_fifo->List_Control);
4216
4217 mmiowb();
4218
4219 put_off++;
4220 if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
4221 put_off = 0;
4222 fifo->tx_curr_put_info.offset = put_off;
4223
4224 /* Avoid "put" pointer going beyond "get" pointer */
4225 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4226 swstats->fifo_full_cnt++;
4227 DBG_PRINT(TX_DBG,
4228 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4229 put_off, get_off);
4230 s2io_stop_tx_queue(sp, fifo->fifo_no);
4231 }
4232 swstats->mem_allocated += skb->truesize;
4233 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4234
4235 if (sp->config.intr_type == MSI_X)
4236 tx_intr_handler(fifo);
4237
4238 return NETDEV_TX_OK;
4239
4240pci_map_failed:
4241 swstats->pci_map_fail_cnt++;
4242 s2io_stop_tx_queue(sp, fifo->fifo_no);
4243 swstats->mem_freed += skb->truesize;
4244 dev_kfree_skb(skb);
4245 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4246 return NETDEV_TX_OK;
4247}
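/*
 * A worked example of the default Tx steering above (illustrative
 * numbers, assuming fifo_selector[n - 1] is a mask covering the n
 * configured fifos): with 8 TCP fifos and a mask of 7, a TCP flow with
 * source port 33001 and destination port 80 hashes to
 * (33001 + 80) & 7 = 1, so all segments of that flow are queued on
 * fifo 1.  UDP flows are hashed the same way but offset by
 * udp_fifo_idx, and use the trylock path so a busy fifo simply returns
 * NETDEV_TX_LOCKED instead of spinning.
 */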
4248
4249static void
4250s2io_alarm_handle(unsigned long data)
4251{
4252 struct s2io_nic *sp = (struct s2io_nic *)data;
4253 struct net_device *dev = sp->dev;
4254
4255 s2io_handle_errors(dev);
4256 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4257}
4258
4259static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4260{
4261 struct ring_info *ring = (struct ring_info *)dev_id;
4262 struct s2io_nic *sp = ring->nic;
4263 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4264
4265 if (unlikely(!is_s2io_card_up(sp)))
4266 return IRQ_HANDLED;
4267
4268 if (sp->config.napi) {
4269 u8 __iomem *addr = NULL;
4270 u8 val8 = 0;
4271
4272 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
4273 addr += (7 - ring->ring_no);
4274 val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4275 writeb(val8, addr);
4276 val8 = readb(addr);
4277 napi_schedule(&ring->napi);
4278 } else {
4279 rx_intr_handler(ring, 0);
4280 s2io_chk_rx_buffers(sp, ring);
4281 }
4282
4283 return IRQ_HANDLED;
4284}
4285
4286static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4287{
4288 int i;
4289 struct fifo_info *fifos = (struct fifo_info *)dev_id;
4290 struct s2io_nic *sp = fifos->nic;
4291 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4292 struct config_param *config = &sp->config;
4293 u64 reason;
4294
4295 if (unlikely(!is_s2io_card_up(sp)))
4296 return IRQ_NONE;
4297
4298 reason = readq(&bar0->general_int_status);
4299 if (unlikely(reason == S2IO_MINUS_ONE))
4300 /* Nothing much can be done. Get out */
4301 return IRQ_HANDLED;
4302
4303 if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
4304 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4305
4306 if (reason & GEN_INTR_TXPIC)
4307 s2io_txpic_intr_handle(sp);
4308
4309 if (reason & GEN_INTR_TXTRAFFIC)
4310 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4311
4312 for (i = 0; i < config->tx_fifo_num; i++)
4313 tx_intr_handler(&fifos[i]);
4314
4315 writeq(sp->general_int_mask, &bar0->general_int_mask);
4316 readl(&bar0->general_int_status);
4317 return IRQ_HANDLED;
4318 }
4319 /* The interrupt was not raised by us */
4320 return IRQ_NONE;
4321}
4322
4323static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4324{
4325 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4326 u64 val64;
4327
4328 val64 = readq(&bar0->pic_int_status);
4329 if (val64 & PIC_INT_GPIO) {
4330 val64 = readq(&bar0->gpio_int_reg);
4331 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4332 (val64 & GPIO_INT_REG_LINK_UP)) {
4333 /*
4334 * This is an unstable state, so clear both the up and down
4335 * interrupts and let the adapter re-evaluate the link state.
4336 */
4337 val64 |= GPIO_INT_REG_LINK_DOWN;
4338 val64 |= GPIO_INT_REG_LINK_UP;
4339 writeq(val64, &bar0->gpio_int_reg);
4340 val64 = readq(&bar0->gpio_int_mask);
4341 val64 &= ~(GPIO_INT_MASK_LINK_UP |
4342 GPIO_INT_MASK_LINK_DOWN);
4343 writeq(val64, &bar0->gpio_int_mask);
4344 } else if (val64 & GPIO_INT_REG_LINK_UP) {
4345 val64 = readq(&bar0->adapter_status);
4346 /* Enable Adapter */
4347 val64 = readq(&bar0->adapter_control);
4348 val64 |= ADAPTER_CNTL_EN;
4349 writeq(val64, &bar0->adapter_control);
4350 val64 |= ADAPTER_LED_ON;
4351 writeq(val64, &bar0->adapter_control);
4352 if (!sp->device_enabled_once)
4353 sp->device_enabled_once = 1;
4354
4355 s2io_link(sp, LINK_UP);
4356 /*
4357 * unmask link down interrupt and mask link-up
4358 * intr
4359 */
4360 val64 = readq(&bar0->gpio_int_mask);
4361 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4362 val64 |= GPIO_INT_MASK_LINK_UP;
4363 writeq(val64, &bar0->gpio_int_mask);
4364
4365 } else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4366 val64 = readq(&bar0->adapter_status);
4367 s2io_link(sp, LINK_DOWN);
4368 /* Link is down so unmask link up interrupt */
4369 val64 = readq(&bar0->gpio_int_mask);
4370 val64 &= ~GPIO_INT_MASK_LINK_UP;
4371 val64 |= GPIO_INT_MASK_LINK_DOWN;
4372 writeq(val64, &bar0->gpio_int_mask);
4373
4374 /* turn off LED */
4375 val64 = readq(&bar0->adapter_control);
4376 val64 = val64 & (~ADAPTER_LED_ON);
4377 writeq(val64, &bar0->adapter_control);
4378 }
4379 }
4380 val64 = readq(&bar0->gpio_int_mask);
4381}
4382
4383/**
4384 * do_s2io_chk_alarm_bit - Check for alarm and increment the counter
4385 * @value: alarm bits
4386 * @addr: address value
4387 * @cnt: counter variable
4388 * Description: Check for alarm and increment the counter
4389 * Return Value:
4390 * 1 - if alarm bit set
4391 * 0 - if alarm bit is not set
4392 */
4393static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4394 unsigned long long *cnt)
4395{
4396 u64 val64;
4397 val64 = readq(addr);
4398 if (val64 & value) {
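		/* Writing the latched bits back acknowledges (clears) the alarm; then bump the caller's error counter and report it. */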
4399 writeq(val64, addr);
4400 (*cnt)++;
4401 return 1;
4402 }
4403 return 0;
4404
4405}
4406
4407/**
4408 * s2io_handle_errors - Xframe error indication handler
4409 * @nic: device private variable
4410 * Description: Handle alarms such as loss of link, single or
4411 * double ECC errors, critical and serious errors.
4412 * Return Value:
4413 * NONE
4414 */
4415static void s2io_handle_errors(void *dev_id)
4416{
4417 struct net_device *dev = (struct net_device *)dev_id;
4418 struct s2io_nic *sp = netdev_priv(dev);
4419 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4420 u64 temp64 = 0, val64 = 0;
4421 int i = 0;
4422
4423 struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4424 struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4425
4426 if (!is_s2io_card_up(sp))
4427 return;
4428
4429 if (pci_channel_offline(sp->pdev))
4430 return;
4431
4432 memset(&sw_stat->ring_full_cnt, 0,
4433 sizeof(sw_stat->ring_full_cnt));
4434
4435 /* Handling the XPAK counters update */
4436 if (stats->xpak_timer_count < 72000) {
4437 /* waiting for an hour */
4438 stats->xpak_timer_count++;
4439 } else {
4440 s2io_updt_xpak_counter(dev);
4441 /* reset the count to zero */
4442 stats->xpak_timer_count = 0;
4443 }
4444
4445 /* Handling link status change error Intr */
4446 if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4447 val64 = readq(&bar0->mac_rmac_err_reg);
4448 writeq(val64, &bar0->mac_rmac_err_reg);
4449 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4450 schedule_work(&sp->set_link_task);
4451 }
4452
4453 /* In case of a serious error, the device will be Reset. */
4454 if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4455 &sw_stat->serious_err_cnt))
4456 goto reset;
4457
4458 /* Check for data parity error */
4459 if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4460 &sw_stat->parity_err_cnt))
4461 goto reset;
4462
4463 /* Check for ring full counter */
4464 if (sp->device_type == XFRAME_II_DEVICE) {
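		/* Each 64-bit ring bump counter packs four 16-bit per-ring "ring full" counts; extract and accumulate each field. */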
4465 val64 = readq(&bar0->ring_bump_counter1);
4466 for (i = 0; i < 4; i++) {
4467 temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4468 temp64 >>= 64 - ((i+1)*16);
4469 sw_stat->ring_full_cnt[i] += temp64;
4470 }
4471
4472 val64 = readq(&bar0->ring_bump_counter2);
4473 for (i = 0; i < 4; i++) {
4474 temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4475 temp64 >>= 64 - ((i+1)*16);
4476 sw_stat->ring_full_cnt[i+4] += temp64;
4477 }
4478 }
4479
4480 val64 = readq(&bar0->txdma_int_status);
4481 /*check for pfc_err*/
4482 if (val64 & TXDMA_PFC_INT) {
4483 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
4484 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
4485 PFC_PCIX_ERR,
4486 &bar0->pfc_err_reg,
4487 &sw_stat->pfc_err_cnt))
4488 goto reset;
4489 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
4490 &bar0->pfc_err_reg,
4491 &sw_stat->pfc_err_cnt);
4492 }
4493
4494 /*check for tda_err*/
4495 if (val64 & TXDMA_TDA_INT) {
4496 if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
4497 TDA_SM0_ERR_ALARM |
4498 TDA_SM1_ERR_ALARM,
4499 &bar0->tda_err_reg,
4500 &sw_stat->tda_err_cnt))
4501 goto reset;
4502 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4503 &bar0->tda_err_reg,
4504 &sw_stat->tda_err_cnt);
4505 }
4506 /*check for pcc_err*/
4507 if (val64 & TXDMA_PCC_INT) {
4508 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
4509 PCC_N_SERR | PCC_6_COF_OV_ERR |
4510 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
4511 PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
4512 PCC_TXB_ECC_DB_ERR,
4513 &bar0->pcc_err_reg,
4514 &sw_stat->pcc_err_cnt))
4515 goto reset;
4516 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4517 &bar0->pcc_err_reg,
4518 &sw_stat->pcc_err_cnt);
4519 }
4520
4521 /*check for tti_err*/
4522 if (val64 & TXDMA_TTI_INT) {
4523 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
4524 &bar0->tti_err_reg,
4525 &sw_stat->tti_err_cnt))
4526 goto reset;
4527 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4528 &bar0->tti_err_reg,
4529 &sw_stat->tti_err_cnt);
4530 }
4531
4532 /*check for lso_err*/
4533 if (val64 & TXDMA_LSO_INT) {
4534 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
4535 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4536 &bar0->lso_err_reg,
4537 &sw_stat->lso_err_cnt))
4538 goto reset;
4539 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4540 &bar0->lso_err_reg,
4541 &sw_stat->lso_err_cnt);
4542 }
4543
4544 /*check for tpa_err*/
4545 if (val64 & TXDMA_TPA_INT) {
4546 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
4547 &bar0->tpa_err_reg,
4548 &sw_stat->tpa_err_cnt))
4549 goto reset;
4550 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
4551 &bar0->tpa_err_reg,
4552 &sw_stat->tpa_err_cnt);
4553 }
4554
4555 /*check for sm_err*/
4556 if (val64 & TXDMA_SM_INT) {
4557 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
4558 &bar0->sm_err_reg,
4559 &sw_stat->sm_err_cnt))
4560 goto reset;
4561 }
4562
4563 val64 = readq(&bar0->mac_int_status);
4564 if (val64 & MAC_INT_STATUS_TMAC_INT) {
4565 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4566 &bar0->mac_tmac_err_reg,
4567 &sw_stat->mac_tmac_err_cnt))
4568 goto reset;
4569 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
4570 TMAC_DESC_ECC_SG_ERR |
4571 TMAC_DESC_ECC_DB_ERR,
4572 &bar0->mac_tmac_err_reg,
4573 &sw_stat->mac_tmac_err_cnt);
4574 }
4575
4576 val64 = readq(&bar0->xgxs_int_status);
4577 if (val64 & XGXS_INT_STATUS_TXGXS) {
4578 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4579 &bar0->xgxs_txgxs_err_reg,
4580 &sw_stat->xgxs_txgxs_err_cnt))
4581 goto reset;
4582 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4583 &bar0->xgxs_txgxs_err_reg,
4584 &sw_stat->xgxs_txgxs_err_cnt);
4585 }
4586
4587 val64 = readq(&bar0->rxdma_int_status);
4588 if (val64 & RXDMA_INT_RC_INT_M) {
4589 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
4590 RC_FTC_ECC_DB_ERR |
4591 RC_PRCn_SM_ERR_ALARM |
4592 RC_FTC_SM_ERR_ALARM,
4593 &bar0->rc_err_reg,
4594 &sw_stat->rc_err_cnt))
4595 goto reset;
4596 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
4597 RC_FTC_ECC_SG_ERR |
4598 RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4599 &sw_stat->rc_err_cnt);
4600 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
4601 PRC_PCI_AB_WR_Rn |
4602 PRC_PCI_AB_F_WR_Rn,
4603 &bar0->prc_pcix_err_reg,
4604 &sw_stat->prc_pcix_err_cnt))
4605 goto reset;
4606 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
4607 PRC_PCI_DP_WR_Rn |
4608 PRC_PCI_DP_F_WR_Rn,
4609 &bar0->prc_pcix_err_reg,
4610 &sw_stat->prc_pcix_err_cnt);
4611 }
4612
4613 if (val64 & RXDMA_INT_RPA_INT_M) {
4614 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4615 &bar0->rpa_err_reg,
4616 &sw_stat->rpa_err_cnt))
4617 goto reset;
4618 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4619 &bar0->rpa_err_reg,
4620 &sw_stat->rpa_err_cnt);
4621 }
4622
4623 if (val64 & RXDMA_INT_RDA_INT_M) {
4624 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
4625 RDA_FRM_ECC_DB_N_AERR |
4626 RDA_SM1_ERR_ALARM |
4627 RDA_SM0_ERR_ALARM |
4628 RDA_RXD_ECC_DB_SERR,
4629 &bar0->rda_err_reg,
4630 &sw_stat->rda_err_cnt))
4631 goto reset;
4632 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
4633 RDA_FRM_ECC_SG_ERR |
4634 RDA_MISC_ERR |
4635 RDA_PCIX_ERR,
4636 &bar0->rda_err_reg,
4637 &sw_stat->rda_err_cnt);
4638 }
4639
4640 if (val64 & RXDMA_INT_RTI_INT_M) {
4641 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
4642 &bar0->rti_err_reg,
4643 &sw_stat->rti_err_cnt))
4644 goto reset;
4645 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4646 &bar0->rti_err_reg,
4647 &sw_stat->rti_err_cnt);
4648 }
4649
4650 val64 = readq(&bar0->mac_int_status);
4651 if (val64 & MAC_INT_STATUS_RMAC_INT) {
4652 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4653 &bar0->mac_rmac_err_reg,
4654 &sw_stat->mac_rmac_err_cnt))
4655 goto reset;
4656 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
4657 RMAC_SINGLE_ECC_ERR |
4658 RMAC_DOUBLE_ECC_ERR,
4659 &bar0->mac_rmac_err_reg,
4660 &sw_stat->mac_rmac_err_cnt);
4661 }
4662
4663 val64 = readq(&bar0->xgxs_int_status);
4664 if (val64 & XGXS_INT_STATUS_RXGXS) {
4665 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4666 &bar0->xgxs_rxgxs_err_reg,
4667 &sw_stat->xgxs_rxgxs_err_cnt))
4668 goto reset;
4669 }
4670
4671 val64 = readq(&bar0->mc_int_status);
4672 if (val64 & MC_INT_STATUS_MC_INT) {
4673 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
4674 &bar0->mc_err_reg,
4675 &sw_stat->mc_err_cnt))
4676 goto reset;
4677
4678 /* Handling Ecc errors */
4679 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4680 writeq(val64, &bar0->mc_err_reg);
4681 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4682 sw_stat->double_ecc_errs++;
4683 if (sp->device_type != XFRAME_II_DEVICE) {
4684 /*
4685 * Reset XframeI only if critical error
4686 */
4687 if (val64 &
4688 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4689 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4690 goto reset;
4691 }
4692 } else
4693 sw_stat->single_ecc_errs++;
4694 }
4695 }
4696 return;
4697
4698reset:
4699 s2io_stop_all_tx_queue(sp);
4700 schedule_work(&sp->rst_timer_task);
4701 sw_stat->soft_reset_cnt++;
4702}
4703
4704/**
4705 * s2io_isr - ISR handler of the device.
4706 * @irq: the irq of the device.
4707 * @dev_id: a void pointer to the dev structure of the NIC.
4708 * Description: This function is the ISR handler of the device. It
4709 * identifies the reason for the interrupt and calls the relevant
4710 * service routines. As a contingency measure, this ISR allocates the
4711 * receive buffers if their number falls below the panic value, which is
4712 * presently set to 25% of the original number of rcv buffers allocated.
4713 * Return value:
4714 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
4715 * IRQ_NONE: will be returned if interrupt is not from our device
4716 */
4717static irqreturn_t s2io_isr(int irq, void *dev_id)
4718{
4719 struct net_device *dev = (struct net_device *)dev_id;
4720 struct s2io_nic *sp = netdev_priv(dev);
4721 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4722 int i;
4723 u64 reason = 0;
4724 struct mac_info *mac_control;
4725 struct config_param *config;
4726
4727 /* Pretend we handled any irq's from a disconnected card */
4728 if (pci_channel_offline(sp->pdev))
4729 return IRQ_NONE;
4730
4731 if (!is_s2io_card_up(sp))
4732 return IRQ_NONE;
4733
4734 config = &sp->config;
4735 mac_control = &sp->mac_control;
4736
4737 /*
4738 * Identify the cause for interrupt and call the appropriate
4739 * interrupt handler. Causes for the interrupt could be;
4740 * 1. Rx of packet.
4741 * 2. Tx complete.
4742 * 3. Link down.
4743 */
4744 reason = readq(&bar0->general_int_status);
4745
4746 if (unlikely(reason == S2IO_MINUS_ONE))
4747 return IRQ_HANDLED; /* Nothing much can be done. Get out */
4748
4749 if (reason &
4750 (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
4751 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4752
4753 if (config->napi) {
4754 if (reason & GEN_INTR_RXTRAFFIC) {
4755 napi_schedule(&sp->napi);
4756 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4757 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4758 readl(&bar0->rx_traffic_int);
4759 }
4760 } else {
4761 /*
4762 * rx_traffic_int reg is an R1 register, writing all 1's
4763 * will ensure that the actual interrupt causing bit
4764 * gets cleared and hence a read can be avoided.
4765 */
4766 if (reason & GEN_INTR_RXTRAFFIC)
4767 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4768
4769 for (i = 0; i < config->rx_ring_num; i++) {
4770 struct ring_info *ring = &mac_control->rings[i];
4771
4772 rx_intr_handler(ring, 0);
4773 }
4774 }
4775
4776 /*
4777 * tx_traffic_int reg is an R1 register, writing all 1's
4778 * will ensure that the actual interrupt causing bit gets
4779 * cleared and hence a read can be avoided.
4780 */
4781 if (reason & GEN_INTR_TXTRAFFIC)
4782 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4783
4784 for (i = 0; i < config->tx_fifo_num; i++)
4785 tx_intr_handler(&mac_control->fifos[i]);
4786
4787 if (reason & GEN_INTR_TXPIC)
4788 s2io_txpic_intr_handle(sp);
4789
4790 /*
4791 * Reallocate the buffers from the interrupt handler itself.
4792 */
4793 if (!config->napi) {
4794 for (i = 0; i < config->rx_ring_num; i++) {
4795 struct ring_info *ring = &mac_control->rings[i];
4796
4797 s2io_chk_rx_buffers(sp, ring);
4798 }
4799 }
4800 writeq(sp->general_int_mask, &bar0->general_int_mask);
4801 readl(&bar0->general_int_status);
4802
4803 return IRQ_HANDLED;
4804
4805 } else if (!reason) {
4806 /* The interrupt was not raised by us */
4807 return IRQ_NONE;
4808 }
4809
4810 return IRQ_HANDLED;
4811}
4812
4813/**
4814 * s2io_updt_stats - Trigger an immediate update of the hardware statistics block
4815 */
4816static void s2io_updt_stats(struct s2io_nic *sp)
4817{
4818 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4819 u64 val64;
4820 int cnt = 0;
4821
4822 if (is_s2io_card_up(sp)) {
4823 /* Approx. 30us on a 133 MHz bus */
4824 val64 = SET_UPDT_CLICKS(10) |
4825 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4826 writeq(val64, &bar0->stat_cfg);
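		/* Poll for the one-shot statistics update to finish (bit 0 of stat_cfg clears), giving up after 5 tries (~500us). */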
4827 do {
4828 udelay(100);
4829 val64 = readq(&bar0->stat_cfg);
4830 if (!(val64 & s2BIT(0)))
4831 break;
4832 cnt++;
4833 if (cnt == 5)
4834 break; /* Updt failed */
4835 } while (1);
4836 }
4837}
4838
4839/**
4840 * s2io_get_stats - Updates the device statistics structure.
4841 * @dev : pointer to the device structure.
4842 * Description:
4843 * This function updates the device statistics structure in the s2io_nic
4844 * structure and returns a pointer to the same.
4845 * Return value:
4846 * pointer to the updated net_device_stats structure.
4847 */
4848static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4849{
4850 struct s2io_nic *sp = netdev_priv(dev);
4851 struct mac_info *mac_control = &sp->mac_control;
4852 struct stat_block *stats = mac_control->stats_info;
4853 u64 delta;
4854
4855 /* Configure Stats for immediate updt */
4856 s2io_updt_stats(sp);
4857
4858 /* A device reset will cause the on-adapter statistics to be zero'ed.
4859 * This can be done while running by changing the MTU. To prevent the
4860 * system from having the stats zero'ed, the driver keeps a copy of the
4861 * last update to the system (which is also zero'ed on reset). This
4862 * enables the driver to accurately know the delta between the last
4863 * update and the current update.
4864 */
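	/* Most counters are a 32-bit value plus a separate overflow word that supplies the upper 32 bits; combine them before taking the delta. */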
4865 delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
4866 le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
4867 sp->stats.rx_packets += delta;
4868 dev->stats.rx_packets += delta;
4869
4870 delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
4871 le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
4872 sp->stats.tx_packets += delta;
4873 dev->stats.tx_packets += delta;
4874
4875 delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
4876 le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
4877 sp->stats.rx_bytes += delta;
4878 dev->stats.rx_bytes += delta;
4879
4880 delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
4881 le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
4882 sp->stats.tx_bytes += delta;
4883 dev->stats.tx_bytes += delta;
4884
4885 delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
4886 sp->stats.rx_errors += delta;
4887 dev->stats.rx_errors += delta;
4888
4889 delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
4890 le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
4891 sp->stats.tx_errors += delta;
4892 dev->stats.tx_errors += delta;
4893
4894 delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
4895 sp->stats.rx_dropped += delta;
4896 dev->stats.rx_dropped += delta;
4897
4898 delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
4899 sp->stats.tx_dropped += delta;
4900 dev->stats.tx_dropped += delta;
4901
4902 /* The adapter MAC interprets pause frames as multicast packets, but
4903 * does not pass them up. This erroneously increases the multicast
4904 * packet count and needs to be deducted when the multicast frame count
4905 * is queried.
4906 */
4907 delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
4908 le32_to_cpu(stats->rmac_vld_mcst_frms);
4909 delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
4910 delta -= sp->stats.multicast;
4911 sp->stats.multicast += delta;
4912 dev->stats.multicast += delta;
4913
4914 delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
4915 le32_to_cpu(stats->rmac_usized_frms)) +
4916 le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
4917 sp->stats.rx_length_errors += delta;
4918 dev->stats.rx_length_errors += delta;
4919
4920 delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
4921 sp->stats.rx_crc_errors += delta;
4922 dev->stats.rx_crc_errors += delta;
4923
4924 return &dev->stats;
4925}
4926
4927/**
4928 * s2io_set_multicast - entry point for multicast address enable/disable.
4929 * @dev : pointer to the device structure
4930 * Description:
4931 * This function is a driver entry point which gets called by the kernel
4932 * whenever multicast addresses must be enabled/disabled. This also gets
4933 * called to set/reset promiscuous mode. Depending on the device flags, we
4934 * determine whether multicast addresses must be enabled or whether
4935 * promiscuous mode is to be set or reset.
4936 * Return value:
4937 * void.
4938 */
4939
4940static void s2io_set_multicast(struct net_device *dev)
4941{
4942 int i, j, prev_cnt;
4943 struct netdev_hw_addr *ha;
4944 struct s2io_nic *sp = netdev_priv(dev);
4945 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4946 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4947 0xfeffffffffffULL;
4948 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
4949 void __iomem *add;
4950 struct config_param *config = &sp->config;
4951
4952 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4953 /* Enable all Multicast addresses */
4954 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4955 &bar0->rmac_addr_data0_mem);
4956 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4957 &bar0->rmac_addr_data1_mem);
4958 val64 = RMAC_ADDR_CMD_MEM_WE |
4959 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4960 RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
4961 writeq(val64, &bar0->rmac_addr_cmd_mem);
4962 /* Wait till command completes */
4963 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4964 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4965 S2IO_BIT_RESET);
4966
4967 sp->m_cast_flg = 1;
4968 sp->all_multi_pos = config->max_mc_addr - 1;
4969 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4970 /* Disable all Multicast addresses */
4971 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4972 &bar0->rmac_addr_data0_mem);
4973 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4974 &bar0->rmac_addr_data1_mem);
4975 val64 = RMAC_ADDR_CMD_MEM_WE |
4976 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4977 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4978 writeq(val64, &bar0->rmac_addr_cmd_mem);
4979 /* Wait till command completes */
4980 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4981 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4982 S2IO_BIT_RESET);
4983
4984 sp->m_cast_flg = 0;
4985 sp->all_multi_pos = 0;
4986 }
4987
4988 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4989 /* Put the NIC into promiscuous mode */
4990 add = &bar0->mac_cfg;
4991 val64 = readq(&bar0->mac_cfg);
4992 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4993
4994 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4995 writel((u32)val64, add);
4996 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4997 writel((u32) (val64 >> 32), (add + 4));
4998
4999 if (vlan_tag_strip != 1) {
5000 val64 = readq(&bar0->rx_pa_cfg);
5001 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
5002 writeq(val64, &bar0->rx_pa_cfg);
5003 sp->vlan_strip_flag = 0;
5004 }
5005
5006 val64 = readq(&bar0->mac_cfg);
5007 sp->promisc_flg = 1;
5008 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
5009 dev->name);
5010 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
5011 /* Remove the NIC from promiscuous mode */
5012 add = &bar0->mac_cfg;
5013 val64 = readq(&bar0->mac_cfg);
5014 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
5015
5016 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5017 writel((u32)val64, add);
5018 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5019 writel((u32) (val64 >> 32), (add + 4));
5020
5021 if (vlan_tag_strip != 0) {
5022 val64 = readq(&bar0->rx_pa_cfg);
5023 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
5024 writeq(val64, &bar0->rx_pa_cfg);
5025 sp->vlan_strip_flag = 1;
5026 }
5027
5028 val64 = readq(&bar0->mac_cfg);
5029 sp->promisc_flg = 0;
5030 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
5031 }
5032
5033 /* Update individual M_CAST address list */
5034 if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
5035 if (netdev_mc_count(dev) >
5036 (config->max_mc_addr - config->max_mac_addr)) {
5037 DBG_PRINT(ERR_DBG,
5038 "%s: No more Rx filters can be added - "
5039 "please enable ALL_MULTI instead\n",
5040 dev->name);
5041 return;
5042 }
5043
5044 prev_cnt = sp->mc_addr_count;
5045 sp->mc_addr_count = netdev_mc_count(dev);
5046
5047 /* Clear out the previous list of Mc in the H/W. */
5048 for (i = 0; i < prev_cnt; i++) {
5049 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
5050 &bar0->rmac_addr_data0_mem);
5051 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5052 &bar0->rmac_addr_data1_mem);
5053 val64 = RMAC_ADDR_CMD_MEM_WE |
5054 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5055 RMAC_ADDR_CMD_MEM_OFFSET
5056 (config->mc_start_offset + i);
5057 writeq(val64, &bar0->rmac_addr_cmd_mem);
5058
5059 /* Wait till command completes */
5060 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5061 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5062 S2IO_BIT_RESET)) {
5063 DBG_PRINT(ERR_DBG,
5064 "%s: Adding Multicasts failed\n",
5065 dev->name);
5066 return;
5067 }
5068 }
5069
5070 /* Create the new Rx filter list and update the same in H/W. */
5071 i = 0;
5072 netdev_for_each_mc_addr(ha, dev) {
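			/* Pack the 6-byte multicast address MSB-first into a 64-bit value for the CAM data register. */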
5073 mac_addr = 0;
5074 for (j = 0; j < ETH_ALEN; j++) {
5075 mac_addr |= ha->addr[j];
5076 mac_addr <<= 8;
5077 }
5078 mac_addr >>= 8;
5079 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5080 &bar0->rmac_addr_data0_mem);
5081 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5082 &bar0->rmac_addr_data1_mem);
5083 val64 = RMAC_ADDR_CMD_MEM_WE |
5084 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5085 RMAC_ADDR_CMD_MEM_OFFSET
5086 (i + config->mc_start_offset);
5087 writeq(val64, &bar0->rmac_addr_cmd_mem);
5088
5089 /* Wait till command completes */
5090 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5091 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5092 S2IO_BIT_RESET)) {
5093 DBG_PRINT(ERR_DBG,
5094 "%s: Adding Multicasts failed\n",
5095 dev->name);
5096 return;
5097 }
5098 i++;
5099 }
5100 }
5101}
5102
5103/* read from CAM unicast & multicast addresses and store it in
5104 * def_mac_addr structure
5105 */
5106static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5107{
5108 int offset;
5109 u64 mac_addr = 0x0;
5110 struct config_param *config = &sp->config;
5111
5112 /* store unicast & multicast mac addresses */
5113 for (offset = 0; offset < config->max_mc_addr; offset++) {
5114 mac_addr = do_s2io_read_unicast_mc(sp, offset);
5115 /* if read fails disable the entry */
5116 if (mac_addr == FAILURE)
5117 mac_addr = S2IO_DISABLE_MAC_ENTRY;
5118 do_s2io_copy_mac_addr(sp, offset, mac_addr);
5119 }
5120}
5121
5122/* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5123static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5124{
5125 int offset;
5126 struct config_param *config = &sp->config;
5127 /* restore unicast mac address */
5128 for (offset = 0; offset < config->max_mac_addr; offset++)
5129 do_s2io_prog_unicast(sp->dev,
5130 sp->def_mac_addr[offset].mac_addr);
5131
5132 /* restore multicast mac address */
5133 for (offset = config->mc_start_offset;
5134 offset < config->max_mc_addr; offset++)
5135 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5136}
5137
5138/* add a multicast MAC address to CAM */
5139static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5140{
5141 int i;
5142 u64 mac_addr = 0;
5143 struct config_param *config = &sp->config;
5144
5145 for (i = 0; i < ETH_ALEN; i++) {
5146 mac_addr <<= 8;
5147 mac_addr |= addr[i];
5148 }
5149 if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5150 return SUCCESS;
5151
5152 /* check if the multicast mac is already present in CAM */
5153 for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5154 u64 tmp64;
5155 tmp64 = do_s2io_read_unicast_mc(sp, i);
5156 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5157 break;
5158
5159 if (tmp64 == mac_addr)
5160 return SUCCESS;
5161 }
5162 if (i == config->max_mc_addr) {
5163 DBG_PRINT(ERR_DBG,
5164 "CAM full no space left for multicast MAC\n");
5165 return FAILURE;
5166 }
5167 /* Update the internal structure with this new mac address */
5168 do_s2io_copy_mac_addr(sp, i, mac_addr);
5169
5170 return do_s2io_add_mac(sp, mac_addr, i);
5171}
5172
5173/* add MAC address to CAM */
5174static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5175{
5176 u64 val64;
5177 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5178
5179 writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5180 &bar0->rmac_addr_data0_mem);
5181
5182 val64 = RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5183 RMAC_ADDR_CMD_MEM_OFFSET(off);
5184 writeq(val64, &bar0->rmac_addr_cmd_mem);
5185
5186 /* Wait till command completes */
5187 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5188 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5189 S2IO_BIT_RESET)) {
5190 DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5191 return FAILURE;
5192 }
5193 return SUCCESS;
5194}
5195/* deletes a specified unicast/multicast mac entry from CAM */
5196static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5197{
5198 int offset;
5199 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5200 struct config_param *config = &sp->config;
5201
5202 for (offset = 1;
5203 offset < config->max_mc_addr; offset++) {
5204 tmp64 = do_s2io_read_unicast_mc(sp, offset);
5205 if (tmp64 == addr) {
5206 /* disable the entry by writing 0xffffffffffffULL */
5207 if (do_s2io_add_mac(sp, dis_addr, offset) == FAILURE)
5208 return FAILURE;
5209 /* store the new mac list from CAM */
5210 do_s2io_store_unicast_mc(sp);
5211 return SUCCESS;
5212 }
5213 }
5214 DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5215 (unsigned long long)addr);
5216 return FAILURE;
5217}
5218
5219/* read mac entries from CAM */
5220static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5221{
5222 u64 tmp64 = 0xffffffffffff0000ULL, val64;
5223 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5224
5225 /* read mac addr */
5226 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5227 RMAC_ADDR_CMD_MEM_OFFSET(offset);
5228 writeq(val64, &bar0->rmac_addr_cmd_mem);
5229
5230 /* Wait till command completes */
5231 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5232 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5233 S2IO_BIT_RESET)) {
5234 DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5235 return FAILURE;
5236 }
5237 tmp64 = readq(&bar0->rmac_addr_data0_mem);
5238
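	/* The 48-bit MAC address occupies the upper bits of data0; shift out the low 16 bits. */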
5239 return tmp64 >> 16;
5240}
5241
5242/**
5243 * s2io_set_mac_addr - driver entry point to change the device MAC address
5244 */
5245
5246static int s2io_set_mac_addr(struct net_device *dev, void *p)
5247{
5248 struct sockaddr *addr = p;
5249
5250 if (!is_valid_ether_addr(addr->sa_data))
5251 return -EINVAL;
5252
5253 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5254
5255 /* store the MAC address in CAM */
5256 return do_s2io_prog_unicast(dev, dev->dev_addr);
5257}
5258/**
5259 * do_s2io_prog_unicast - Programs the Xframe mac address
5260 * @dev : pointer to the device structure.
5261 * @addr: a uchar pointer to the new mac address which is to be set.
5262 * Description : This procedure will program the Xframe to receive
5263 * frames with the new MAC address.
5264 * Return value: SUCCESS on success and an appropriate (-)ve integer
5265 * as defined in errno.h file on failure.
5266 */
5267
5268static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5269{
5270 struct s2io_nic *sp = netdev_priv(dev);
5271 register u64 mac_addr = 0, perm_addr = 0;
5272 int i;
5273 u64 tmp64;
5274 struct config_param *config = &sp->config;
5275
5276 /*
5277 * Set the new MAC address as the new unicast filter and reflect this
5278 * change on the device address registered with the OS. It will be
5279 * at offset 0.
5280 */
5281 for (i = 0; i < ETH_ALEN; i++) {
5282 mac_addr <<= 8;
5283 mac_addr |= addr[i];
5284 perm_addr <<= 8;
5285 perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5286 }
5287
5288 /* check if the dev_addr is different from perm_addr */
5289 if (mac_addr == perm_addr)
5290 return SUCCESS;
5291
5292 /* check if the mac is already present in CAM */
5293 for (i = 1; i < config->max_mac_addr; i++) {
5294 tmp64 = do_s2io_read_unicast_mc(sp, i);
5295 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5296 break;
5297
5298 if (tmp64 == mac_addr) {
5299 DBG_PRINT(INFO_DBG,
5300 "MAC addr:0x%llx already present in CAM\n",
5301 (unsigned long long)mac_addr);
5302 return SUCCESS;
5303 }
5304 }
5305 if (i == config->max_mac_addr) {
5306 DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5307 return FAILURE;
5308 }
5309 /* Update the internal structure with this new mac address */
5310 do_s2io_copy_mac_addr(sp, i, mac_addr);
5311
5312 return do_s2io_add_mac(sp, mac_addr, i);
5313}
5314
5315/**
5316 * s2io_ethtool_sset - Sets different link parameters.
5317 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
5318 * @info: pointer to the structure with parameters given by ethtool to set
5319 * link information.
5320 * Description:
5321 * The function sets different link parameters provided by the user onto
5322 * the NIC.
5323 * Return value:
5324 * 0 on success.
5325 */
5326
5327static int s2io_ethtool_sset(struct net_device *dev,
5328 struct ethtool_cmd *info)
5329{
5330 struct s2io_nic *sp = netdev_priv(dev);
5331 if ((info->autoneg == AUTONEG_ENABLE) ||
5332 (ethtool_cmd_speed(info) != SPEED_10000) ||
5333 (info->duplex != DUPLEX_FULL))
5334 return -EINVAL;
5335 else {
5336 s2io_close(sp->dev);
5337 s2io_open(sp->dev);
5338 }
5339
5340 return 0;
5341}
5342
5343/**
5344 * s2io_ethtool_gset - Return link specific information.
5345 * @sp : private member of the device structure, pointer to the
5346 * s2io_nic structure.
5347 * @info : pointer to the structure with parameters given by ethtool
5348 * to return link information.
5349 * Description:
5350 * Returns link specific information like speed, duplex etc.. to ethtool.
5351 * Return value :
5352 * return 0 on success.
5353 */
5354
5355static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5356{
5357 struct s2io_nic *sp = netdev_priv(dev);
5358 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5359 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5360 info->port = PORT_FIBRE;
5361
5362 /* info->transceiver */
5363 info->transceiver = XCVR_EXTERNAL;
5364
5365 if (netif_carrier_ok(sp->dev)) {
5366 ethtool_cmd_speed_set(info, SPEED_10000);
5367 info->duplex = DUPLEX_FULL;
5368 } else {
5369 ethtool_cmd_speed_set(info, -1);
5370 info->duplex = -1;
5371 }
5372
5373 info->autoneg = AUTONEG_DISABLE;
5374 return 0;
5375}
5376
5377/**
5378 * s2io_ethtool_gdrvinfo - Returns driver specific information.
5379 * @sp : private member of the device structure, which is a pointer to the
5380 * s2io_nic structure.
5381 * @info : pointer to the structure with parameters given by ethtool to
5382 * return driver information.
5383 * Description:
5384 * Returns driver specific information like name, version etc. to ethtool.
5385 * Return value:
5386 * void
5387 */
5388
5389static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5390 struct ethtool_drvinfo *info)
5391{
5392 struct s2io_nic *sp = netdev_priv(dev);
5393
5394 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5395 strncpy(info->version, s2io_driver_version, sizeof(info->version));
5396 strncpy(info->fw_version, "", sizeof(info->fw_version));
5397 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5398 info->regdump_len = XENA_REG_SPACE;
5399 info->eedump_len = XENA_EEPROM_SPACE;
5400}
5401
5402/**
5403 * s2io_ethtool_gregs - dumps the entire register space of the Xframe into the buffer.
5404 * @sp: private member of the device structure, which is a pointer to the
5405 * s2io_nic structure.
5406 * @regs : pointer to the structure with parameters given by ethtool for
5407 * dumping the registers.
5408 * @reg_space: The buffer into which all the registers are dumped.
5409 * Description:
5410 * Dumps the entire register space of xFrame NIC into the user given
5411 * buffer area.
5412 * Return value :
5413 * void .
5414 */
5415
5416static void s2io_ethtool_gregs(struct net_device *dev,
5417 struct ethtool_regs *regs, void *space)
5418{
5419 int i;
5420 u64 reg;
5421 u8 *reg_space = (u8 *)space;
5422 struct s2io_nic *sp = netdev_priv(dev);
5423
5424 regs->len = XENA_REG_SPACE;
5425 regs->version = sp->pdev->subsystem_device;
5426
5427 for (i = 0; i < regs->len; i += 8) {
5428 reg = readq(sp->bar0 + i);
5429 memcpy((reg_space + i), &reg, 8);
5430 }
5431}
5432
5433/*
5434 * s2io_set_led - control NIC led
5435 */
5436static void s2io_set_led(struct s2io_nic *sp, bool on)
5437{
5438 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5439 u16 subid = sp->pdev->subsystem_device;
5440 u64 val64;
5441
5442 if ((sp->device_type == XFRAME_II_DEVICE) ||
5443 ((subid & 0xFF) >= 0x07)) {
5444 val64 = readq(&bar0->gpio_control);
5445 if (on)
5446 val64 |= GPIO_CTRL_GPIO_0;
5447 else
5448 val64 &= ~GPIO_CTRL_GPIO_0;
5449
5450 writeq(val64, &bar0->gpio_control);
5451 } else {
5452 val64 = readq(&bar0->adapter_control);
5453 if (on)
5454 val64 |= ADAPTER_LED_ON;
5455 else
5456 val64 &= ~ADAPTER_LED_ON;
5457
5458 writeq(val64, &bar0->adapter_control);
5459 }
5460
5461}
5462
5463/**
5464 * s2io_ethtool_set_led - To physically identify the nic on the system.
5465 * @dev : network device
5466 * @state: led setting
5467 *
5468 * Description: Used to physically identify the NIC on the system.
5469 * The Link LED will blink for a time specified by the user for
5470 * identification.
5471 * NOTE: The Link has to be Up to be able to blink the LED. Hence
5472 * identification is possible only if its link is up.
5473 */
5474
5475static int s2io_ethtool_set_led(struct net_device *dev,
5476 enum ethtool_phys_id_state state)
5477{
5478 struct s2io_nic *sp = netdev_priv(dev);
5479 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5480 u16 subid = sp->pdev->subsystem_device;
5481
5482 if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
5483 u64 val64 = readq(&bar0->adapter_control);
5484 if (!(val64 & ADAPTER_CNTL_EN)) {
5485 pr_err("Adapter Link down, cannot blink LED\n");
5486 return -EAGAIN;
5487 }
5488 }
5489
5490 switch (state) {
5491 case ETHTOOL_ID_ACTIVE:
5492 sp->adapt_ctrl_org = readq(&bar0->gpio_control);
5493 return 1; /* cycle on/off once per second */
5494
5495 case ETHTOOL_ID_ON:
5496 s2io_set_led(sp, true);
5497 break;
5498
5499 case ETHTOOL_ID_OFF:
5500 s2io_set_led(sp, false);
5501 break;
5502
5503 case ETHTOOL_ID_INACTIVE:
5504 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
5505 writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
5506 }
5507
5508 return 0;
5509}
5510
5511static void s2io_ethtool_gringparam(struct net_device *dev,
5512 struct ethtool_ringparam *ering)
5513{
5514 struct s2io_nic *sp = netdev_priv(dev);
5515 int i, tx_desc_count = 0, rx_desc_count = 0;
5516
5517 if (sp->rxd_mode == RXD_MODE_1) {
5518 ering->rx_max_pending = MAX_RX_DESC_1;
5519 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5520 } else {
5521 ering->rx_max_pending = MAX_RX_DESC_2;
5522 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5523 }
5524
5525 ering->rx_mini_max_pending = 0;
5526 ering->tx_max_pending = MAX_TX_DESC;
5527
5528 for (i = 0; i < sp->config.rx_ring_num; i++)
5529 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5530 ering->rx_pending = rx_desc_count;
5531 ering->rx_jumbo_pending = rx_desc_count;
5532 ering->rx_mini_pending = 0;
5533
5534 for (i = 0; i < sp->config.tx_fifo_num; i++)
5535 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5536 ering->tx_pending = tx_desc_count;
5537 DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
5538}
5539
5540/**
5541 * s2io_ethtool_getpause_data - Pause frame generation and reception.
5542 * @sp : private member of the device structure, which is a pointer to the
5543 * s2io_nic structure.
5544 * @ep : pointer to the structure with pause parameters given by ethtool.
5545 * Description:
5546 * Returns the Pause frame generation and reception capability of the NIC.
5547 * Return value:
5548 * void
5549 */
5550static void s2io_ethtool_getpause_data(struct net_device *dev,
5551 struct ethtool_pauseparam *ep)
5552{
5553 u64 val64;
5554 struct s2io_nic *sp = netdev_priv(dev);
5555 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5556
5557 val64 = readq(&bar0->rmac_pause_cfg);
5558 if (val64 & RMAC_PAUSE_GEN_ENABLE)
5559 ep->tx_pause = true;
5560 if (val64 & RMAC_PAUSE_RX_ENABLE)
5561 ep->rx_pause = true;
5562 ep->autoneg = false;
5563}
5564
5565/**
5566 * s2io_ethtool_setpause_data - set/reset pause frame generation.
5567 * @sp : private member of the device structure, which is a pointer to the
5568 * s2io_nic structure.
5569 * @ep : pointer to the structure with pause parameters given by ethtool.
5570 * Description:
5571 * It can be used to set or reset Pause frame generation or reception
5572 * support of the NIC.
5573 * Return value:
5574 * int, returns 0 on Success
5575 */
5576
5577static int s2io_ethtool_setpause_data(struct net_device *dev,
5578 struct ethtool_pauseparam *ep)
5579{
5580 u64 val64;
5581 struct s2io_nic *sp = netdev_priv(dev);
5582 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5583
5584 val64 = readq(&bar0->rmac_pause_cfg);
5585 if (ep->tx_pause)
5586 val64 |= RMAC_PAUSE_GEN_ENABLE;
5587 else
5588 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5589 if (ep->rx_pause)
5590 val64 |= RMAC_PAUSE_RX_ENABLE;
5591 else
5592 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5593 writeq(val64, &bar0->rmac_pause_cfg);
5594 return 0;
5595}
5596
5597/**
5598 * read_eeprom - reads 4 bytes of data from user given offset.
5599 * @sp : private member of the device structure, which is a pointer to the
5600 * s2io_nic structure.
5601 * @off : offset from which the data is to be read
5602 * @data : Its an output parameter where the data read at the given
5603 * offset is stored.
5604 * Description:
5605 * Will read 4 bytes of data from the user given offset and return the
5606 * read data.
5607 * NOTE: Allows reading only the part of the EEPROM visible through the
5608 * I2C bus.
5609 * Return value:
5610 * -1 on failure and 0 on success.
5611 */
5612
5613#define S2IO_DEV_ID 5
5614static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
5615{
5616 int ret = -1;
5617 u32 exit_cnt = 0;
5618 u64 val64;
5619 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5620
5621 if (sp->device_type == XFRAME_I_DEVICE) {
5622 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5623 I2C_CONTROL_ADDR(off) |
5624 I2C_CONTROL_BYTE_CNT(0x3) |
5625 I2C_CONTROL_READ |
5626 I2C_CONTROL_CNTL_START;
5627 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5628
5629 while (exit_cnt < 5) {
5630 val64 = readq(&bar0->i2c_control);
5631 if (I2C_CONTROL_CNTL_END(val64)) {
5632 *data = I2C_CONTROL_GET_DATA(val64);
5633 ret = 0;
5634 break;
5635 }
5636 msleep(50);
5637 exit_cnt++;
5638 }
5639 }
5640
5641 if (sp->device_type == XFRAME_II_DEVICE) {
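		/* Xframe II exposes its EEPROM over SPI: issue a 3-byte read command at the given offset and poll for DONE or NACK. */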
5642 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5643 SPI_CONTROL_BYTECNT(0x3) |
5644 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5645 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5646 val64 |= SPI_CONTROL_REQ;
5647 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5648 while (exit_cnt < 5) {
5649 val64 = readq(&bar0->spi_control);
5650 if (val64 & SPI_CONTROL_NACK) {
5651 ret = 1;
5652 break;
5653 } else if (val64 & SPI_CONTROL_DONE) {
5654 *data = readq(&bar0->spi_data);
5655 *data &= 0xffffff;
5656 ret = 0;
5657 break;
5658 }
5659 msleep(50);
5660 exit_cnt++;
5661 }
5662 }
5663 return ret;
5664}
5665
5666/**
5667 * write_eeprom - actually writes the relevant part of the data value.
5668 * @sp : private member of the device structure, which is a pointer to the
5669 * s2io_nic structure.
5670 * @off : offset at which the data must be written
5671 * @data : The data that is to be written
5672 * @cnt : Number of bytes of the data that are actually to be written into
5673 * the Eeprom. (max of 3)
5674 * Description:
5675 * Actually writes the relevant part of the data value into the Eeprom
5676 * through the I2C bus.
5677 * Return value:
5678 * 0 on success, -1 on failure.
5679 */
5680
5681static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
5682{
5683 int exit_cnt = 0, ret = -1;
5684 u64 val64;
5685 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5686
5687 if (sp->device_type == XFRAME_I_DEVICE) {
5688 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5689 I2C_CONTROL_ADDR(off) |
5690 I2C_CONTROL_BYTE_CNT(cnt) |
5691 I2C_CONTROL_SET_DATA((u32)data) |
5692 I2C_CONTROL_CNTL_START;
5693 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5694
5695 while (exit_cnt < 5) {
5696 val64 = readq(&bar0->i2c_control);
5697 if (I2C_CONTROL_CNTL_END(val64)) {
5698 if (!(val64 & I2C_CONTROL_NACK))
5699 ret = 0;
5700 break;
5701 }
5702 msleep(50);
5703 exit_cnt++;
5704 }
5705 }
5706
5707 if (sp->device_type == XFRAME_II_DEVICE) {
5708 int write_cnt = (cnt == 8) ? 0 : cnt;
5709 writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);
5710
5711 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5712 SPI_CONTROL_BYTECNT(write_cnt) |
5713 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5714 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5715 val64 |= SPI_CONTROL_REQ;
5716 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5717 while (exit_cnt < 5) {
5718 val64 = readq(&bar0->spi_control);
5719 if (val64 & SPI_CONTROL_NACK) {
5720 ret = 1;
5721 break;
5722 } else if (val64 & SPI_CONTROL_DONE) {
5723 ret = 0;
5724 break;
5725 }
5726 msleep(50);
5727 exit_cnt++;
5728 }
5729 }
5730 return ret;
5731}
5732static void s2io_vpd_read(struct s2io_nic *nic)
5733{
5734 u8 *vpd_data;
5735 u8 data;
5736 int i = 0, cnt, len, fail = 0;
5737 int vpd_addr = 0x80;
5738 struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;
5739
5740 if (nic->device_type == XFRAME_II_DEVICE) {
5741 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5742 vpd_addr = 0x80;
5743 } else {
5744 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5745 vpd_addr = 0x50;
5746 }
5747 strcpy(nic->serial_num, "NOT AVAILABLE");
5748
5749 vpd_data = kmalloc(256, GFP_KERNEL);
5750 if (!vpd_data) {
5751 swstats->mem_alloc_fail_cnt++;
5752 return;
5753 }
5754 swstats->mem_allocated += 256;
5755
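	/* Walk the VPD capability in PCI config space: write the address, poll the flag byte until it reads 0x80 (data ready), then fetch the data dword. */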
5756 for (i = 0; i < 256; i += 4) {
5757 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5758 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
5759 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5760 for (cnt = 0; cnt < 5; cnt++) {
5761 msleep(2);
5762 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5763 if (data == 0x80)
5764 break;
5765 }
5766 if (cnt >= 5) {
5767 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5768 fail = 1;
5769 break;
5770 }
5771 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
5772 (u32 *)&vpd_data[i]);
5773 }
5774
5775 if (!fail) {
5776 /* read serial number of adapter */
5777 for (cnt = 0; cnt < 252; cnt++) {
5778 if ((vpd_data[cnt] == 'S') &&
5779 (vpd_data[cnt+1] == 'N')) {
5780 len = vpd_data[cnt+2];
5781 if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
5782 memcpy(nic->serial_num,
5783 &vpd_data[cnt + 3],
5784 len);
5785 memset(nic->serial_num+len,
5786 0,
5787 VPD_STRING_LEN-len);
5788 break;
5789 }
5790 }
5791 }
5792 }
5793
5794 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5795 len = vpd_data[1];
5796 memcpy(nic->product_name, &vpd_data[3], len);
5797 nic->product_name[len] = 0;
5798 }
5799 kfree(vpd_data);
5800 swstats->mem_freed += 256;
5801}
5802
5803/**
5804 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
5805 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
5806 * @eeprom : pointer to the user level structure provided by ethtool,
5807 * containing all relevant information.
5808 * @data_buf : user defined value to be written into Eeprom.
5809 * Description: Reads the values stored in the Eeprom at the given offset
5810 * for a given length. Stores these values into the output argument data
5811 * buffer 'data_buf' and returns them to the caller (ethtool).
5812 * Return value:
5813 * int 0 on success
5814 */
5815
5816static int s2io_ethtool_geeprom(struct net_device *dev,
5817 struct ethtool_eeprom *eeprom, u8 * data_buf)
5818{
5819 u32 i, valid;
5820 u64 data;
5821 struct s2io_nic *sp = netdev_priv(dev);
5822
5823 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5824
5825 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5826 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5827
5828 for (i = 0; i < eeprom->len; i += 4) {
5829 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5830 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5831 return -EFAULT;
5832 }
5833 valid = INV(data);
5834 memcpy((data_buf + i), &valid, 4);
5835 }
5836 return 0;
5837}
5838
5839/**
5840 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5841 * @sp : private member of the device structure, which is a pointer to the
5842 * s2io_nic structure.
5843 * @eeprom : pointer to the user level structure provided by ethtool,
5844 * containing all relevant information.
5845 * @data_buf : user defined value to be written into Eeprom.
5846 * Description:
5847 * Tries to write the user provided value in the Eeprom, at the offset
5848 * given by the user.
5849 * Return value:
5850 * 0 on success, -EFAULT on failure.
5851 */
5852
5853static int s2io_ethtool_seeprom(struct net_device *dev,
5854 struct ethtool_eeprom *eeprom,
5855 u8 *data_buf)
5856{
5857 int len = eeprom->len, cnt = 0;
5858 u64 valid = 0, data;
5859 struct s2io_nic *sp = netdev_priv(dev);
5860
5861 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5862 DBG_PRINT(ERR_DBG,
5863 "ETHTOOL_WRITE_EEPROM Err: "
5864 "Magic value is wrong, it is 0x%x should be 0x%x\n",
5865 (sp->pdev->vendor | (sp->pdev->device << 16)),
5866 eeprom->magic);
5867 return -EFAULT;
5868 }
5869
5870 while (len) {
5871 data = (u32)data_buf[cnt] & 0x000000FF;
5872 if (data)
5873 valid = (u32)(data << 24);
5874 else
5875 valid = data;
5876
5877 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5878 DBG_PRINT(ERR_DBG,
5879 "ETHTOOL_WRITE_EEPROM Err: "
5880 "Cannot write into the specified offset\n");
5881 return -EFAULT;
5882 }
5883 cnt++;
5884 len--;
5885 }
5886
5887 return 0;
5888}
5889
5890/**
5891 * s2io_register_test - reads and writes into all clock domains.
5892 * @sp : private member of the device structure, which is a pointer to the
5893 * s2io_nic structure.
5894 * @data : variable that returns the result of each of the tests conducted
5895 * by the driver.
5896 * Description:
5897 * Reads and writes into all clock domains. The NIC has 3 clock domains;
5898 * the test verifies that registers in all three regions are accessible.
5899 * Return value:
5900 * 0 on success.
5901 */
5902
5903static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
5904{
5905 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5906 u64 val64 = 0, exp_val;
5907 int fail = 0;
5908
5909 val64 = readq(&bar0->pif_rd_swapper_fb);
5910 if (val64 != 0x123456789abcdefULL) {
5911 fail = 1;
5912 DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
5913 }
5914
5915 val64 = readq(&bar0->rmac_pause_cfg);
5916 if (val64 != 0xc000ffff00000000ULL) {
5917 fail = 1;
5918 DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
5919 }
5920
5921 val64 = readq(&bar0->rx_queue_cfg);
5922 if (sp->device_type == XFRAME_II_DEVICE)
5923 exp_val = 0x0404040404040404ULL;
5924 else
5925 exp_val = 0x0808080808080808ULL;
5926 if (val64 != exp_val) {
5927 fail = 1;
5928 DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
5929 }
5930
5931 val64 = readq(&bar0->xgxs_efifo_cfg);
5932 if (val64 != 0x000000001923141EULL) {
5933 fail = 1;
5934 DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
5935 }
5936
5937 val64 = 0x5A5A5A5A5A5A5A5AULL;
5938 writeq(val64, &bar0->xmsi_data);
5939 val64 = readq(&bar0->xmsi_data);
5940 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5941 fail = 1;
5942 DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
5943 }
5944
5945 val64 = 0xA5A5A5A5A5A5A5A5ULL;
5946 writeq(val64, &bar0->xmsi_data);
5947 val64 = readq(&bar0->xmsi_data);
5948 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5949 fail = 1;
5950 DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
5951 }
5952
5953 *data = fail;
5954 return fail;
5955}
5956
5957/**
5958 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5959 * @sp : private member of the device structure, which is a pointer to the
5960 * s2io_nic structure.
5961 * @data:variable that returns the result of each of the test conducted by
5962 * the driver.
5963 * Description:
5964 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5965 * register.
5966 * Return value:
5967 * 0 on success.
5968 */
5969
5970static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
5971{
5972 int fail = 0;
5973 u64 ret_data, org_4F0, org_7F0;
5974 u8 saved_4F0 = 0, saved_7F0 = 0;
5975 struct net_device *dev = sp->dev;
5976
5977 /* Test Write Error at offset 0 */
5978 /* Note that SPI interface allows write access to all areas
5979 * of EEPROM. Hence doing all negative testing only for Xframe I.
5980 */
5981 if (sp->device_type == XFRAME_I_DEVICE)
5982 if (!write_eeprom(sp, 0, 0, 3))
5983 fail = 1;
5984
5985 /* Save current values at offsets 0x4F0 and 0x7F0 */
5986 if (!read_eeprom(sp, 0x4F0, &org_4F0))
5987 saved_4F0 = 1;
5988 if (!read_eeprom(sp, 0x7F0, &org_7F0))
5989 saved_7F0 = 1;
5990
5991 /* Test Write at offset 4f0 */
5992 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5993 fail = 1;
5994 if (read_eeprom(sp, 0x4F0, &ret_data))
5995 fail = 1;
5996
5997 if (ret_data != 0x012345) {
5998 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5999 "Data written %llx Data read %llx\n",
6000 dev->name, (unsigned long long)0x12345,
6001 (unsigned long long)ret_data);
6002 fail = 1;
6003 }
6004
6005 /* Reset the EEPROM data to 0xFFFFFF */
6006 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
6007
6008 /* Test Write Request Error at offset 0x7c */
6009 if (sp->device_type == XFRAME_I_DEVICE)
6010 if (!write_eeprom(sp, 0x07C, 0, 3))
6011 fail = 1;
6012
6013 /* Test Write Request at offset 0x7f0 */
6014 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
6015 fail = 1;
6016 if (read_eeprom(sp, 0x7F0, &ret_data))
6017 fail = 1;
6018
6019 if (ret_data != 0x012345) {
6020 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
6021 "Data written %llx Data read %llx\n",
6022 dev->name, (unsigned long long)0x12345,
6023 (unsigned long long)ret_data);
6024 fail = 1;
6025 }
6026
6027 /* Reset the EEPROM data to 0xFFFFFF */
6028 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
6029
6030 if (sp->device_type == XFRAME_I_DEVICE) {
6031 /* Test Write Error at offset 0x80 */
6032 if (!write_eeprom(sp, 0x080, 0, 3))
6033 fail = 1;
6034
6035 /* Test Write Error at offset 0xfc */
6036 if (!write_eeprom(sp, 0x0FC, 0, 3))
6037 fail = 1;
6038
6039 /* Test Write Error at offset 0x100 */
6040 if (!write_eeprom(sp, 0x100, 0, 3))
6041 fail = 1;
6042
6043 /* Test Write Error at offset 4ec */
6044 if (!write_eeprom(sp, 0x4EC, 0, 3))
6045 fail = 1;
6046 }
6047
6048 /* Restore values at offsets 0x4F0 and 0x7F0 */
6049 if (saved_4F0)
6050 write_eeprom(sp, 0x4F0, org_4F0, 3);
6051 if (saved_7F0)
6052 write_eeprom(sp, 0x7F0, org_7F0, 3);
6053
6054 *data = fail;
6055 return fail;
6056}
6057
6058/**
6059 * s2io_bist_test - invokes the MemBist test of the card .
6060 * @sp : private member of the device structure, which is a pointer to the
6061 * s2io_nic structure.
6062 * @data:variable that returns the result of each of the test conducted by
6063 * the driver.
6064 * Description:
6065 * This invokes the MemBist test of the card. We give around
6066 * 2 secs time for the Test to complete. If it's still not complete
6067 * within this period, we consider that the test failed.
6068 * Return value:
6069 * 0 on success and -1 on failure.
6070 */
6071
6072static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
6073{
6074 u8 bist = 0;
6075 int cnt = 0, ret = -1;
6076
6077 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6078 bist |= PCI_BIST_START;
6079 pci_write_config_word(sp->pdev, PCI_BIST, bist);
6080
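	/* Poll up to 2 seconds (20 x 100 ms) for the BIST start bit to clear; the BIST completion code is returned in *data. */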
6081 while (cnt < 20) {
6082 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6083 if (!(bist & PCI_BIST_START)) {
6084 *data = (bist & PCI_BIST_CODE_MASK);
6085 ret = 0;
6086 break;
6087 }
6088 msleep(100);
6089 cnt++;
6090 }
6091
6092 return ret;
6093}
6094
6095/**
6096 * s2io_link_test - verifies the link state of the nic
6097 * @sp : private member of the device structure, which is a pointer to the
6098 * s2io_nic structure.
6099 * @data: variable that returns the result of each of the test conducted by
6100 * the driver.
6101 * Description:
6102 * The function verifies the link state of the NIC and updates the input
6103 * argument 'data' appropriately.
6104 * Return value:
6105 * 0 on success.
6106 */
6107
6108static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
6109{
6110 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6111 u64 val64;
6112
6113 val64 = readq(&bar0->adapter_status);
6114 if (!(LINK_IS_UP(val64)))
6115 *data = 1;
6116 else
6117 *data = 0;
6118
6119 return *data;
6120}
6121
6122/**
6123 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6124 * @sp : private member of the device structure, which is a pointer to the
6125 * s2io_nic structure.
6126 * @data : variable that returns the result of each of the tests
6127 * conducted by the driver.
6128 * Description:
6129 * This is one of the offline tests that verifies the read and write
6130 * access to the RldRam chip on the NIC.
6131 * Return value:
6132 * 0 on success.
6133 */
6134
6135static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
6136{
6137 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6138 u64 val64;
6139 int cnt, iteration = 0, test_fail = 0;
6140
6141 val64 = readq(&bar0->adapter_control);
6142 val64 &= ~ADAPTER_ECC_EN;
6143 writeq(val64, &bar0->adapter_control);
6144
6145 val64 = readq(&bar0->mc_rldram_test_ctrl);
6146 val64 |= MC_RLDRAM_TEST_MODE;
6147 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6148
6149 val64 = readq(&bar0->mc_rldram_mrs);
6150 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
6151 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6152
6153 val64 |= MC_RLDRAM_MRS_ENABLE;
6154 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6155
6156 while (iteration < 2) {
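		/* Two passes: write a set of test patterns, then their complements (upper 48 bits), and check the PASS bit after reading them back. */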
6157 val64 = 0x55555555aaaa0000ULL;
6158 if (iteration == 1)
6159 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6160 writeq(val64, &bar0->mc_rldram_test_d0);
6161
6162 val64 = 0xaaaa5a5555550000ULL;
6163 if (iteration == 1)
6164 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6165 writeq(val64, &bar0->mc_rldram_test_d1);
6166
6167 val64 = 0x55aaaaaaaa5a0000ULL;
6168 if (iteration == 1)
6169 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6170 writeq(val64, &bar0->mc_rldram_test_d2);
6171
6172 val64 = (u64) (0x0000003ffffe0100ULL);
6173 writeq(val64, &bar0->mc_rldram_test_add);
6174
6175 val64 = MC_RLDRAM_TEST_MODE |
6176 MC_RLDRAM_TEST_WRITE |
6177 MC_RLDRAM_TEST_GO;
6178 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6179
6180 for (cnt = 0; cnt < 5; cnt++) {
6181 val64 = readq(&bar0->mc_rldram_test_ctrl);
6182 if (val64 & MC_RLDRAM_TEST_DONE)
6183 break;
6184 msleep(200);
6185 }
6186
6187 if (cnt == 5)
6188 break;
6189
6190 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
6191 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6192
6193 for (cnt = 0; cnt < 5; cnt++) {
6194 val64 = readq(&bar0->mc_rldram_test_ctrl);
6195 if (val64 & MC_RLDRAM_TEST_DONE)
6196 break;
6197 msleep(500);
6198 }
6199
6200 if (cnt == 5)
6201 break;
6202
6203 val64 = readq(&bar0->mc_rldram_test_ctrl);
6204 if (!(val64 & MC_RLDRAM_TEST_PASS))
6205 test_fail = 1;
6206
6207 iteration++;
6208 }
6209
6210 *data = test_fail;
6211
6212 /* Bring the adapter out of test mode */
6213 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
6214
6215 return test_fail;
6216}
6217
6218/**
6219 * s2io_ethtool_test - conducts 5 tests to determine the health of the card.
6220 * @sp : private member of the device structure, which is a pointer to the
6221 * s2io_nic structure.
6222 * @ethtest : pointer to an ethtool command specific structure that will be
6223 * returned to the user.
6224 * @data : variable that returns the result of each of the tests
6225 * conducted by the driver.
6226 * Description:
6227 * This function conducts 5 tests (4 offline and 1 online) to determine
6228 * the health of the card.
6229 * Return value:
6230 * void
6231 */
6232
6233static void s2io_ethtool_test(struct net_device *dev,
6234 struct ethtool_test *ethtest,
6235 uint64_t *data)
6236{
6237 struct s2io_nic *sp = netdev_priv(dev);
6238 int orig_state = netif_running(sp->dev);
6239
6240 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6241 /* Offline Tests. */
6242 if (orig_state)
6243 s2io_close(sp->dev);
6244
6245 if (s2io_register_test(sp, &data[0]))
6246 ethtest->flags |= ETH_TEST_FL_FAILED;
6247
6248 s2io_reset(sp);
6249
6250 if (s2io_rldram_test(sp, &data[3]))
6251 ethtest->flags |= ETH_TEST_FL_FAILED;
6252
6253 s2io_reset(sp);
6254
6255 if (s2io_eeprom_test(sp, &data[1]))
6256 ethtest->flags |= ETH_TEST_FL_FAILED;
6257
6258 if (s2io_bist_test(sp, &data[4]))
6259 ethtest->flags |= ETH_TEST_FL_FAILED;
6260
6261 if (orig_state)
6262 s2io_open(sp->dev);
6263
6264 data[2] = 0;
6265 } else {
6266 /* Online Tests. */
6267 if (!orig_state) {
6268 DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
6269 dev->name);
6270 data[0] = -1;
6271 data[1] = -1;
6272 data[2] = -1;
6273 data[3] = -1;
6274 data[4] = -1;
6275 }
6276
6277 if (s2io_link_test(sp, &data[2]))
6278 ethtest->flags |= ETH_TEST_FL_FAILED;
6279
6280 data[0] = 0;
6281 data[1] = 0;
6282 data[3] = 0;
6283 data[4] = 0;
6284 }
6285}
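/*
 * Note: these self-tests are what user space typically triggers with
 * "ethtool -t <iface> [offline]".  As implemented above, the results land
 * in data[0]..data[4] as the register, eeprom, link, rldram and bist tests
 * respectively.
 */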
6286
6287static void s2io_get_ethtool_stats(struct net_device *dev,
6288 struct ethtool_stats *estats,
6289 u64 *tmp_stats)
6290{
6291 int i = 0, k;
6292 struct s2io_nic *sp = netdev_priv(dev);
6293 struct stat_block *stats = sp->mac_control.stats_info;
6294 struct swStat *swstats = &stats->sw_stat;
6295 struct xpakStat *xstats = &stats->xpak_stat;
6296
6297 s2io_updt_stats(sp);
6298 tmp_stats[i++] =
6299 (u64)le32_to_cpu(stats->tmac_frms_oflow) << 32 |
6300 le32_to_cpu(stats->tmac_frms);
6301 tmp_stats[i++] =
6302 (u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
6303 le32_to_cpu(stats->tmac_data_octets);
6304 tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
6305 tmp_stats[i++] =
6306 (u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
6307 le32_to_cpu(stats->tmac_mcst_frms);
6308 tmp_stats[i++] =
6309 (u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
6310 le32_to_cpu(stats->tmac_bcst_frms);
6311 tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
6312 tmp_stats[i++] =
6313 (u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
6314 le32_to_cpu(stats->tmac_ttl_octets);
6315 tmp_stats[i++] =
6316 (u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
6317 le32_to_cpu(stats->tmac_ucst_frms);
6318 tmp_stats[i++] =
6319 (u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
6320 le32_to_cpu(stats->tmac_nucst_frms);
6321 tmp_stats[i++] =
6322 (u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
6323 le32_to_cpu(stats->tmac_any_err_frms);
6324 tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
6325 tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
6326 tmp_stats[i++] =
6327 (u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
6328 le32_to_cpu(stats->tmac_vld_ip);
6329 tmp_stats[i++] =
6330 (u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
6331 le32_to_cpu(stats->tmac_drop_ip);
6332 tmp_stats[i++] =
6333 (u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
6334 le32_to_cpu(stats->tmac_icmp);
6335 tmp_stats[i++] =
6336 (u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
6337 le32_to_cpu(stats->tmac_rst_tcp);
6338 tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
6339 tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
6340 le32_to_cpu(stats->tmac_udp);
6341 tmp_stats[i++] =
6342 (u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
6343 le32_to_cpu(stats->rmac_vld_frms);
6344 tmp_stats[i++] =
6345 (u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
6346 le32_to_cpu(stats->rmac_data_octets);
6347 tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
6348 tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
6349 tmp_stats[i++] =
6350 (u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
6351 le32_to_cpu(stats->rmac_vld_mcst_frms);
6352 tmp_stats[i++] =
6353 (u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
6354 le32_to_cpu(stats->rmac_vld_bcst_frms);
6355 tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
6356 tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
6357 tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
6358 tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
6359 tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
6360 tmp_stats[i++] =
6361 (u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
6362 le32_to_cpu(stats->rmac_ttl_octets);
6363 tmp_stats[i++] =
6364 (u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
6365 | le32_to_cpu(stats->rmac_accepted_ucst_frms);
6366 tmp_stats[i++] =
6367 (u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
6368 << 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
6369 tmp_stats[i++] =
6370 (u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
6371 le32_to_cpu(stats->rmac_discarded_frms);
6372 tmp_stats[i++] =
6373 (u64)le32_to_cpu(stats->rmac_drop_events_oflow)
6374 << 32 | le32_to_cpu(stats->rmac_drop_events);
6375 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
6376 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
6377 tmp_stats[i++] =
6378 (u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
6379 le32_to_cpu(stats->rmac_usized_frms);
6380 tmp_stats[i++] =
6381 (u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
6382 le32_to_cpu(stats->rmac_osized_frms);
6383 tmp_stats[i++] =
6384 (u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
6385 le32_to_cpu(stats->rmac_frag_frms);
6386 tmp_stats[i++] =
6387 (u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
6388 le32_to_cpu(stats->rmac_jabber_frms);
6389 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
6390 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
6391 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
6392 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
6393 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
6394 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
6395 tmp_stats[i++] =
6396 (u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
6397 le32_to_cpu(stats->rmac_ip);
6398 tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
6399 tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
6400 tmp_stats[i++] =
6401 (u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
6402 le32_to_cpu(stats->rmac_drop_ip);
6403 tmp_stats[i++] =
6404 (u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
6405 le32_to_cpu(stats->rmac_icmp);
6406 tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
6407 tmp_stats[i++] =
6408 (u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
6409 le32_to_cpu(stats->rmac_udp);
6410 tmp_stats[i++] =
6411 (u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
6412 le32_to_cpu(stats->rmac_err_drp_udp);
6413 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
6414 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
6415 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
6416 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
6417 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
6418 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
6419 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
6420 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
6421 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
6422 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
6423 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
6424 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
6425 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
6426 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
6427 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
6428 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
6429 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
6430 tmp_stats[i++] =
6431 (u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
6432 le32_to_cpu(stats->rmac_pause_cnt);
6433 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
6434 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
6435 tmp_stats[i++] =
6436 (u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
6437 le32_to_cpu(stats->rmac_accepted_ip);
6438 tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);
6439 tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
6440 tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
6441 tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
6442 tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
6443 tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
6444 tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
6445 tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
6446 tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
6447 tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
6448 tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
6449 tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
6450 tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
6451 tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
6452 tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
6453 tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
6454 tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
6455 tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
6456 tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);
6457
6458 /* Enhanced statistics exist only for Hercules */
6459 if (sp->device_type == XFRAME_II_DEVICE) {
6460 tmp_stats[i++] =
6461 le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
6462 tmp_stats[i++] =
6463 le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
6464 tmp_stats[i++] =
6465 le64_to_cpu(stats->rmac_ttl_8192_max_frms);
6466 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
6467 tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
6468 tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
6469 tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
6470 tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
6471 tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
6472 tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
6473 tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
6474 tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
6475 tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
6476 tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
6477 tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
6478 tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
6479 }
6480
6481 tmp_stats[i++] = 0;
6482 tmp_stats[i++] = swstats->single_ecc_errs;
6483 tmp_stats[i++] = swstats->double_ecc_errs;
6484 tmp_stats[i++] = swstats->parity_err_cnt;
6485 tmp_stats[i++] = swstats->serious_err_cnt;
6486 tmp_stats[i++] = swstats->soft_reset_cnt;
6487 tmp_stats[i++] = swstats->fifo_full_cnt;
6488 for (k = 0; k < MAX_RX_RINGS; k++)
6489 tmp_stats[i++] = swstats->ring_full_cnt[k];
6490 tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
6491 tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
6492 tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
6493 tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
6494 tmp_stats[i++] = xstats->alarm_laser_output_power_high;
6495 tmp_stats[i++] = xstats->alarm_laser_output_power_low;
6496 tmp_stats[i++] = xstats->warn_transceiver_temp_high;
6497 tmp_stats[i++] = xstats->warn_transceiver_temp_low;
6498 tmp_stats[i++] = xstats->warn_laser_bias_current_high;
6499 tmp_stats[i++] = xstats->warn_laser_bias_current_low;
6500 tmp_stats[i++] = xstats->warn_laser_output_power_high;
6501 tmp_stats[i++] = xstats->warn_laser_output_power_low;
6502 tmp_stats[i++] = swstats->clubbed_frms_cnt;
6503 tmp_stats[i++] = swstats->sending_both;
6504 tmp_stats[i++] = swstats->outof_sequence_pkts;
6505 tmp_stats[i++] = swstats->flush_max_pkts;
6506 if (swstats->num_aggregations) {
6507 u64 tmp = swstats->sum_avg_pkts_aggregated;
6508 int count = 0;
6509 /*
6510 * Since 64-bit divide does not work on all platforms,
6511 * do repeated subtraction.
6512 */
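		/*
		 * Aside: do_div() from <asm/div64.h> would be the usual
		 * kernel helper for a 64-bit division; the subtraction loop
		 * below is a portable alternative and only iterates as many
		 * times as the average packets-per-aggregation value.
		 */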
6513 while (tmp >= swstats->num_aggregations) {
6514 tmp -= swstats->num_aggregations;
6515 count++;
6516 }
6517 tmp_stats[i++] = count;
6518 } else
6519 tmp_stats[i++] = 0;
6520 tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
6521 tmp_stats[i++] = swstats->pci_map_fail_cnt;
6522 tmp_stats[i++] = swstats->watchdog_timer_cnt;
6523 tmp_stats[i++] = swstats->mem_allocated;
6524 tmp_stats[i++] = swstats->mem_freed;
6525 tmp_stats[i++] = swstats->link_up_cnt;
6526 tmp_stats[i++] = swstats->link_down_cnt;
6527 tmp_stats[i++] = swstats->link_up_time;
6528 tmp_stats[i++] = swstats->link_down_time;
6529
6530 tmp_stats[i++] = swstats->tx_buf_abort_cnt;
6531 tmp_stats[i++] = swstats->tx_desc_abort_cnt;
6532 tmp_stats[i++] = swstats->tx_parity_err_cnt;
6533 tmp_stats[i++] = swstats->tx_link_loss_cnt;
6534 tmp_stats[i++] = swstats->tx_list_proc_err_cnt;
6535
6536 tmp_stats[i++] = swstats->rx_parity_err_cnt;
6537 tmp_stats[i++] = swstats->rx_abort_cnt;
6538 tmp_stats[i++] = swstats->rx_parity_abort_cnt;
6539 tmp_stats[i++] = swstats->rx_rda_fail_cnt;
6540 tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
6541 tmp_stats[i++] = swstats->rx_fcs_err_cnt;
6542 tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
6543 tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
6544 tmp_stats[i++] = swstats->rx_unkn_err_cnt;
6545 tmp_stats[i++] = swstats->tda_err_cnt;
6546 tmp_stats[i++] = swstats->pfc_err_cnt;
6547 tmp_stats[i++] = swstats->pcc_err_cnt;
6548 tmp_stats[i++] = swstats->tti_err_cnt;
6549 tmp_stats[i++] = swstats->tpa_err_cnt;
6550 tmp_stats[i++] = swstats->sm_err_cnt;
6551 tmp_stats[i++] = swstats->lso_err_cnt;
6552 tmp_stats[i++] = swstats->mac_tmac_err_cnt;
6553 tmp_stats[i++] = swstats->mac_rmac_err_cnt;
6554 tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
6555 tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
6556 tmp_stats[i++] = swstats->rc_err_cnt;
6557 tmp_stats[i++] = swstats->prc_pcix_err_cnt;
6558 tmp_stats[i++] = swstats->rpa_err_cnt;
6559 tmp_stats[i++] = swstats->rda_err_cnt;
6560 tmp_stats[i++] = swstats->rti_err_cnt;
6561 tmp_stats[i++] = swstats->mc_err_cnt;
6562}
6563
6564static int s2io_ethtool_get_regs_len(struct net_device *dev)
6565{
6566 return XENA_REG_SPACE;
6567}
6568
6569
6570static int s2io_get_eeprom_len(struct net_device *dev)
6571{
6572 return XENA_EEPROM_SPACE;
6573}
6574
6575static int s2io_get_sset_count(struct net_device *dev, int sset)
6576{
6577 struct s2io_nic *sp = netdev_priv(dev);
6578
6579 switch (sset) {
6580 case ETH_SS_TEST:
6581 return S2IO_TEST_LEN;
6582 case ETH_SS_STATS:
6583 switch (sp->device_type) {
6584 case XFRAME_I_DEVICE:
6585 return XFRAME_I_STAT_LEN;
6586 case XFRAME_II_DEVICE:
6587 return XFRAME_II_STAT_LEN;
6588 default:
6589 return 0;
6590 }
6591 default:
6592 return -EOPNOTSUPP;
6593 }
6594}
6595
6596static void s2io_ethtool_get_strings(struct net_device *dev,
6597 u32 stringset, u8 *data)
6598{
6599 int stat_size = 0;
6600 struct s2io_nic *sp = netdev_priv(dev);
6601
6602 switch (stringset) {
6603 case ETH_SS_TEST:
6604 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6605 break;
6606 case ETH_SS_STATS:
6607 stat_size = sizeof(ethtool_xena_stats_keys);
6608 memcpy(data, &ethtool_xena_stats_keys, stat_size);
6609 if (sp->device_type == XFRAME_II_DEVICE) {
6610 memcpy(data + stat_size,
6611 &ethtool_enhanced_stats_keys,
6612 sizeof(ethtool_enhanced_stats_keys));
6613 stat_size += sizeof(ethtool_enhanced_stats_keys);
6614 }
6615
6616 memcpy(data + stat_size, &ethtool_driver_stats_keys,
6617 sizeof(ethtool_driver_stats_keys));
6618 }
6619}
6620
6621static int s2io_set_features(struct net_device *dev, u32 features)
6622{
6623 struct s2io_nic *sp = netdev_priv(dev);
6624 u32 changed = (features ^ dev->features) & NETIF_F_LRO;
6625
6626 if (changed && netif_running(dev)) {
6627 int rc;
6628
6629 s2io_stop_all_tx_queue(sp);
6630 s2io_card_down(sp);
6631 dev->features = features;
6632 rc = s2io_card_up(sp);
6633 if (rc)
6634 s2io_reset(sp);
6635 else
6636 s2io_start_all_tx_queue(sp);
6637
6638 return rc ? rc : 1;
6639 }
6640
6641 return 0;
6642}
6643
6644static const struct ethtool_ops netdev_ethtool_ops = {
6645 .get_settings = s2io_ethtool_gset,
6646 .set_settings = s2io_ethtool_sset,
6647 .get_drvinfo = s2io_ethtool_gdrvinfo,
6648 .get_regs_len = s2io_ethtool_get_regs_len,
6649 .get_regs = s2io_ethtool_gregs,
6650 .get_link = ethtool_op_get_link,
6651 .get_eeprom_len = s2io_get_eeprom_len,
6652 .get_eeprom = s2io_ethtool_geeprom,
6653 .set_eeprom = s2io_ethtool_seeprom,
6654 .get_ringparam = s2io_ethtool_gringparam,
6655 .get_pauseparam = s2io_ethtool_getpause_data,
6656 .set_pauseparam = s2io_ethtool_setpause_data,
6657 .self_test = s2io_ethtool_test,
6658 .get_strings = s2io_ethtool_get_strings,
6659 .set_phys_id = s2io_ethtool_set_led,
6660 .get_ethtool_stats = s2io_get_ethtool_stats,
6661 .get_sset_count = s2io_get_sset_count,
6662};
6663
6664/**
6665 * s2io_ioctl - Entry point for the Ioctl
6666 * @dev : Device pointer.
6667 * @ifr : An IOCTL specific structure, that can contain a pointer to
6668 * a proprietary structure used to pass information to the driver.
6669 * @cmd : This is used to distinguish between the different commands that
6670 * can be passed to the IOCTL functions.
6671 * Description:
6672 * Currently there is no special functionality supported in IOCTL, hence the
6673 * function always returns -EOPNOTSUPP.
6674 */
6675
6676static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6677{
6678 return -EOPNOTSUPP;
6679}
6680
6681/**
6682 * s2io_change_mtu - entry point to change MTU size for the device.
6683 * @dev : device pointer.
6684 * @new_mtu : the new MTU size for the device.
6685 * Description: A driver entry point to change MTU size for the device.
6686 * Before changing the MTU the device must be stopped.
6687 * Return value:
6688 * 0 on success and an appropriate (-)ve integer as defined in errno.h
6689 * file on failure.
6690 */
6691
6692static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6693{
6694 struct s2io_nic *sp = netdev_priv(dev);
6695 int ret = 0;
6696
6697 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6698 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n", dev->name);
6699 return -EPERM;
6700 }
6701
6702 dev->mtu = new_mtu;
6703 if (netif_running(dev)) {
6704 s2io_stop_all_tx_queue(sp);
6705 s2io_card_down(sp);
6706 ret = s2io_card_up(sp);
6707 if (ret) {
6708 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6709 __func__);
6710 return ret;
6711 }
6712 s2io_wake_all_tx_queue(sp);
6713 } else { /* Device is down */
6714 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6715 u64 val64 = new_mtu;
6716
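		/*
		 * Device is down: only the RMAC maximum payload length
		 * register is updated, so the new MTU takes effect when
		 * the interface is next brought up.
		 */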
6717 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6718 }
6719
6720 return ret;
6721}
6722
6723/**
6724 * s2io_set_link - Set the Link status
6725 * @data: long pointer to device private structure
6726 * Description: Sets the link status for the adapter
6727 */
6728
6729static void s2io_set_link(struct work_struct *work)
6730{
6731 struct s2io_nic *nic = container_of(work, struct s2io_nic,
6732 set_link_task);
6733 struct net_device *dev = nic->dev;
6734 struct XENA_dev_config __iomem *bar0 = nic->bar0;
6735 register u64 val64;
6736 u16 subid;
6737
6738 rtnl_lock();
6739
6740 if (!netif_running(dev))
6741 goto out_unlock;
6742
6743 if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6744 /* The card is being reset, no point doing anything */
6745 goto out_unlock;
6746 }
6747
6748 subid = nic->pdev->subsystem_device;
6749 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6750 /*
6751	 * Allow a small delay for the NIC's self-initiated
6752	 * cleanup to complete.
6753 */
6754 msleep(100);
6755 }
6756
6757 val64 = readq(&bar0->adapter_status);
6758 if (LINK_IS_UP(val64)) {
6759 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6760 if (verify_xena_quiescence(nic)) {
6761 val64 = readq(&bar0->adapter_control);
6762 val64 |= ADAPTER_CNTL_EN;
6763 writeq(val64, &bar0->adapter_control);
6764 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6765 nic->device_type, subid)) {
6766 val64 = readq(&bar0->gpio_control);
6767 val64 |= GPIO_CTRL_GPIO_0;
6768 writeq(val64, &bar0->gpio_control);
6769 val64 = readq(&bar0->gpio_control);
6770 } else {
6771 val64 |= ADAPTER_LED_ON;
6772 writeq(val64, &bar0->adapter_control);
6773 }
6774 nic->device_enabled_once = true;
6775 } else {
6776 DBG_PRINT(ERR_DBG,
6777 "%s: Error: device is not Quiescent\n",
6778 dev->name);
6779 s2io_stop_all_tx_queue(nic);
6780 }
6781 }
6782 val64 = readq(&bar0->adapter_control);
6783 val64 |= ADAPTER_LED_ON;
6784 writeq(val64, &bar0->adapter_control);
6785 s2io_link(nic, LINK_UP);
6786 } else {
6787 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6788 subid)) {
6789 val64 = readq(&bar0->gpio_control);
6790 val64 &= ~GPIO_CTRL_GPIO_0;
6791 writeq(val64, &bar0->gpio_control);
6792 val64 = readq(&bar0->gpio_control);
6793 }
6794 /* turn off LED */
6795 val64 = readq(&bar0->adapter_control);
6796 val64 = val64 & (~ADAPTER_LED_ON);
6797 writeq(val64, &bar0->adapter_control);
6798 s2io_link(nic, LINK_DOWN);
6799 }
6800 clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
6801
6802out_unlock:
6803 rtnl_unlock();
6804}
6805
6806static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6807 struct buffAdd *ba,
6808 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6809 u64 *temp2, int size)
6810{
6811 struct net_device *dev = sp->dev;
6812 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6813
6814 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6815 struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6816 /* allocate skb */
6817 if (*skb) {
6818 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6819 /*
6820			 * As Rx frames are not going to be processed,
6821			 * use the same mapped address for the RxD
6822			 * buffer pointer.
6823 */
6824 rxdp1->Buffer0_ptr = *temp0;
6825 } else {
6826 *skb = dev_alloc_skb(size);
6827 if (!(*skb)) {
6828 DBG_PRINT(INFO_DBG,
6829 "%s: Out of memory to allocate %s\n",
6830 dev->name, "1 buf mode SKBs");
6831 stats->mem_alloc_fail_cnt++;
6832 return -ENOMEM ;
6833 }
6834 stats->mem_allocated += (*skb)->truesize;
6835			/* store the mapped addr in a temp variable
6836			 * so that it can be used for the next RxD whose
6837			 * Host_Control is NULL
6838 */
6839 rxdp1->Buffer0_ptr = *temp0 =
6840 pci_map_single(sp->pdev, (*skb)->data,
6841 size - NET_IP_ALIGN,
6842 PCI_DMA_FROMDEVICE);
6843 if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
6844 goto memalloc_failed;
6845 rxdp->Host_Control = (unsigned long) (*skb);
6846 }
6847 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6848 struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6849 /* Two buffer Mode */
6850 if (*skb) {
6851 rxdp3->Buffer2_ptr = *temp2;
6852 rxdp3->Buffer0_ptr = *temp0;
6853 rxdp3->Buffer1_ptr = *temp1;
6854 } else {
6855 *skb = dev_alloc_skb(size);
6856 if (!(*skb)) {
6857 DBG_PRINT(INFO_DBG,
6858 "%s: Out of memory to allocate %s\n",
6859 dev->name,
6860 "2 buf mode SKBs");
6861 stats->mem_alloc_fail_cnt++;
6862 return -ENOMEM;
6863 }
6864 stats->mem_allocated += (*skb)->truesize;
6865 rxdp3->Buffer2_ptr = *temp2 =
6866 pci_map_single(sp->pdev, (*skb)->data,
6867 dev->mtu + 4,
6868 PCI_DMA_FROMDEVICE);
6869 if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
6870 goto memalloc_failed;
6871 rxdp3->Buffer0_ptr = *temp0 =
6872 pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
6873 PCI_DMA_FROMDEVICE);
6874 if (pci_dma_mapping_error(sp->pdev,
6875 rxdp3->Buffer0_ptr)) {
6876 pci_unmap_single(sp->pdev,
6877 (dma_addr_t)rxdp3->Buffer2_ptr,
6878 dev->mtu + 4,
6879 PCI_DMA_FROMDEVICE);
6880 goto memalloc_failed;
6881 }
6882 rxdp->Host_Control = (unsigned long) (*skb);
6883
6884 /* Buffer-1 will be dummy buffer not used */
6885 rxdp3->Buffer1_ptr = *temp1 =
6886 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6887 PCI_DMA_FROMDEVICE);
6888 if (pci_dma_mapping_error(sp->pdev,
6889 rxdp3->Buffer1_ptr)) {
6890 pci_unmap_single(sp->pdev,
6891 (dma_addr_t)rxdp3->Buffer0_ptr,
6892 BUF0_LEN, PCI_DMA_FROMDEVICE);
6893 pci_unmap_single(sp->pdev,
6894 (dma_addr_t)rxdp3->Buffer2_ptr,
6895 dev->mtu + 4,
6896 PCI_DMA_FROMDEVICE);
6897 goto memalloc_failed;
6898 }
6899 }
6900 }
6901 return 0;
6902
6903memalloc_failed:
6904 stats->pci_map_fail_cnt++;
6905 stats->mem_freed += (*skb)->truesize;
6906 dev_kfree_skb(*skb);
6907 return -ENOMEM;
6908}
6909
6910static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6911 int size)
6912{
6913 struct net_device *dev = sp->dev;
6914 if (sp->rxd_mode == RXD_MODE_1) {
6915 rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
6916 } else if (sp->rxd_mode == RXD_MODE_3B) {
6917 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6918 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6919 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
6920 }
6921}
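/*
 * Resulting RxD layout (as programmed above): in 1-buffer mode the single
 * buffer holds the whole frame; in 3B (two-buffer) mode Buffer0 carries a
 * BUF0_LEN byte header area, Buffer1 is a one-byte dummy and Buffer2
 * carries the payload of up to MTU + 4 bytes.
 */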
6922
6923static int rxd_owner_bit_reset(struct s2io_nic *sp)
6924{
6925 int i, j, k, blk_cnt = 0, size;
6926 struct config_param *config = &sp->config;
6927 struct mac_info *mac_control = &sp->mac_control;
6928 struct net_device *dev = sp->dev;
6929 struct RxD_t *rxdp = NULL;
6930 struct sk_buff *skb = NULL;
6931 struct buffAdd *ba = NULL;
6932 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6933
6934 /* Calculate the size based on ring mode */
6935 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6936 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6937 if (sp->rxd_mode == RXD_MODE_1)
6938 size += NET_IP_ALIGN;
6939 else if (sp->rxd_mode == RXD_MODE_3B)
6940 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6941
6942 for (i = 0; i < config->rx_ring_num; i++) {
6943 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
6944 struct ring_info *ring = &mac_control->rings[i];
6945
6946 blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);
6947
6948 for (j = 0; j < blk_cnt; j++) {
6949 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6950 rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
6951 if (sp->rxd_mode == RXD_MODE_3B)
6952 ba = &ring->ba[j][k];
6953 if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
6954 (u64 *)&temp0_64,
6955 (u64 *)&temp1_64,
6956 (u64 *)&temp2_64,
6957 size) == -ENOMEM) {
6958 return 0;
6959 }
6960
6961 set_rxd_buffer_size(sp, rxdp, size);
6962 wmb();
6963 /* flip the Ownership bit to Hardware */
6964 rxdp->Control_1 |= RXD_OWN_XENA;
6965 }
6966 }
6967 }
6968 return 0;
6969
6970}
6971
6972static int s2io_add_isr(struct s2io_nic *sp)
6973{
6974 int ret = 0;
6975 struct net_device *dev = sp->dev;
6976 int err = 0;
6977
6978 if (sp->config.intr_type == MSI_X)
6979 ret = s2io_enable_msi_x(sp);
6980 if (ret) {
6981 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6982 sp->config.intr_type = INTA;
6983 }
6984
6985 /*
6986 * Store the values of the MSIX table in
6987 * the struct s2io_nic structure
6988 */
6989 store_xmsi_data(sp);
6990
6991 /* After proper initialization of H/W, register ISR */
6992 if (sp->config.intr_type == MSI_X) {
6993 int i, msix_rx_cnt = 0;
6994
6995 for (i = 0; i < sp->num_entries; i++) {
6996 if (sp->s2io_entries[i].in_use == MSIX_FLG) {
6997 if (sp->s2io_entries[i].type ==
6998 MSIX_RING_TYPE) {
6999 sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
7000 dev->name, i);
7001 err = request_irq(sp->entries[i].vector,
7002 s2io_msix_ring_handle,
7003 0,
7004 sp->desc[i],
7005 sp->s2io_entries[i].arg);
7006 } else if (sp->s2io_entries[i].type ==
7007 MSIX_ALARM_TYPE) {
7008 sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
7009 dev->name, i);
7010 err = request_irq(sp->entries[i].vector,
7011 s2io_msix_fifo_handle,
7012 0,
7013 sp->desc[i],
7014 sp->s2io_entries[i].arg);
7015
7016 }
7017 /* if either data or addr is zero print it. */
7018 if (!(sp->msix_info[i].addr &&
7019 sp->msix_info[i].data)) {
7020 DBG_PRINT(ERR_DBG,
7021 "%s @Addr:0x%llx Data:0x%llx\n",
7022 sp->desc[i],
7023 (unsigned long long)
7024 sp->msix_info[i].addr,
7025 (unsigned long long)
7026 ntohl(sp->msix_info[i].data));
7027 } else
7028 msix_rx_cnt++;
7029 if (err) {
7030 remove_msix_isr(sp);
7031
7032 DBG_PRINT(ERR_DBG,
7033 "%s:MSI-X-%d registration "
7034 "failed\n", dev->name, i);
7035
7036 DBG_PRINT(ERR_DBG,
7037 "%s: Defaulting to INTA\n",
7038 dev->name);
7039 sp->config.intr_type = INTA;
7040 break;
7041 }
7042 sp->s2io_entries[i].in_use =
7043 MSIX_REGISTERED_SUCCESS;
7044 }
7045 }
7046 if (!err) {
7047 pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
7048 DBG_PRINT(INFO_DBG,
7049 "MSI-X-TX entries enabled through alarm vector\n");
7050 }
7051 }
7052 if (sp->config.intr_type == INTA) {
7053 err = request_irq((int)sp->pdev->irq, s2io_isr, IRQF_SHARED,
7054 sp->name, dev);
7055 if (err) {
7056 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
7057 dev->name);
7058 return -1;
7059 }
7060 }
7061 return 0;
7062}
7063
7064static void s2io_rem_isr(struct s2io_nic *sp)
7065{
7066 if (sp->config.intr_type == MSI_X)
7067 remove_msix_isr(sp);
7068 else
7069 remove_inta_isr(sp);
7070}
7071
7072static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
7073{
7074 int cnt = 0;
7075 struct XENA_dev_config __iomem *bar0 = sp->bar0;
7076 register u64 val64 = 0;
7077 struct config_param *config;
7078 config = &sp->config;
7079
7080 if (!is_s2io_card_up(sp))
7081 return;
7082
7083 del_timer_sync(&sp->alarm_timer);
7084 /* If s2io_set_link task is executing, wait till it completes. */
7085 while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
7086 msleep(50);
7087 clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
7088
7089 /* Disable napi */
7090 if (sp->config.napi) {
7091 int off = 0;
7092 if (config->intr_type == MSI_X) {
7093 for (; off < sp->config.rx_ring_num; off++)
7094 napi_disable(&sp->mac_control.rings[off].napi);
7095 }
7096 else
7097 napi_disable(&sp->napi);
7098 }
7099
7100 /* disable Tx and Rx traffic on the NIC */
7101 if (do_io)
7102 stop_nic(sp);
7103
7104 s2io_rem_isr(sp);
7105
7106 /* stop the tx queue, indicate link down */
7107 s2io_link(sp, LINK_DOWN);
7108
7109 /* Check if the device is Quiescent and then Reset the NIC */
7110 while (do_io) {
7111 /* As per the HW requirement we need to replenish the
7112	 * receive buffers to avoid a ring bump. Since there is
7113	 * no intention of processing the Rx frames at this point, we
7114	 * just set the ownership bit of the RxDs in each Rx
7115	 * ring to HW and set the appropriate buffer size
7116	 * based on the ring mode.
7117 */
7118 rxd_owner_bit_reset(sp);
7119
7120 val64 = readq(&bar0->adapter_status);
7121 if (verify_xena_quiescence(sp)) {
7122 if (verify_pcc_quiescent(sp, sp->device_enabled_once))
7123 break;
7124 }
7125
7126 msleep(50);
7127 cnt++;
7128 if (cnt == 10) {
7129 DBG_PRINT(ERR_DBG, "Device not Quiescent - "
7130 "adapter status reads 0x%llx\n",
7131 (unsigned long long)val64);
7132 break;
7133 }
7134 }
7135 if (do_io)
7136 s2io_reset(sp);
7137
7138 /* Free all Tx buffers */
7139 free_tx_buffers(sp);
7140
7141 /* Free all Rx buffers */
7142 free_rx_buffers(sp);
7143
7144 clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
7145}
7146
7147static void s2io_card_down(struct s2io_nic *sp)
7148{
7149 do_s2io_card_down(sp, 1);
7150}
7151
7152static int s2io_card_up(struct s2io_nic *sp)
7153{
7154 int i, ret = 0;
7155 struct config_param *config;
7156 struct mac_info *mac_control;
7157 struct net_device *dev = (struct net_device *)sp->dev;
7158 u16 interruptible;
7159
7160 /* Initialize the H/W I/O registers */
7161 ret = init_nic(sp);
7162 if (ret != 0) {
7163 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
7164 dev->name);
7165 if (ret != -EIO)
7166 s2io_reset(sp);
7167 return ret;
7168 }
7169
7170 /*
7171	 * Initialize the Rx buffers: each configured Rx ring is filled
7172	 * with receive blocks before the NIC is started.
7173 */
7174 config = &sp->config;
7175 mac_control = &sp->mac_control;
7176
7177 for (i = 0; i < config->rx_ring_num; i++) {
7178 struct ring_info *ring = &mac_control->rings[i];
7179
7180 ring->mtu = dev->mtu;
7181 ring->lro = !!(dev->features & NETIF_F_LRO);
7182 ret = fill_rx_buffers(sp, ring, 1);
7183 if (ret) {
7184 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
7185 dev->name);
7186 s2io_reset(sp);
7187 free_rx_buffers(sp);
7188 return -ENOMEM;
7189 }
7190 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
7191 ring->rx_bufs_left);
7192 }
7193
7194 /* Initialise napi */
7195 if (config->napi) {
7196 if (config->intr_type == MSI_X) {
7197 for (i = 0; i < sp->config.rx_ring_num; i++)
7198 napi_enable(&sp->mac_control.rings[i].napi);
7199 } else {
7200 napi_enable(&sp->napi);
7201 }
7202 }
7203
7204 /* Maintain the state prior to the open */
7205 if (sp->promisc_flg)
7206 sp->promisc_flg = 0;
7207 if (sp->m_cast_flg) {
7208 sp->m_cast_flg = 0;
7209 sp->all_multi_pos = 0;
7210 }
7211
7212 /* Setting its receive mode */
7213 s2io_set_multicast(dev);
7214
7215 if (dev->features & NETIF_F_LRO) {
7216 /* Initialize max aggregatable pkts per session based on MTU */
7217 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
7218		/* Check if we can use the user-provided value (if specified) */
7219 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
7220 sp->lro_max_aggr_per_sess = lro_max_pkts;
7221 }
7222
7223 /* Enable Rx Traffic and interrupts on the NIC */
7224 if (start_nic(sp)) {
7225 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
7226 s2io_reset(sp);
7227 free_rx_buffers(sp);
7228 return -ENODEV;
7229 }
7230
7231 /* Add interrupt service routine */
7232 if (s2io_add_isr(sp) != 0) {
7233 if (sp->config.intr_type == MSI_X)
7234 s2io_rem_isr(sp);
7235 s2io_reset(sp);
7236 free_rx_buffers(sp);
7237 return -ENODEV;
7238 }
7239
7240 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
7241
7242 set_bit(__S2IO_STATE_CARD_UP, &sp->state);
7243
7244 /* Enable select interrupts */
7245 en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
7246 if (sp->config.intr_type != INTA) {
7247 interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
7248 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7249 } else {
7250 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
7251 interruptible |= TX_PIC_INTR;
7252 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7253 }
7254
7255 return 0;
7256}
7257
7258/**
7259 * s2io_restart_nic - Resets the NIC.
7260 * @data : long pointer to the device private structure
7261 * Description:
7262 * This function is scheduled to be run by the s2io_tx_watchdog
7263 * function after 0.5 secs to reset the NIC. The idea is to reduce
7264 * the run time of the watchdog routine which is run holding a
7265 * spin lock.
7266 */
7267
7268static void s2io_restart_nic(struct work_struct *work)
7269{
7270 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7271 struct net_device *dev = sp->dev;
7272
7273 rtnl_lock();
7274
7275 if (!netif_running(dev))
7276 goto out_unlock;
7277
7278 s2io_card_down(sp);
7279 if (s2io_card_up(sp)) {
7280 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
7281 }
7282 s2io_wake_all_tx_queue(sp);
7283 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
7284out_unlock:
7285 rtnl_unlock();
7286}
7287
7288/**
7289 * s2io_tx_watchdog - Watchdog for transmit side.
7290 * @dev : Pointer to net device structure
7291 * Description:
7292 * This function is triggered if the Tx Queue is stopped
7293 * for a pre-defined amount of time when the Interface is still up.
7294 * If the Interface is jammed in such a situation, the hardware is
7295 * reset (by s2io_close) and restarted again (by s2io_open) to
7296 * overcome any problem that might have been caused in the hardware.
7297 * Return value:
7298 * void
7299 */
7300
7301static void s2io_tx_watchdog(struct net_device *dev)
7302{
7303 struct s2io_nic *sp = netdev_priv(dev);
7304 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7305
7306 if (netif_carrier_ok(dev)) {
7307 swstats->watchdog_timer_cnt++;
7308 schedule_work(&sp->rst_timer_task);
7309 swstats->soft_reset_cnt++;
7310 }
7311}
7312
7313/**
7314 * rx_osm_handler - To perform some OS related operations on SKB.
7315 * @sp: private member of the device structure,pointer to s2io_nic structure.
7316 * @skb : the socket buffer pointer.
7317 * @len : length of the packet
7318 * @cksum : FCS checksum of the frame.
7319 * @ring_no : the ring from which this RxD was extracted.
7320 * Description:
7321 * This function is called by the Rx interrupt service routine to perform
7322 * some OS related operations on the SKB before passing it to the upper
7323 * layers. It mainly checks if the checksum is OK; if so, it adds it to the
7324 * SKB's cksum variable, increments the Rx packet count and passes the SKB
7325 * to the upper layer. If the checksum is wrong, it increments the Rx
7326 * packet error count, frees the SKB and returns error.
7327 * Return value:
7328 * SUCCESS on success and -1 on failure.
7329 */
7330static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7331{
7332 struct s2io_nic *sp = ring_data->nic;
7333 struct net_device *dev = (struct net_device *)ring_data->dev;
7334 struct sk_buff *skb = (struct sk_buff *)
7335 ((unsigned long)rxdp->Host_Control);
7336 int ring_no = ring_data->ring_no;
7337 u16 l3_csum, l4_csum;
7338 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7339 struct lro *uninitialized_var(lro);
7340 u8 err_mask;
7341 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7342
7343 skb->dev = dev;
7344
7345 if (err) {
7346 /* Check for parity error */
7347 if (err & 0x1)
7348 swstats->parity_err_cnt++;
7349
7350 err_mask = err >> 48;
7351 switch (err_mask) {
7352 case 1:
7353 swstats->rx_parity_err_cnt++;
7354 break;
7355
7356 case 2:
7357 swstats->rx_abort_cnt++;
7358 break;
7359
7360 case 3:
7361 swstats->rx_parity_abort_cnt++;
7362 break;
7363
7364 case 4:
7365 swstats->rx_rda_fail_cnt++;
7366 break;
7367
7368 case 5:
7369 swstats->rx_unkn_prot_cnt++;
7370 break;
7371
7372 case 6:
7373 swstats->rx_fcs_err_cnt++;
7374 break;
7375
7376 case 7:
7377 swstats->rx_buf_size_err_cnt++;
7378 break;
7379
7380 case 8:
7381 swstats->rx_rxd_corrupt_cnt++;
7382 break;
7383
7384 case 15:
7385 swstats->rx_unkn_err_cnt++;
7386 break;
7387 }
7388 /*
7389 * Drop the packet if bad transfer code. Exception being
7390 * 0x5, which could be due to unsupported IPv6 extension header.
7391		 * In this case, we let the stack handle the packet.
7392		 * Note that in this case, since the checksum will be incorrect,
7393		 * the stack will validate it.
7394 */
7395 if (err_mask != 0x5) {
7396 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7397 dev->name, err_mask);
7398 dev->stats.rx_crc_errors++;
7399 swstats->mem_freed
7400 += skb->truesize;
7401 dev_kfree_skb(skb);
7402 ring_data->rx_bufs_left -= 1;
7403 rxdp->Host_Control = 0;
7404 return 0;
7405 }
7406 }
7407
7408 rxdp->Host_Control = 0;
7409 if (sp->rxd_mode == RXD_MODE_1) {
7410 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7411
7412 skb_put(skb, len);
7413 } else if (sp->rxd_mode == RXD_MODE_3B) {
7414 int get_block = ring_data->rx_curr_get_info.block_index;
7415 int get_off = ring_data->rx_curr_get_info.offset;
7416 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7417 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7418 unsigned char *buff = skb_push(skb, buf0_len);
7419
7420 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7421 memcpy(buff, ba->ba_0, buf0_len);
7422 skb_put(skb, buf2_len);
7423 }
7424
7425 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
7426 ((!ring_data->lro) ||
7427 (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
7428 (dev->features & NETIF_F_RXCSUM)) {
7429 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7430 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7431 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7432 /*
7433 * NIC verifies if the Checksum of the received
7434 * frame is Ok or not and accordingly returns
7435 * a flag in the RxD.
7436 */
7437 skb->ip_summed = CHECKSUM_UNNECESSARY;
7438 if (ring_data->lro) {
7439 u32 tcp_len = 0;
7440 u8 *tcp;
7441 int ret = 0;
7442
7443 ret = s2io_club_tcp_session(ring_data,
7444 skb->data, &tcp,
7445 &tcp_len, &lro,
7446 rxdp, sp);
7447 switch (ret) {
7448 case 3: /* Begin anew */
7449 lro->parent = skb;
7450 goto aggregate;
7451 case 1: /* Aggregate */
7452 lro_append_pkt(sp, lro, skb, tcp_len);
7453 goto aggregate;
7454 case 4: /* Flush session */
7455 lro_append_pkt(sp, lro, skb, tcp_len);
7456 queue_rx_frame(lro->parent,
7457 lro->vlan_tag);
7458 clear_lro_session(lro);
7459 swstats->flush_max_pkts++;
7460 goto aggregate;
7461 case 2: /* Flush both */
7462 lro->parent->data_len = lro->frags_len;
7463 swstats->sending_both++;
7464 queue_rx_frame(lro->parent,
7465 lro->vlan_tag);
7466 clear_lro_session(lro);
7467 goto send_up;
7468 case 0: /* sessions exceeded */
7469 case -1: /* non-TCP or not L2 aggregatable */
7470 case 5: /*
7471 * First pkt in session not
7472 * L3/L4 aggregatable
7473 */
7474 break;
7475 default:
7476 DBG_PRINT(ERR_DBG,
7477 "%s: Samadhana!!\n",
7478 __func__);
7479 BUG();
7480 }
7481 }
7482 } else {
7483 /*
7484 * Packet with erroneous checksum, let the
7485 * upper layers deal with it.
7486 */
7487 skb_checksum_none_assert(skb);
7488 }
7489 } else
7490 skb_checksum_none_assert(skb);
7491
7492 swstats->mem_freed += skb->truesize;
7493send_up:
7494 skb_record_rx_queue(skb, ring_no);
7495 queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
7496aggregate:
7497 sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
7498 return SUCCESS;
7499}
7500
7501/**
7502 * s2io_link - stops/starts the Tx queue.
7503 * @sp : private member of the device structure, which is a pointer to the
7504 * s2io_nic structure.
7505 * @link : indicates whether link is UP/DOWN.
7506 * Description:
7507 * This function stops/starts the Tx queue depending on whether the link
7508 * status of the NIC is down or up. This is called by the Alarm
7509 * interrupt handler whenever a link change interrupt comes up.
7510 * Return value:
7511 * void.
7512 */
7513
7514static void s2io_link(struct s2io_nic *sp, int link)
7515{
7516 struct net_device *dev = (struct net_device *)sp->dev;
7517 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7518
7519 if (link != sp->last_link_state) {
7520 init_tti(sp, link);
7521 if (link == LINK_DOWN) {
7522 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7523 s2io_stop_all_tx_queue(sp);
7524 netif_carrier_off(dev);
7525 if (swstats->link_up_cnt)
7526 swstats->link_up_time =
7527 jiffies - sp->start_time;
7528 swstats->link_down_cnt++;
7529 } else {
7530 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7531 if (swstats->link_down_cnt)
7532 swstats->link_down_time =
7533 jiffies - sp->start_time;
7534 swstats->link_up_cnt++;
7535 netif_carrier_on(dev);
7536 s2io_wake_all_tx_queue(sp);
7537 }
7538 }
7539 sp->last_link_state = link;
7540 sp->start_time = jiffies;
7541}
7542
7543/**
7544 * s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
7545 * @sp : private member of the device structure, which is a pointer to the
7546 * s2io_nic structure.
7547 * Description:
7548 * This function initializes a few of the PCI and PCI-X configuration registers
7549 * with recommended values.
7550 * Return value:
7551 * void
7552 */
7553
7554static void s2io_init_pci(struct s2io_nic *sp)
7555{
7556 u16 pci_cmd = 0, pcix_cmd = 0;
7557
7558 /* Enable Data Parity Error Recovery in PCI-X command register. */
7559 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7560 &(pcix_cmd));
7561 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7562 (pcix_cmd | 1));
7563 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7564 &(pcix_cmd));
7565
7566 /* Set the PErr Response bit in PCI command register. */
7567 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7568 pci_write_config_word(sp->pdev, PCI_COMMAND,
7569 (pci_cmd | PCI_COMMAND_PARITY));
7570 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7571}
7572
7573static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7574 u8 *dev_multiq)
7575{
7576 int i;
7577
7578 if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
7579 DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
7580 "(%d) not supported\n", tx_fifo_num);
7581
7582 if (tx_fifo_num < 1)
7583 tx_fifo_num = 1;
7584 else
7585 tx_fifo_num = MAX_TX_FIFOS;
7586
7587 DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
7588 }
7589
7590 if (multiq)
7591 *dev_multiq = multiq;
7592
7593 if (tx_steering_type && (1 == tx_fifo_num)) {
7594 if (tx_steering_type != TX_DEFAULT_STEERING)
7595 DBG_PRINT(ERR_DBG,
7596 "Tx steering is not supported with "
7597 "one fifo. Disabling Tx steering.\n");
7598 tx_steering_type = NO_STEERING;
7599 }
7600
7601 if ((tx_steering_type < NO_STEERING) ||
7602 (tx_steering_type > TX_DEFAULT_STEERING)) {
7603 DBG_PRINT(ERR_DBG,
7604 "Requested transmit steering not supported\n");
7605 DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
7606 tx_steering_type = NO_STEERING;
7607 }
7608
7609 if (rx_ring_num > MAX_RX_RINGS) {
7610 DBG_PRINT(ERR_DBG,
7611 "Requested number of rx rings not supported\n");
7612 DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
7613 MAX_RX_RINGS);
7614 rx_ring_num = MAX_RX_RINGS;
7615 }
7616
7617 if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7618 DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
7619 "Defaulting to INTA\n");
7620 *dev_intr_type = INTA;
7621 }
7622
7623 if ((*dev_intr_type == MSI_X) &&
7624 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7625 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7626 DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
7627 "Defaulting to INTA\n");
7628 *dev_intr_type = INTA;
7629 }
7630
7631 if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7632 DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
7633 DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
7634 rx_ring_mode = 1;
7635 }
7636
7637 for (i = 0; i < MAX_RX_RINGS; i++)
7638 if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
7639 DBG_PRINT(ERR_DBG, "Requested rx ring size not "
7640 "supported\nDefaulting to %d\n",
7641 MAX_RX_BLOCKS_PER_RING);
7642 rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
7643 }
7644
7645 return SUCCESS;
7646}
7647
7648/**
7649 * rts_ds_steer - Receive traffic steering based on the IPv4 TOS or the
7650 * IPv6 Traffic Class field.
7651 * @nic: device private variable
7652 * Description: The function configures the receive steering to the
7653 * desired receive ring.
7654 * Return Value: SUCCESS on success and
7655 * FAILURE if the codepoint is out of range or the command does not complete.
7656 */
7657static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7658{
7659 struct XENA_dev_config __iomem *bar0 = nic->bar0;
7660 register u64 val64 = 0;
7661
7662 if (ds_codepoint > 63)
7663 return FAILURE;
7664
7665 val64 = RTS_DS_MEM_DATA(ring);
7666 writeq(val64, &bar0->rts_ds_mem_data);
7667
7668 val64 = RTS_DS_MEM_CTRL_WE |
7669 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7670 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7671
7672 writeq(val64, &bar0->rts_ds_mem_ctrl);
7673
7674 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7675 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7676 S2IO_BIT_RESET);
7677}
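/*
 * Illustrative (hypothetical) use: rts_ds_steer(nic, 0x2E, 1) would steer
 * frames carrying DS codepoint 46 (DSCP EF) to receive ring 1, assuming
 * that ring has been configured.
 */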
7678
7679static const struct net_device_ops s2io_netdev_ops = {
7680 .ndo_open = s2io_open,
7681 .ndo_stop = s2io_close,
7682 .ndo_get_stats = s2io_get_stats,
7683 .ndo_start_xmit = s2io_xmit,
7684 .ndo_validate_addr = eth_validate_addr,
7685 .ndo_set_multicast_list = s2io_set_multicast,
7686 .ndo_do_ioctl = s2io_ioctl,
7687 .ndo_set_mac_address = s2io_set_mac_addr,
7688 .ndo_change_mtu = s2io_change_mtu,
7689 .ndo_set_features = s2io_set_features,
7690 .ndo_tx_timeout = s2io_tx_watchdog,
7691#ifdef CONFIG_NET_POLL_CONTROLLER
7692 .ndo_poll_controller = s2io_netpoll,
7693#endif
7694};
7695
7696/**
7697 * s2io_init_nic - Initialization of the adapter .
7698 * @pdev : structure containing the PCI related information of the device.
7699 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7700 * Description:
7701 * The function initializes an adapter identified by the pci_dev structure.
7702 * All OS related initialization is done, including memory and device
7703 * structure setup and initialization of the device private variables. Also the swapper
7704 * control register is initialized to enable read and write into the I/O
7705 * registers of the device.
7706 * Return value:
7707 * returns 0 on success and negative on failure.
7708 */
7709
7710static int __devinit
7711s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7712{
7713 struct s2io_nic *sp;
7714 struct net_device *dev;
7715 int i, j, ret;
7716 int dma_flag = false;
7717 u32 mac_up, mac_down;
7718 u64 val64 = 0, tmp64 = 0;
7719 struct XENA_dev_config __iomem *bar0 = NULL;
7720 u16 subid;
7721 struct config_param *config;
7722 struct mac_info *mac_control;
7723 int mode;
7724 u8 dev_intr_type = intr_type;
7725 u8 dev_multiq = 0;
7726
7727 ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7728 if (ret)
7729 return ret;
7730
7731 ret = pci_enable_device(pdev);
7732 if (ret) {
7733 DBG_PRINT(ERR_DBG,
7734 "%s: pci_enable_device failed\n", __func__);
7735 return ret;
7736 }
7737
7738 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
7739 DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
7740 dma_flag = true;
7741 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
7742 DBG_PRINT(ERR_DBG,
7743 "Unable to obtain 64bit DMA "
7744 "for consistent allocations\n");
7745 pci_disable_device(pdev);
7746 return -ENOMEM;
7747 }
7748 } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
7749 DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__);
7750 } else {
7751 pci_disable_device(pdev);
7752 return -ENOMEM;
7753 }
7754 ret = pci_request_regions(pdev, s2io_driver_name);
7755 if (ret) {
7756 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
7757 __func__, ret);
7758 pci_disable_device(pdev);
7759 return -ENODEV;
7760 }
7761 if (dev_multiq)
7762 dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
7763 else
7764 dev = alloc_etherdev(sizeof(struct s2io_nic));
7765 if (dev == NULL) {
7766 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7767 pci_disable_device(pdev);
7768 pci_release_regions(pdev);
7769 return -ENODEV;
7770 }
7771
7772 pci_set_master(pdev);
7773 pci_set_drvdata(pdev, dev);
7774 SET_NETDEV_DEV(dev, &pdev->dev);
7775
7776 /* Private member variable initialized to s2io NIC structure */
7777 sp = netdev_priv(dev);
7778 sp->dev = dev;
7779 sp->pdev = pdev;
7780 sp->high_dma_flag = dma_flag;
7781 sp->device_enabled_once = false;
7782 if (rx_ring_mode == 1)
7783 sp->rxd_mode = RXD_MODE_1;
7784 if (rx_ring_mode == 2)
7785 sp->rxd_mode = RXD_MODE_3B;
7786
7787 sp->config.intr_type = dev_intr_type;
7788
7789 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7790 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7791 sp->device_type = XFRAME_II_DEVICE;
7792 else
7793 sp->device_type = XFRAME_I_DEVICE;
7794
7795
7796 /* Initialize some PCI/PCI-X fields of the NIC. */
7797 s2io_init_pci(sp);
7798
7799 /*
7800 * Setting the device configuration parameters.
7801 * Most of these parameters can be specified by the user during
7802 * module insertion as they are module loadable parameters. If
7803 * these parameters are not specified during load time, they
7804 * are initialized with default values.
7805 */
7806 config = &sp->config;
7807 mac_control = &sp->mac_control;
7808
7809 config->napi = napi;
7810 config->tx_steering_type = tx_steering_type;
7811
7812 /* Tx side parameters. */
7813 if (config->tx_steering_type == TX_PRIORITY_STEERING)
7814 config->tx_fifo_num = MAX_TX_FIFOS;
7815 else
7816 config->tx_fifo_num = tx_fifo_num;
7817
7818 /* Initialize the fifos used for tx steering */
7819 if (config->tx_fifo_num < 5) {
7820 if (config->tx_fifo_num == 1)
7821 sp->total_tcp_fifos = 1;
7822 else
7823 sp->total_tcp_fifos = config->tx_fifo_num - 1;
7824 sp->udp_fifo_idx = config->tx_fifo_num - 1;
7825 sp->total_udp_fifos = 1;
7826 sp->other_fifo_idx = sp->total_tcp_fifos - 1;
7827 } else {
7828 sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
7829 FIFO_OTHER_MAX_NUM);
7830 sp->udp_fifo_idx = sp->total_tcp_fifos;
7831 sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7832 sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7833 }
7834
7835 config->multiq = dev_multiq;
7836 for (i = 0; i < config->tx_fifo_num; i++) {
7837 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7838
7839 tx_cfg->fifo_len = tx_fifo_len[i];
7840 tx_cfg->fifo_priority = i;
7841 }
7842
7843 /* mapping the QoS priority to the configured fifos */
7844 for (i = 0; i < MAX_TX_FIFOS; i++)
7845 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
7846
7847 /* map the hashing selector table to the configured fifos */
7848 for (i = 0; i < config->tx_fifo_num; i++)
7849 sp->fifo_selector[i] = fifo_selector[i];
7850
7851
7852 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7853 for (i = 0; i < config->tx_fifo_num; i++) {
7854 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7855
7856 tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7857 if (tx_cfg->fifo_len < 65) {
7858 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7859 break;
7860 }
7861 }
7862 /* + 2 because one Txd for skb->data and one Txd for UFO */
7863 config->max_txds = MAX_SKB_FRAGS + 2;
7864
7865 /* Rx side parameters. */
7866 config->rx_ring_num = rx_ring_num;
7867 for (i = 0; i < config->rx_ring_num; i++) {
7868 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7869 struct ring_info *ring = &mac_control->rings[i];
7870
7871 rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
7872 rx_cfg->ring_priority = i;
7873 ring->rx_bufs_left = 0;
7874 ring->rxd_mode = sp->rxd_mode;
7875 ring->rxd_count = rxd_count[sp->rxd_mode];
7876 ring->pdev = sp->pdev;
7877 ring->dev = sp->dev;
7878 }
7879
7880 for (i = 0; i < rx_ring_num; i++) {
7881 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7882
7883 rx_cfg->ring_org = RING_ORG_BUFF1;
7884 rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7885 }
7886
7887 /* Setting Mac Control parameters */
7888 mac_control->rmac_pause_time = rmac_pause_time;
7889 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7890 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7891
7892
7893 /* initialize the shared memory used by the NIC and the host */
7894 if (init_shared_mem(sp)) {
7895 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
7896 ret = -ENOMEM;
7897 goto mem_alloc_failed;
7898 }
7899
7900 sp->bar0 = pci_ioremap_bar(pdev, 0);
7901 if (!sp->bar0) {
7902 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7903 dev->name);
7904 ret = -ENOMEM;
7905 goto bar0_remap_failed;
7906 }
7907
7908 sp->bar1 = pci_ioremap_bar(pdev, 2);
7909 if (!sp->bar1) {
7910 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7911 dev->name);
7912 ret = -ENOMEM;
7913 goto bar1_remap_failed;
7914 }
7915
7916 dev->irq = pdev->irq;
7917 dev->base_addr = (unsigned long)sp->bar0;
7918
7919 /* Initializing the BAR1 address as the start of the FIFO pointer. */
7920 for (j = 0; j < MAX_TX_FIFOS; j++) {
7921 mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
7922 }
7923
7924 /* Driver entry points */
7925 dev->netdev_ops = &s2io_netdev_ops;
7926 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7927 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
7928 NETIF_F_TSO | NETIF_F_TSO6 |
7929 NETIF_F_RXCSUM | NETIF_F_LRO;
7930 dev->features |= dev->hw_features |
7931 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7932 if (sp->device_type & XFRAME_II_DEVICE) {
7933 dev->hw_features |= NETIF_F_UFO;
7934 if (ufo)
7935 dev->features |= NETIF_F_UFO;
7936 }
7937 if (sp->high_dma_flag == true)
7938 dev->features |= NETIF_F_HIGHDMA;
7939 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7940 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7941 INIT_WORK(&sp->set_link_task, s2io_set_link);
7942
7943 pci_save_state(sp->pdev);
7944
7945 /* Setting swapper control on the NIC, for proper reset operation */
7946 if (s2io_set_swapper(sp)) {
7947 DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
7948 dev->name);
7949 ret = -EAGAIN;
7950 goto set_swap_failed;
7951 }
7952
7953	/* Verify if the Herc works on the slot it is placed into */
7954 if (sp->device_type & XFRAME_II_DEVICE) {
7955 mode = s2io_verify_pci_mode(sp);
7956 if (mode < 0) {
7957 DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
7958 __func__);
7959 ret = -EBADSLT;
7960 goto set_swap_failed;
7961 }
7962 }
7963
7964 if (sp->config.intr_type == MSI_X) {
7965 sp->num_entries = config->rx_ring_num + 1;
7966 ret = s2io_enable_msi_x(sp);
7967
7968 if (!ret) {
7969 ret = s2io_test_msi(sp);
7970 /* rollback MSI-X, will re-enable during add_isr() */
7971 remove_msix_isr(sp);
7972 }
7973 if (ret) {
7974
7975 DBG_PRINT(ERR_DBG,
7976 "MSI-X requested but failed to enable\n");
7977 sp->config.intr_type = INTA;
7978 }
7979 }
7980
7981 if (config->intr_type == MSI_X) {
7982 for (i = 0; i < config->rx_ring_num ; i++) {
7983 struct ring_info *ring = &mac_control->rings[i];
7984
7985 netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
7986 }
7987 } else {
7988 netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
7989 }
7990
7991 /* Not needed for Herc */
7992 if (sp->device_type & XFRAME_I_DEVICE) {
7993 /*
7994 * Fix for all "FFs" MAC address problems observed on
7995 * Alpha platforms
7996 */
7997 fix_mac_address(sp);
7998 s2io_reset(sp);
7999 }
8000
8001 /*
8002 * MAC address initialization.
8003 * For now only one mac address will be read and used.
8004 */
8005 bar0 = sp->bar0;
8006 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
8007 RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
8008 writeq(val64, &bar0->rmac_addr_cmd_mem);
8009 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
8010 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
8011 S2IO_BIT_RESET);
8012 tmp64 = readq(&bar0->rmac_addr_data0_mem);
8013 mac_down = (u32)tmp64;
8014 mac_up = (u32) (tmp64 >> 32);
8015
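	/*
	 * The CAM returns the station address with bytes 0-3 in the upper
	 * 32 bits (byte 0 in the most significant byte) and bytes 4-5 in
	 * the top 16 bits of the lower 32 bits, hence the shifts below.
	 */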
8016 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
8017 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
8018 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
8019 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
8020 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
8021 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
8022
8023 /* Set the factory defined MAC address initially */
8024 dev->addr_len = ETH_ALEN;
8025 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
8026 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
8027
8028 /* initialize number of multicast & unicast MAC entries variables */
8029 if (sp->device_type == XFRAME_I_DEVICE) {
8030 config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
8031 config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
8032 config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
8033 } else if (sp->device_type == XFRAME_II_DEVICE) {
8034 config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
8035 config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
8036 config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
8037 }
8038
8039 /* store mac addresses from CAM to s2io_nic structure */
8040 do_s2io_store_unicast_mc(sp);
8041
8042 /* Configure MSIX vector for number of rings configured plus one */
8043 if ((sp->device_type == XFRAME_II_DEVICE) &&
8044 (config->intr_type == MSI_X))
8045 sp->num_entries = config->rx_ring_num + 1;
8046
8047 /* Store the values of the MSIX table in the s2io_nic structure */
8048 store_xmsi_data(sp);
8049 /* reset Nic and bring it to known state */
8050 s2io_reset(sp);
8051
8052 /*
8053 * Initialize link state flags
8054 * and the card state parameter
8055 */
8056 sp->state = 0;
8057
8058 /* Initialize spinlocks */
8059 for (i = 0; i < sp->config.tx_fifo_num; i++) {
8060 struct fifo_info *fifo = &mac_control->fifos[i];
8061
8062 spin_lock_init(&fifo->tx_lock);
8063 }
8064
8065 /*
8066 * SXE-002: Configure link and activity LED to init state
8067 * on driver load.
8068 */
8069 subid = sp->pdev->subsystem_device;
8070 if ((subid & 0xFF) >= 0x07) {
8071 val64 = readq(&bar0->gpio_control);
8072 val64 |= 0x0000800000000000ULL;
8073 writeq(val64, &bar0->gpio_control);
8074 val64 = 0x0411040400000000ULL;
8075 writeq(val64, (void __iomem *)bar0 + 0x2700);
8076 val64 = readq(&bar0->gpio_control);
8077 }
8078
8079 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
8080
8081 if (register_netdev(dev)) {
8082 DBG_PRINT(ERR_DBG, "Device registration failed\n");
8083 ret = -ENODEV;
8084 goto register_failed;
8085 }
8086 s2io_vpd_read(sp);
8087 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
8088 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
8089 sp->product_name, pdev->revision);
8090 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8091 s2io_driver_version);
8092 DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
8093 DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
8094 if (sp->device_type & XFRAME_II_DEVICE) {
8095 mode = s2io_print_pci_mode(sp);
8096 if (mode < 0) {
8097 ret = -EBADSLT;
8098 unregister_netdev(dev);
8099 goto set_swap_failed;
8100 }
8101 }
8102 switch (sp->rxd_mode) {
8103 case RXD_MODE_1:
8104 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8105 dev->name);
8106 break;
8107 case RXD_MODE_3B:
8108 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8109 dev->name);
8110 break;
8111 }
8112
8113 switch (sp->config.napi) {
8114 case 0:
8115 DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8116 break;
8117 case 1:
8118 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8119 break;
8120 }
8121
8122 DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8123 sp->config.tx_fifo_num);
8124
8125 DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8126 sp->config.rx_ring_num);
8127
8128 switch (sp->config.intr_type) {
8129 case INTA:
8130 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8131 break;
8132 case MSI_X:
8133 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8134 break;
8135 }
8136 if (sp->config.multiq) {
8137 for (i = 0; i < sp->config.tx_fifo_num; i++) {
8138 struct fifo_info *fifo = &mac_control->fifos[i];
8139
8140 fifo->multiq = config->multiq;
8141 }
8142 DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8143 dev->name);
8144 } else
8145 DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
8146 dev->name);
8147
8148 switch (sp->config.tx_steering_type) {
8149 case NO_STEERING:
8150 DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
8151 dev->name);
8152 break;
8153 case TX_PRIORITY_STEERING:
8154 DBG_PRINT(ERR_DBG,
8155 "%s: Priority steering enabled for transmit\n",
8156 dev->name);
8157 break;
8158 case TX_DEFAULT_STEERING:
8159 DBG_PRINT(ERR_DBG,
8160 "%s: Default steering enabled for transmit\n",
8161 dev->name);
8162 }
8163
8164 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8165 dev->name);
8166 if (ufo)
8167 DBG_PRINT(ERR_DBG,
8168 "%s: UDP Fragmentation Offload(UFO) enabled\n",
8169 dev->name);
8170 /* Initialize device name */
8171 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
8172
8173 if (vlan_tag_strip)
8174 sp->vlan_strip_flag = 1;
8175 else
8176 sp->vlan_strip_flag = 0;
8177
8178 /*
8179 * Make Link state as off at this point, when the Link change
8180 * interrupt comes the state will be automatically changed to
8181 * the right state.
8182 */
8183 netif_carrier_off(dev);
8184
8185 return 0;
8186
8187register_failed:
8188set_swap_failed:
8189 iounmap(sp->bar1);
8190bar1_remap_failed:
8191 iounmap(sp->bar0);
8192bar0_remap_failed:
8193mem_alloc_failed:
8194 free_shared_mem(sp);
8195 pci_disable_device(pdev);
8196 pci_release_regions(pdev);
8197 pci_set_drvdata(pdev, NULL);
8198 free_netdev(dev);
8199
8200 return ret;
8201}
8202
8203/**
8204 * s2io_rem_nic - Free the PCI device
8205 * @pdev: structure containing the PCI related information of the device.
8206 * Description: This function is called by the PCI subsystem to release a
8207 * PCI device and free up all resources held by the device. This could
8208 * be in response to a Hot plug event or when the driver is to be removed
8209 * from memory.
8210 */
8211
8212static void __devexit s2io_rem_nic(struct pci_dev *pdev)
8213{
8214 struct net_device *dev = pci_get_drvdata(pdev);
8215 struct s2io_nic *sp;
8216
8217 if (dev == NULL) {
8218 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
8219 return;
8220 }
8221
8222 sp = netdev_priv(dev);
8223
8224 cancel_work_sync(&sp->rst_timer_task);
8225 cancel_work_sync(&sp->set_link_task);
8226
8227 unregister_netdev(dev);
8228
8229 free_shared_mem(sp);
8230 iounmap(sp->bar0);
8231 iounmap(sp->bar1);
8232 pci_release_regions(pdev);
8233 pci_set_drvdata(pdev, NULL);
8234 free_netdev(dev);
8235 pci_disable_device(pdev);
8236}
8237
8238/**
8239 * s2io_starter - Entry point for the driver
8240 * Description: This function is the entry point for the driver. It verifies
8241 * the module loadable parameters and initializes PCI configuration space.
8242 */
8243
8244static int __init s2io_starter(void)
8245{
8246 return pci_register_driver(&s2io_driver);
8247}
8248
8249/**
8250 * s2io_closer - Cleanup routine for the driver
8251 * Description: This function is the cleanup routine for the driver. It unregisters the driver.
8252 */
8253
8254static __exit void s2io_closer(void)
8255{
8256 pci_unregister_driver(&s2io_driver);
8257 DBG_PRINT(INIT_DBG, "cleanup done\n");
8258}
8259
8260module_init(s2io_starter);
8261module_exit(s2io_closer);
8262
8263static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8264 struct tcphdr **tcp, struct RxD_t *rxdp,
8265 struct s2io_nic *sp)
8266{
8267 int ip_off;
8268 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8269
8270 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8271 DBG_PRINT(INIT_DBG,
8272 "%s: Non-TCP frames not supported for LRO\n",
8273 __func__);
8274 return -1;
8275 }
8276
8277 /* Checking for DIX type or DIX type with VLAN */
8278 if ((l2_type == 0) || (l2_type == 4)) {
8279 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8280 /*
8281 * If vlan stripping is disabled and the frame is VLAN tagged,
8282 * shift the offset by the VLAN header size bytes.
8283 */
8284 if ((!sp->vlan_strip_flag) &&
8285 (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8286 ip_off += HEADER_VLAN_SIZE;
8287 } else {
8288 /* LLC, SNAP etc are considered non-mergeable */
8289 return -1;
8290 }
8291
8292 *ip = (struct iphdr *)((u8 *)buffer + ip_off);
8293 ip_len = (u8)((*ip)->ihl);
8294 ip_len <<= 2;
8295 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8296
8297 return 0;
8298}
8299
8300static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8301 struct tcphdr *tcp)
8302{
8303 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8304 if ((lro->iph->saddr != ip->saddr) ||
8305 (lro->iph->daddr != ip->daddr) ||
8306 (lro->tcph->source != tcp->source) ||
8307 (lro->tcph->dest != tcp->dest))
8308 return -1;
8309 return 0;
8310}
8311
8312static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8313{
8314 return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
8315}
8316
8317static void initiate_new_session(struct lro *lro, u8 *l2h,
8318 struct iphdr *ip, struct tcphdr *tcp,
8319 u32 tcp_pyld_len, u16 vlan_tag)
8320{
8321 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8322 lro->l2h = l2h;
8323 lro->iph = ip;
8324 lro->tcph = tcp;
8325 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8326 lro->tcp_ack = tcp->ack_seq;
8327 lro->sg_num = 1;
8328 lro->total_len = ntohs(ip->tot_len);
8329 lro->frags_len = 0;
8330 lro->vlan_tag = vlan_tag;
8331 /*
8332 * Check if we saw TCP timestamp.
8333 * Other consistency checks have already been done.
8334 */
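	/*
	 * Note: ptr below points just past the TCP header; the code assumes
	 * the standard NOP, NOP, TIMESTAMP option layout (12 option bytes
	 * when doff == 8), so ptr + 1 holds TSval and ptr + 2 holds TSecr.
	 */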
8335 if (tcp->doff == 8) {
8336 __be32 *ptr;
8337 ptr = (__be32 *)(tcp+1);
8338 lro->saw_ts = 1;
8339 lro->cur_tsval = ntohl(*(ptr+1));
8340 lro->cur_tsecr = *(ptr+2);
8341 }
8342 lro->in_use = 1;
8343}
8344
8345static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8346{
8347 struct iphdr *ip = lro->iph;
8348 struct tcphdr *tcp = lro->tcph;
8349 __sum16 nchk;
8350 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8351
8352 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8353
8354 /* Update L3 header */
8355 ip->tot_len = htons(lro->total_len);
8356 ip->check = 0;
8357 nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
8358 ip->check = nchk;
8359
8360 /* Update L4 header */
8361 tcp->ack_seq = lro->tcp_ack;
8362 tcp->window = lro->window;
8363
8364 /* Update tsecr field if this session has timestamps enabled */
8365 if (lro->saw_ts) {
8366 __be32 *ptr = (__be32 *)(tcp + 1);
8367 *(ptr+2) = lro->cur_tsecr;
8368 }
8369
8370 /* Update counters required for calculation of
8371 * average no. of packets aggregated.
8372 */
8373 swstats->sum_avg_pkts_aggregated += lro->sg_num;
8374 swstats->num_aggregations++;
8375}
8376
8377static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8378 struct tcphdr *tcp, u32 l4_pyld)
8379{
8380 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8381 lro->total_len += l4_pyld;
8382 lro->frags_len += l4_pyld;
8383 lro->tcp_next_seq += l4_pyld;
8384 lro->sg_num++;
8385
8386 /* Update ack seq no. and window advertisement (from this pkt) in LRO object */
8387 lro->tcp_ack = tcp->ack_seq;
8388 lro->window = tcp->window;
8389
8390 if (lro->saw_ts) {
8391 __be32 *ptr;
8392 /* Update tsecr and tsval from this packet */
8393 ptr = (__be32 *)(tcp+1);
8394 lro->cur_tsval = ntohl(*(ptr+1));
8395 lro->cur_tsecr = *(ptr + 2);
8396 }
8397}
8398
8399static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
8400 struct tcphdr *tcp, u32 tcp_pyld_len)
8401{
8402 u8 *ptr;
8403
8404 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8405
8406 if (!tcp_pyld_len) {
8407 /* Runt frame or a pure ack */
8408 return -1;
8409 }
8410
8411 if (ip->ihl != 5) /* IP has options */
8412 return -1;
8413
8414 /* If we see CE codepoint in IP header, packet is not mergeable */
8415 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
8416 return -1;
8417
8418 /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
8419 if (tcp->urg || tcp->psh || tcp->rst ||
8420 tcp->syn || tcp->fin ||
8421 tcp->ece || tcp->cwr || !tcp->ack) {
8422 /*
8423 * Currently recognize only the ack control word and
8424 * any other control field being set would result in
8425 * flushing the LRO session
8426 */
8427 return -1;
8428 }
8429
8430 /*
8431 * Allow only one TCP timestamp option. Don't aggregate if
8432 * any other options are detected.
8433 */
8434 if (tcp->doff != 5 && tcp->doff != 8)
8435 return -1;
8436
8437 if (tcp->doff == 8) {
8438 ptr = (u8 *)(tcp + 1);
8439 while (*ptr == TCPOPT_NOP)
8440 ptr++;
8441 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8442 return -1;
8443
8444 /* Ensure timestamp value increases monotonically */
8445 if (l_lro)
8446 if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
8447 return -1;
8448
8449 /* timestamp echo reply should be non-zero */
8450 if (*((__be32 *)(ptr+6)) == 0)
8451 return -1;
8452 }
8453
8454 return 0;
8455}
8456
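/*
 * Rough summary of the return codes used by this function (derived from
 * the assignments and the switch at its end):
 * 0 - all LRO sessions are already in use, packet goes up un-aggregated
 * 1 - packet aggregated into an existing session
 * 2 - flush the matched session (out of sequence or not aggregatable)
 * 3 - a new LRO session was begun for this packet
 * 4 - session reached lro_max_aggr_per_sess, flush it after this packet
 * 5 - packet is not L3/L4 aggregatable, send it up without LRO
 */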
8457static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
8458 u8 **tcp, u32 *tcp_len, struct lro **lro,
8459 struct RxD_t *rxdp, struct s2io_nic *sp)
8460{
8461 struct iphdr *ip;
8462 struct tcphdr *tcph;
8463 int ret = 0, i;
8464 u16 vlan_tag = 0;
8465 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8466
8467 ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
8468 rxdp, sp);
8469 if (ret)
8470 return ret;
8471
8472 DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);
8473
8474 vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
8475 tcph = (struct tcphdr *)*tcp;
8476 *tcp_len = get_l4_pyld_length(ip, tcph);
8477 for (i = 0; i < MAX_LRO_SESSIONS; i++) {
8478 struct lro *l_lro = &ring_data->lro0_n[i];
8479 if (l_lro->in_use) {
8480 if (check_for_socket_match(l_lro, ip, tcph))
8481 continue;
8482 /* Sock pair matched */
8483 *lro = l_lro;
8484
8485 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
8486 DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
8487 "expected 0x%x, actual 0x%x\n",
8488 __func__,
8489 (*lro)->tcp_next_seq,
8490 ntohl(tcph->seq));
8491
8492 swstats->outof_sequence_pkts++;
8493 ret = 2;
8494 break;
8495 }
8496
8497 if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
8498 *tcp_len))
8499 ret = 1; /* Aggregate */
8500 else
8501 ret = 2; /* Flush both */
8502 break;
8503 }
8504 }
8505
8506 if (ret == 0) {
8507 /* Before searching for available LRO objects,
8508 * check if the pkt is L3/L4 aggregatable. If not
8509 * don't create new LRO session. Just send this
8510 * packet up.
8511 */
8512 if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
8513 return 5;
8514
8515 for (i = 0; i < MAX_LRO_SESSIONS; i++) {
8516 struct lro *l_lro = &ring_data->lro0_n[i];
8517 if (!(l_lro->in_use)) {
8518 *lro = l_lro;
8519 ret = 3; /* Begin anew */
8520 break;
8521 }
8522 }
8523 }
8524
8525 if (ret == 0) { /* sessions exceeded */
8526 DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
8527 __func__);
8528 *lro = NULL;
8529 return ret;
8530 }
8531
8532 switch (ret) {
8533 case 3:
8534 initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
8535 vlan_tag);
8536 break;
8537 case 2:
8538 update_L3L4_header(sp, *lro);
8539 break;
8540 case 1:
8541 aggregate_new_rx(*lro, ip, tcph, *tcp_len);
8542 if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
8543 update_L3L4_header(sp, *lro);
8544 ret = 4; /* Flush the LRO */
8545 }
8546 break;
8547 default:
8548 DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
8549 break;
8550 }
8551
8552 return ret;
8553}
8554
8555static void clear_lro_session(struct lro *lro)
8556{
8557 static u16 lro_struct_size = sizeof(struct lro);
8558
8559 memset(lro, 0, lro_struct_size);
8560}
8561
8562static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8563{
8564 struct net_device *dev = skb->dev;
8565 struct s2io_nic *sp = netdev_priv(dev);
8566
8567 skb->protocol = eth_type_trans(skb, dev);
8568 if (vlan_tag && sp->vlan_strip_flag)
8569 __vlan_hwaccel_put_tag(skb, vlan_tag);
8570 if (sp->config.napi)
8571 netif_receive_skb(skb);
8572 else
8573 netif_rx(skb);
8574}
8575
8576static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8577 struct sk_buff *skb, u32 tcp_len)
8578{
8579 struct sk_buff *first = lro->parent;
8580 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8581
8582 first->len += tcp_len;
8583 first->data_len = lro->frags_len;
8584 skb_pull(skb, (skb->len - tcp_len));
8585 if (skb_shinfo(first)->frag_list)
8586 lro->last_frag->next = skb;
8587 else
8588 skb_shinfo(first)->frag_list = skb;
8589 first->truesize += skb->truesize;
8590 lro->last_frag = skb;
8591 swstats->clubbed_frms_cnt++;
8592}
8593
8594/**
8595 * s2io_io_error_detected - called when PCI error is detected
8596 * @pdev: Pointer to PCI device
8597 * @state: The current pci connection state
8598 *
8599 * This function is called after a PCI bus error affecting
8600 * this device has been detected.
8601 */
8602static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8603 pci_channel_state_t state)
8604{
8605 struct net_device *netdev = pci_get_drvdata(pdev);
8606 struct s2io_nic *sp = netdev_priv(netdev);
8607
8608 netif_device_detach(netdev);
8609
8610 if (state == pci_channel_io_perm_failure)
8611 return PCI_ERS_RESULT_DISCONNECT;
8612
8613 if (netif_running(netdev)) {
8614 /* Bring down the card, while avoiding PCI I/O */
8615 do_s2io_card_down(sp, 0);
8616 }
8617 pci_disable_device(pdev);
8618
8619 return PCI_ERS_RESULT_NEED_RESET;
8620}
8621
8622/**
8623 * s2io_io_slot_reset - called after the pci bus has been reset.
8624 * @pdev: Pointer to PCI device
8625 *
8626 * Restart the card from scratch, as if from a cold-boot.
8627 * At this point, the card has experienced a hard reset,
8628 * followed by fixups by BIOS, and has its config space
8629 * set up identically to what it was at cold boot.
8630 */
8631static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8632{
8633 struct net_device *netdev = pci_get_drvdata(pdev);
8634 struct s2io_nic *sp = netdev_priv(netdev);
8635
8636 if (pci_enable_device(pdev)) {
8637 pr_err("Cannot re-enable PCI device after reset.\n");
8638 return PCI_ERS_RESULT_DISCONNECT;
8639 }
8640
8641 pci_set_master(pdev);
8642 s2io_reset(sp);
8643
8644 return PCI_ERS_RESULT_RECOVERED;
8645}
8646
8647/**
8648 * s2io_io_resume - called when traffic can start flowing again.
8649 * @pdev: Pointer to PCI device
8650 *
8651 * This callback is called when the error recovery driver tells
8652 * us that it's OK to resume normal operation.
8653 */
8654static void s2io_io_resume(struct pci_dev *pdev)
8655{
8656 struct net_device *netdev = pci_get_drvdata(pdev);
8657 struct s2io_nic *sp = netdev_priv(netdev);
8658
8659 if (netif_running(netdev)) {
8660 if (s2io_card_up(sp)) {
8661 pr_err("Can't bring device back up after reset.\n");
8662 return;
8663 }
8664
8665 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8666 s2io_card_down(sp);
8667 pr_err("Can't restore mac addr after reset.\n");
8668 return;
8669 }
8670 }
8671
8672 netif_device_attach(netdev);
8673 netif_tx_wake_all_queues(netdev);
8674}
diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h
new file mode 100644
index 000000000000..d5596926a1ef
--- /dev/null
+++ b/drivers/net/ethernet/neterion/s2io.h
@@ -0,0 +1,1148 @@
1/************************************************************************
2 * s2io.h: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2010 Exar Corp.
4
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
12 ************************************************************************/
13#ifndef _S2IO_H
14#define _S2IO_H
15
16#define TBD 0
17#define s2BIT(loc) (0x8000000000000000ULL >> (loc))
18#define vBIT(val, loc, sz) (((u64)val) << (64-loc-sz))
19#define INV(d) ((d&0xff)<<24) | (((d>>8)&0xff)<<16) | (((d>>16)&0xff)<<8)| ((d>>24)&0xff)
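/*
 * Bit positions are counted from the most significant bit (bit 0) of the
 * 64-bit word.  For example:
 *   s2BIT(7)        == 0x0100000000000000ULL
 *   vBIT(0x6, 0, 4) == 0x6000000000000000ULL (a 4-bit field at bit 0)
 */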
20
21#undef SUCCESS
22#define SUCCESS 0
23#define FAILURE -1
24#define S2IO_MINUS_ONE 0xFFFFFFFFFFFFFFFFULL
25#define S2IO_DISABLE_MAC_ENTRY 0xFFFFFFFFFFFFULL
26#define S2IO_MAX_PCI_CONFIG_SPACE_REINIT 100
27#define S2IO_BIT_RESET 1
28#define S2IO_BIT_SET 2
29#define CHECKBIT(value, nbit) (value & (1 << nbit))
30
31/* Maximum time to flicker LED when asked to identify NIC using ethtool */
32#define MAX_FLICKER_TIME 60000 /* 60 Secs */
33
34/* Maximum outstanding splits to be configured into xena. */
35enum {
36 XENA_ONE_SPLIT_TRANSACTION = 0,
37 XENA_TWO_SPLIT_TRANSACTION = 1,
38 XENA_THREE_SPLIT_TRANSACTION = 2,
39 XENA_FOUR_SPLIT_TRANSACTION = 3,
40 XENA_EIGHT_SPLIT_TRANSACTION = 4,
41 XENA_TWELVE_SPLIT_TRANSACTION = 5,
42 XENA_SIXTEEN_SPLIT_TRANSACTION = 6,
43 XENA_THIRTYTWO_SPLIT_TRANSACTION = 7
44};
45#define XENA_MAX_OUTSTANDING_SPLITS(n) (n << 4)
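/* For example, XENA_MAX_OUTSTANDING_SPLITS(XENA_EIGHT_SPLIT_TRANSACTION)
 * evaluates to (4 << 4) == 0x40.
 */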
46
47/* OS concerned variables and constants */
48#define WATCH_DOG_TIMEOUT 15*HZ
49#define EFILL 0x1234
50#define ALIGN_SIZE 127
51#define PCIX_COMMAND_REGISTER 0x62
52
53/*
54 * Debug related variables.
55 */
56/* different debug levels. */
57#define ERR_DBG 0
58#define INIT_DBG 1
59#define INFO_DBG 2
60#define TX_DBG 3
61#define INTR_DBG 4
62
63/* Global variable that defines the present debug level of the driver. */
64static int debug_level = ERR_DBG;
65
66/* DEBUG message print. */
67#define DBG_PRINT(dbg_level, fmt, args...) do { \
68 if (dbg_level <= debug_level) \
69 pr_info(fmt, ##args); \
70 } while (0)
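/*
 * With the default debug_level of ERR_DBG, only ERR_DBG messages are
 * printed; e.g. DBG_PRINT(INFO_DBG, "...") is compiled in but stays
 * silent until debug_level is raised to INFO_DBG or higher.
 */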
71
72/* Protocol assist features of the NIC */
73#define L3_CKSUM_OK 0xFFFF
74#define L4_CKSUM_OK 0xFFFF
75#define S2IO_JUMBO_SIZE 9600
76
77/* Driver statistics maintained by driver */
78struct swStat {
79 unsigned long long single_ecc_errs;
80 unsigned long long double_ecc_errs;
81 unsigned long long parity_err_cnt;
82 unsigned long long serious_err_cnt;
83 unsigned long long soft_reset_cnt;
84 unsigned long long fifo_full_cnt;
85 unsigned long long ring_full_cnt[8];
86 /* LRO statistics */
87 unsigned long long clubbed_frms_cnt;
88 unsigned long long sending_both;
89 unsigned long long outof_sequence_pkts;
90 unsigned long long flush_max_pkts;
91 unsigned long long sum_avg_pkts_aggregated;
92 unsigned long long num_aggregations;
93 /* Other statistics */
94 unsigned long long mem_alloc_fail_cnt;
95 unsigned long long pci_map_fail_cnt;
96 unsigned long long watchdog_timer_cnt;
97 unsigned long long mem_allocated;
98 unsigned long long mem_freed;
99 unsigned long long link_up_cnt;
100 unsigned long long link_down_cnt;
101 unsigned long long link_up_time;
102 unsigned long long link_down_time;
103
104 /* Transfer Code statistics */
105 unsigned long long tx_buf_abort_cnt;
106 unsigned long long tx_desc_abort_cnt;
107 unsigned long long tx_parity_err_cnt;
108 unsigned long long tx_link_loss_cnt;
109 unsigned long long tx_list_proc_err_cnt;
110
111 unsigned long long rx_parity_err_cnt;
112 unsigned long long rx_abort_cnt;
113 unsigned long long rx_parity_abort_cnt;
114 unsigned long long rx_rda_fail_cnt;
115 unsigned long long rx_unkn_prot_cnt;
116 unsigned long long rx_fcs_err_cnt;
117 unsigned long long rx_buf_size_err_cnt;
118 unsigned long long rx_rxd_corrupt_cnt;
119 unsigned long long rx_unkn_err_cnt;
120
121 /* Error/alarm statistics*/
122 unsigned long long tda_err_cnt;
123 unsigned long long pfc_err_cnt;
124 unsigned long long pcc_err_cnt;
125 unsigned long long tti_err_cnt;
126 unsigned long long lso_err_cnt;
127 unsigned long long tpa_err_cnt;
128 unsigned long long sm_err_cnt;
129 unsigned long long mac_tmac_err_cnt;
130 unsigned long long mac_rmac_err_cnt;
131 unsigned long long xgxs_txgxs_err_cnt;
132 unsigned long long xgxs_rxgxs_err_cnt;
133 unsigned long long rc_err_cnt;
134 unsigned long long prc_pcix_err_cnt;
135 unsigned long long rpa_err_cnt;
136 unsigned long long rda_err_cnt;
137 unsigned long long rti_err_cnt;
138 unsigned long long mc_err_cnt;
139
140};
141
142/* Xpak related alarms and warnings */
143struct xpakStat {
144 u64 alarm_transceiver_temp_high;
145 u64 alarm_transceiver_temp_low;
146 u64 alarm_laser_bias_current_high;
147 u64 alarm_laser_bias_current_low;
148 u64 alarm_laser_output_power_high;
149 u64 alarm_laser_output_power_low;
150 u64 warn_transceiver_temp_high;
151 u64 warn_transceiver_temp_low;
152 u64 warn_laser_bias_current_high;
153 u64 warn_laser_bias_current_low;
154 u64 warn_laser_output_power_high;
155 u64 warn_laser_output_power_low;
156 u64 xpak_regs_stat;
157 u32 xpak_timer_count;
158};
159
160
161/* The statistics block of Xena */
162struct stat_block {
163/* Tx MAC statistics counters. */
164 __le32 tmac_data_octets;
165 __le32 tmac_frms;
166 __le64 tmac_drop_frms;
167 __le32 tmac_bcst_frms;
168 __le32 tmac_mcst_frms;
169 __le64 tmac_pause_ctrl_frms;
170 __le32 tmac_ucst_frms;
171 __le32 tmac_ttl_octets;
172 __le32 tmac_any_err_frms;
173 __le32 tmac_nucst_frms;
174 __le64 tmac_ttl_less_fb_octets;
175 __le64 tmac_vld_ip_octets;
176 __le32 tmac_drop_ip;
177 __le32 tmac_vld_ip;
178 __le32 tmac_rst_tcp;
179 __le32 tmac_icmp;
180 __le64 tmac_tcp;
181 __le32 reserved_0;
182 __le32 tmac_udp;
183
184/* Rx MAC Statistics counters. */
185 __le32 rmac_data_octets;
186 __le32 rmac_vld_frms;
187 __le64 rmac_fcs_err_frms;
188 __le64 rmac_drop_frms;
189 __le32 rmac_vld_bcst_frms;
190 __le32 rmac_vld_mcst_frms;
191 __le32 rmac_out_rng_len_err_frms;
192 __le32 rmac_in_rng_len_err_frms;
193 __le64 rmac_long_frms;
194 __le64 rmac_pause_ctrl_frms;
195 __le64 rmac_unsup_ctrl_frms;
196 __le32 rmac_accepted_ucst_frms;
197 __le32 rmac_ttl_octets;
198 __le32 rmac_discarded_frms;
199 __le32 rmac_accepted_nucst_frms;
200 __le32 reserved_1;
201 __le32 rmac_drop_events;
202 __le64 rmac_ttl_less_fb_octets;
203 __le64 rmac_ttl_frms;
204 __le64 reserved_2;
205 __le32 rmac_usized_frms;
206 __le32 reserved_3;
207 __le32 rmac_frag_frms;
208 __le32 rmac_osized_frms;
209 __le32 reserved_4;
210 __le32 rmac_jabber_frms;
211 __le64 rmac_ttl_64_frms;
212 __le64 rmac_ttl_65_127_frms;
213 __le64 reserved_5;
214 __le64 rmac_ttl_128_255_frms;
215 __le64 rmac_ttl_256_511_frms;
216 __le64 reserved_6;
217 __le64 rmac_ttl_512_1023_frms;
218 __le64 rmac_ttl_1024_1518_frms;
219 __le32 rmac_ip;
220 __le32 reserved_7;
221 __le64 rmac_ip_octets;
222 __le32 rmac_drop_ip;
223 __le32 rmac_hdr_err_ip;
224 __le32 reserved_8;
225 __le32 rmac_icmp;
226 __le64 rmac_tcp;
227 __le32 rmac_err_drp_udp;
228 __le32 rmac_udp;
229 __le64 rmac_xgmii_err_sym;
230 __le64 rmac_frms_q0;
231 __le64 rmac_frms_q1;
232 __le64 rmac_frms_q2;
233 __le64 rmac_frms_q3;
234 __le64 rmac_frms_q4;
235 __le64 rmac_frms_q5;
236 __le64 rmac_frms_q6;
237 __le64 rmac_frms_q7;
238 __le16 rmac_full_q3;
239 __le16 rmac_full_q2;
240 __le16 rmac_full_q1;
241 __le16 rmac_full_q0;
242 __le16 rmac_full_q7;
243 __le16 rmac_full_q6;
244 __le16 rmac_full_q5;
245 __le16 rmac_full_q4;
246 __le32 reserved_9;
247 __le32 rmac_pause_cnt;
248 __le64 rmac_xgmii_data_err_cnt;
249 __le64 rmac_xgmii_ctrl_err_cnt;
250 __le32 rmac_err_tcp;
251 __le32 rmac_accepted_ip;
252
253/* PCI/PCI-X Read transaction statistics. */
254 __le32 new_rd_req_cnt;
255 __le32 rd_req_cnt;
256 __le32 rd_rtry_cnt;
257 __le32 new_rd_req_rtry_cnt;
258
259/* PCI/PCI-X Write/Read transaction statistics. */
260 __le32 wr_req_cnt;
261 __le32 wr_rtry_rd_ack_cnt;
262 __le32 new_wr_req_rtry_cnt;
263 __le32 new_wr_req_cnt;
264 __le32 wr_disc_cnt;
265 __le32 wr_rtry_cnt;
266
267/* PCI/PCI-X Write / DMA Transaction statistics. */
268 __le32 txp_wr_cnt;
269 __le32 rd_rtry_wr_ack_cnt;
270 __le32 txd_wr_cnt;
271 __le32 txd_rd_cnt;
272 __le32 rxd_wr_cnt;
273 __le32 rxd_rd_cnt;
274 __le32 rxf_wr_cnt;
275 __le32 txf_rd_cnt;
276
277/* Tx MAC statistics overflow counters. */
278 __le32 tmac_data_octets_oflow;
279 __le32 tmac_frms_oflow;
280 __le32 tmac_bcst_frms_oflow;
281 __le32 tmac_mcst_frms_oflow;
282 __le32 tmac_ucst_frms_oflow;
283 __le32 tmac_ttl_octets_oflow;
284 __le32 tmac_any_err_frms_oflow;
285 __le32 tmac_nucst_frms_oflow;
286 __le64 tmac_vlan_frms;
287 __le32 tmac_drop_ip_oflow;
288 __le32 tmac_vld_ip_oflow;
289 __le32 tmac_rst_tcp_oflow;
290 __le32 tmac_icmp_oflow;
291 __le32 tpa_unknown_protocol;
292 __le32 tmac_udp_oflow;
293 __le32 reserved_10;
294 __le32 tpa_parse_failure;
295
296/* Rx MAC Statistics overflow counters. */
297 __le32 rmac_data_octets_oflow;
298 __le32 rmac_vld_frms_oflow;
299 __le32 rmac_vld_bcst_frms_oflow;
300 __le32 rmac_vld_mcst_frms_oflow;
301 __le32 rmac_accepted_ucst_frms_oflow;
302 __le32 rmac_ttl_octets_oflow;
303 __le32 rmac_discarded_frms_oflow;
304 __le32 rmac_accepted_nucst_frms_oflow;
305 __le32 rmac_usized_frms_oflow;
306 __le32 rmac_drop_events_oflow;
307 __le32 rmac_frag_frms_oflow;
308 __le32 rmac_osized_frms_oflow;
309 __le32 rmac_ip_oflow;
310 __le32 rmac_jabber_frms_oflow;
311 __le32 rmac_icmp_oflow;
312 __le32 rmac_drop_ip_oflow;
313 __le32 rmac_err_drp_udp_oflow;
314 __le32 rmac_udp_oflow;
315 __le32 reserved_11;
316 __le32 rmac_pause_cnt_oflow;
317 __le64 rmac_ttl_1519_4095_frms;
318 __le64 rmac_ttl_4096_8191_frms;
319 __le64 rmac_ttl_8192_max_frms;
320 __le64 rmac_ttl_gt_max_frms;
321 __le64 rmac_osized_alt_frms;
322 __le64 rmac_jabber_alt_frms;
323 __le64 rmac_gt_max_alt_frms;
324 __le64 rmac_vlan_frms;
325 __le32 rmac_len_discard;
326 __le32 rmac_fcs_discard;
327 __le32 rmac_pf_discard;
328 __le32 rmac_da_discard;
329 __le32 rmac_red_discard;
330 __le32 rmac_rts_discard;
331 __le32 reserved_12;
332 __le32 rmac_ingm_full_discard;
333 __le32 reserved_13;
334 __le32 rmac_accepted_ip_oflow;
335 __le32 reserved_14;
336 __le32 link_fault_cnt;
337 u8 buffer[20];
338 struct swStat sw_stat;
339 struct xpakStat xpak_stat;
340};
341
342/* Default value for 'vlan_strip_tag' configuration parameter */
343#define NO_STRIP_IN_PROMISC 2
344
345/*
346 * Structures representing different init time configuration
347 * parameters of the NIC.
348 */
349
350#define MAX_TX_FIFOS 8
351#define MAX_RX_RINGS 8
352
353#define FIFO_DEFAULT_NUM 5
354#define FIFO_UDP_MAX_NUM 2 /* 0 - even, 1 -odd ports */
355#define FIFO_OTHER_MAX_NUM 1
356
357
358#define MAX_RX_DESC_1 (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 128)
359#define MAX_RX_DESC_2 (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 86)
360#define MAX_TX_DESC (MAX_AVAILABLE_TXDS)
361
362/* FIFO mappings for all possible number of fifos configured */
363static const int fifo_map[][MAX_TX_FIFOS] = {
364 {0, 0, 0, 0, 0, 0, 0, 0},
365 {0, 0, 0, 0, 1, 1, 1, 1},
366 {0, 0, 0, 1, 1, 1, 2, 2},
367 {0, 0, 1, 1, 2, 2, 3, 3},
368 {0, 0, 1, 1, 2, 2, 3, 4},
369 {0, 0, 1, 1, 2, 3, 4, 5},
370 {0, 0, 1, 2, 3, 4, 5, 6},
371 {0, 1, 2, 3, 4, 5, 6, 7},
372};
373
374static const u16 fifo_selector[MAX_TX_FIFOS] = {0, 1, 3, 3, 7, 7, 7, 7};
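/*
 * Example: with tx_fifo_num == 3 the row fifo_map[2] = {0, 0, 0, 1, 1, 1, 2, 2}
 * is copied into config->fifo_mapping[] by the probe code in s2io.c, so QoS
 * priorities 0-2 map to fifo 0, 3-5 to fifo 1 and 6-7 to fifo 2.
 */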
375
376/* Maintains Per FIFO related information. */
377struct tx_fifo_config {
378#define MAX_AVAILABLE_TXDS 8192
379 u32 fifo_len; /* specifies len of FIFO up to 8192, ie no of TxDLs */
380/* Priority definition */
381#define TX_FIFO_PRI_0 0 /*Highest */
382#define TX_FIFO_PRI_1 1
383#define TX_FIFO_PRI_2 2
384#define TX_FIFO_PRI_3 3
385#define TX_FIFO_PRI_4 4
386#define TX_FIFO_PRI_5 5
387#define TX_FIFO_PRI_6 6
388#define TX_FIFO_PRI_7 7 /*lowest */
389 u8 fifo_priority; /* specifies pointer level for FIFO */
390	/* user should not set two fifos with the same priority */
391 u8 f_no_snoop;
392#define NO_SNOOP_TXD 0x01
393#define NO_SNOOP_TXD_BUFFER 0x02
394};
395
396
397/* Maintains per Ring related information */
398struct rx_ring_config {
399 u32 num_rxd; /*No of RxDs per Rx Ring */
400#define RX_RING_PRI_0 0 /* highest */
401#define RX_RING_PRI_1 1
402#define RX_RING_PRI_2 2
403#define RX_RING_PRI_3 3
404#define RX_RING_PRI_4 4
405#define RX_RING_PRI_5 5
406#define RX_RING_PRI_6 6
407#define RX_RING_PRI_7 7 /* lowest */
408
409 u8 ring_priority; /*Specifies service priority of ring */
410 /* OSM should not set any two rings with same priority */
411 u8 ring_org; /*Organization of ring */
412#define RING_ORG_BUFF1 0x01
413#define RX_RING_ORG_BUFF3 0x03
414#define RX_RING_ORG_BUFF5 0x05
415
416 u8 f_no_snoop;
417#define NO_SNOOP_RXD 0x01
418#define NO_SNOOP_RXD_BUFFER 0x02
419};
420
421/* This structure contains the values of the tunable parameters
422 * of the H/W
423 */
424struct config_param {
425/* Tx Side */
426 u32 tx_fifo_num; /*Number of Tx FIFOs */
427
428 /* 0-No steering, 1-Priority steering, 2-Default fifo map */
429#define NO_STEERING 0
430#define TX_PRIORITY_STEERING 0x1
431#define TX_DEFAULT_STEERING 0x2
432 u8 tx_steering_type;
433
434 u8 fifo_mapping[MAX_TX_FIFOS];
435 struct tx_fifo_config tx_cfg[MAX_TX_FIFOS]; /*Per-Tx FIFO config */
436 u32 max_txds; /*Max no. of Tx buffer descriptor per TxDL */
437 u64 tx_intr_type;
438#define INTA 0
439#define MSI_X 2
440 u8 intr_type;
441 u8 napi;
442
443 /* Specifies if Tx Intr is UTILZ or PER_LIST type. */
444
445/* Rx Side */
446 u32 rx_ring_num; /*Number of receive rings */
447#define MAX_RX_BLOCKS_PER_RING 150
448
449 struct rx_ring_config rx_cfg[MAX_RX_RINGS]; /*Per-Rx Ring config */
450
451#define HEADER_ETHERNET_II_802_3_SIZE 14
452#define HEADER_802_2_SIZE 3
453#define HEADER_SNAP_SIZE 5
454#define HEADER_VLAN_SIZE 4
455
456#define MIN_MTU 46
457#define MAX_PYLD 1500
458#define MAX_MTU (MAX_PYLD+18)
459#define MAX_MTU_VLAN (MAX_PYLD+22)
460#define MAX_PYLD_JUMBO 9600
461#define MAX_MTU_JUMBO (MAX_PYLD_JUMBO+18)
462#define MAX_MTU_JUMBO_VLAN (MAX_PYLD_JUMBO+22)
463 u16 bus_speed;
464 int max_mc_addr; /* xena=64 herc=256 */
465 int max_mac_addr; /* xena=16 herc=64 */
466 int mc_start_offset; /* xena=16 herc=64 */
467 u8 multiq;
468};
469
470/* Structure representing MAC Addrs */
471struct mac_addr {
472 u8 mac_addr[ETH_ALEN];
473};
474
475/* Structure that represent every FIFO element in the BAR1
476 * Address location.
477 */
478struct TxFIFO_element {
479 u64 TxDL_Pointer;
480
481 u64 List_Control;
482#define TX_FIFO_LAST_TXD_NUM( val) vBIT(val,0,8)
483#define TX_FIFO_FIRST_LIST s2BIT(14)
484#define TX_FIFO_LAST_LIST s2BIT(15)
485#define TX_FIFO_FIRSTNLAST_LIST vBIT(3,14,2)
486#define TX_FIFO_SPECIAL_FUNC s2BIT(23)
487#define TX_FIFO_DS_NO_SNOOP s2BIT(31)
488#define TX_FIFO_BUFF_NO_SNOOP s2BIT(30)
489};
490
491/* Tx descriptor structure */
492struct TxD {
493 u64 Control_1;
494/* bit mask */
495#define TXD_LIST_OWN_XENA s2BIT(7)
496#define TXD_T_CODE (s2BIT(12)|s2BIT(13)|s2BIT(14)|s2BIT(15))
497#define TXD_T_CODE_OK(val)		(!(val & TXD_T_CODE))
498#define GET_TXD_T_CODE(val) ((val & TXD_T_CODE)<<12)
499#define TXD_GATHER_CODE (s2BIT(22) | s2BIT(23))
500#define TXD_GATHER_CODE_FIRST s2BIT(22)
501#define TXD_GATHER_CODE_LAST s2BIT(23)
502#define TXD_TCP_LSO_EN s2BIT(30)
503#define TXD_UDP_COF_EN s2BIT(31)
504#define TXD_UFO_EN s2BIT(31) | s2BIT(30)
505#define TXD_TCP_LSO_MSS(val) vBIT(val,34,14)
506#define TXD_UFO_MSS(val) vBIT(val,34,14)
507#define TXD_BUFFER0_SIZE(val) vBIT(val,48,16)
508
509 u64 Control_2;
510#define TXD_TX_CKO_CONTROL (s2BIT(5)|s2BIT(6)|s2BIT(7))
511#define TXD_TX_CKO_IPV4_EN s2BIT(5)
512#define TXD_TX_CKO_TCP_EN s2BIT(6)
513#define TXD_TX_CKO_UDP_EN s2BIT(7)
514#define TXD_VLAN_ENABLE s2BIT(15)
515#define TXD_VLAN_TAG(val) vBIT(val,16,16)
516#define TXD_INT_NUMBER(val) vBIT(val,34,6)
517#define TXD_INT_TYPE_PER_LIST s2BIT(47)
518#define TXD_INT_TYPE_UTILZ s2BIT(46)
519#define TXD_SET_MARKER vBIT(0x6,0,4)
520
521 u64 Buffer_Pointer;
522 u64 Host_Control; /* reserved for host */
523};
524
525/* Structure to hold the phy and virt addr of every TxDL. */
526struct list_info_hold {
527 dma_addr_t list_phy_addr;
528 void *list_virt_addr;
529};
530
531/* Rx descriptor structure for 1 buffer mode */
532struct RxD_t {
533 u64 Host_Control; /* reserved for host */
534 u64 Control_1;
535#define RXD_OWN_XENA s2BIT(7)
536#define RXD_T_CODE (s2BIT(12)|s2BIT(13)|s2BIT(14)|s2BIT(15))
537#define RXD_FRAME_PROTO vBIT(0xFFFF,24,8)
538#define RXD_FRAME_VLAN_TAG s2BIT(24)
539#define RXD_FRAME_PROTO_IPV4 s2BIT(27)
540#define RXD_FRAME_PROTO_IPV6 s2BIT(28)
541#define RXD_FRAME_IP_FRAG s2BIT(29)
542#define RXD_FRAME_PROTO_TCP s2BIT(30)
543#define RXD_FRAME_PROTO_UDP s2BIT(31)
544#define TCP_OR_UDP_FRAME (RXD_FRAME_PROTO_TCP | RXD_FRAME_PROTO_UDP)
545#define RXD_GET_L3_CKSUM(val) ((u16)(val>> 16) & 0xFFFF)
546#define RXD_GET_L4_CKSUM(val) ((u16)(val) & 0xFFFF)
547
548 u64 Control_2;
549#define THE_RXD_MARK 0x3
550#define SET_RXD_MARKER vBIT(THE_RXD_MARK, 0, 2)
551#define GET_RXD_MARKER(ctrl) ((ctrl & SET_RXD_MARKER) >> 62)
552
553#define MASK_VLAN_TAG vBIT(0xFFFF,48,16)
554#define SET_VLAN_TAG(val) vBIT(val,48,16)
555#define SET_NUM_TAG(val) vBIT(val,16,32)
556
557
558};
559/* Rx descriptor structure for 1 buffer mode */
560struct RxD1 {
561 struct RxD_t h;
562
563#define MASK_BUFFER0_SIZE_1 vBIT(0x3FFF,2,14)
564#define SET_BUFFER0_SIZE_1(val) vBIT(val,2,14)
565#define RXD_GET_BUFFER0_SIZE_1(_Control_2) \
566 (u16)((_Control_2 & MASK_BUFFER0_SIZE_1) >> 48)
567 u64 Buffer0_ptr;
568};
569/* Rx descriptor structure for 3 or 2 buffer mode */
570
571struct RxD3 {
572 struct RxD_t h;
573
574#define MASK_BUFFER0_SIZE_3 vBIT(0xFF,2,14)
575#define MASK_BUFFER1_SIZE_3 vBIT(0xFFFF,16,16)
576#define MASK_BUFFER2_SIZE_3 vBIT(0xFFFF,32,16)
577#define SET_BUFFER0_SIZE_3(val) vBIT(val,8,8)
578#define SET_BUFFER1_SIZE_3(val) vBIT(val,16,16)
579#define SET_BUFFER2_SIZE_3(val) vBIT(val,32,16)
580#define RXD_GET_BUFFER0_SIZE_3(Control_2) \
581 (u8)((Control_2 & MASK_BUFFER0_SIZE_3) >> 48)
582#define RXD_GET_BUFFER1_SIZE_3(Control_2) \
583 (u16)((Control_2 & MASK_BUFFER1_SIZE_3) >> 32)
584#define RXD_GET_BUFFER2_SIZE_3(Control_2) \
585 (u16)((Control_2 & MASK_BUFFER2_SIZE_3) >> 16)
586#define BUF0_LEN 40
587#define BUF1_LEN 1
588
589 u64 Buffer0_ptr;
590 u64 Buffer1_ptr;
591 u64 Buffer2_ptr;
592};
593
594
595/* Structure that represents the Rx descriptor block which contains
596 * 128 Rx descriptors.
597 */
598struct RxD_block {
599#define MAX_RXDS_PER_BLOCK_1 127
600 struct RxD1 rxd[MAX_RXDS_PER_BLOCK_1];
601
602 u64 reserved_0;
603#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL
604 u64 reserved_1; /* 0xFEFFFFFFFFFFFFFF to mark last
605 * Rxd in this blk */
606 u64 reserved_2_pNext_RxD_block; /* Logical ptr to next */
607 u64 pNext_RxD_Blk_physical; /* Buff0_ptr.In a 32 bit arch
608 * the upper 32 bits should
609 * be 0 */
610};
611
612#define SIZE_OF_BLOCK 4096
613
614#define RXD_MODE_1 0 /* One Buffer mode */
615#define RXD_MODE_3B 1 /* Two Buffer mode */
616
617/* Structure to hold virtual addresses of Buf0 and Buf1 in
618 * 2buf mode. */
619struct buffAdd {
620 void *ba_0_org;
621 void *ba_1_org;
622 void *ba_0;
623 void *ba_1;
624};
625
626/* Structure which stores all the MAC control parameters */
627
628/* This structure stores the offset of the RxD in the ring
629 * from which the Rx Interrupt processor can start picking
630 * up the RxDs for processing.
631 */
632struct rx_curr_get_info {
633 u32 block_index;
634 u32 offset;
635 u32 ring_len;
636};
637
638struct rx_curr_put_info {
639 u32 block_index;
640 u32 offset;
641 u32 ring_len;
642};
643
644/* This structure stores the offset of the TxDl in the FIFO
645 * from which the Tx Interrupt processor can start picking
646 * up the TxDLs for send complete interrupt processing.
647 */
648struct tx_curr_get_info {
649 u32 offset;
650 u32 fifo_len;
651};
652
653struct tx_curr_put_info {
654 u32 offset;
655 u32 fifo_len;
656};
657
658struct rxd_info {
659 void *virt_addr;
660 dma_addr_t dma_addr;
661};
662
663/* Structure that holds the Phy and virt addresses of the Blocks */
664struct rx_block_info {
665 void *block_virt_addr;
666 dma_addr_t block_dma_addr;
667 struct rxd_info *rxds;
668};
669
670/* Data structure to represent a LRO session */
671struct lro {
672 struct sk_buff *parent;
673 struct sk_buff *last_frag;
674 u8 *l2h;
675 struct iphdr *iph;
676 struct tcphdr *tcph;
677 u32 tcp_next_seq;
678 __be32 tcp_ack;
679 int total_len;
680 int frags_len;
681 int sg_num;
682 int in_use;
683 __be16 window;
684 u16 vlan_tag;
685 u32 cur_tsval;
686 __be32 cur_tsecr;
687 u8 saw_ts;
688} ____cacheline_aligned;
689
690/* Ring specific structure */
691struct ring_info {
692 /* The ring number */
693 int ring_no;
694
695 /* per-ring buffer counter */
696 u32 rx_bufs_left;
697
698#define MAX_LRO_SESSIONS 32
699 struct lro lro0_n[MAX_LRO_SESSIONS];
700 u8 lro;
701
702 /* copy of sp->rxd_mode flag */
703 int rxd_mode;
704
705 /* Number of rxds per block for the rxd_mode */
706 int rxd_count;
707
708 /* copy of sp pointer */
709 struct s2io_nic *nic;
710
711 /* copy of sp->dev pointer */
712 struct net_device *dev;
713
714 /* copy of sp->pdev pointer */
715 struct pci_dev *pdev;
716
717 /* Per ring napi struct */
718 struct napi_struct napi;
719
720 unsigned long interrupt_count;
721
722 /*
723 * Place holders for the virtual and physical addresses of
724 * all the Rx Blocks
725 */
726 struct rx_block_info rx_blocks[MAX_RX_BLOCKS_PER_RING];
727 int block_count;
728 int pkt_cnt;
729
730 /*
731 * Put pointer info which indicates which RxD has to be replenished
732 * with a new buffer.
733 */
734 struct rx_curr_put_info rx_curr_put_info;
735
736 /*
737 * Get pointer info which indicates which is the last RxD that was
738 * processed by the driver.
739 */
740 struct rx_curr_get_info rx_curr_get_info;
741
742 /* interface MTU value */
743 unsigned mtu;
744
745 /* Buffer Address store. */
746 struct buffAdd **ba;
747} ____cacheline_aligned;
748
749/* Fifo specific structure */
750struct fifo_info {
751 /* FIFO number */
752 int fifo_no;
753
754 /* Maximum TxDs per TxDL */
755 int max_txds;
756
757 /* Place holder of all the TX List's Phy and Virt addresses. */
758 struct list_info_hold *list_info;
759
760 /*
761 * Current offset within the tx FIFO where driver would write
762 * new Tx frame
763 */
764 struct tx_curr_put_info tx_curr_put_info;
765
766 /*
767 * Current offset within tx FIFO from where the driver would start freeing
768 * the buffers
769 */
770 struct tx_curr_get_info tx_curr_get_info;
771#define FIFO_QUEUE_START 0
772#define FIFO_QUEUE_STOP 1
773 int queue_state;
774
775 /* copy of sp->dev pointer */
776 struct net_device *dev;
777
778 /* copy of multiq status */
779 u8 multiq;
780
781 /* Per fifo lock */
782 spinlock_t tx_lock;
783
784 /* Per fifo UFO in band structure */
785 u64 *ufo_in_band_v;
786
787 struct s2io_nic *nic;
788} ____cacheline_aligned;
789
790/* Information related to the Tx and Rx FIFOs and Rings of Xena
791 * is maintained in this structure.
792 */
793struct mac_info {
794/* tx side stuff */
795 /* logical pointer of start of each Tx FIFO */
796 struct TxFIFO_element __iomem *tx_FIFO_start[MAX_TX_FIFOS];
797
798 /* Fifo specific structure */
799 struct fifo_info fifos[MAX_TX_FIFOS];
800
801 /* Save virtual address of TxD page with zero DMA addr(if any) */
802 void *zerodma_virt_addr;
803
804/* rx side stuff */
805 /* Ring specific structure */
806 struct ring_info rings[MAX_RX_RINGS];
807
808 u16 rmac_pause_time;
809 u16 mc_pause_threshold_q0q3;
810 u16 mc_pause_threshold_q4q7;
811
812 void *stats_mem; /* original pointer to allocated mem */
813 dma_addr_t stats_mem_phy; /* Physical address of the stat block */
814 u32 stats_mem_sz;
815 struct stat_block *stats_info; /* Logical address of the stat block */
816};
817
818/* Default Tunable parameters of the NIC. */
819#define DEFAULT_FIFO_0_LEN 4096
820#define DEFAULT_FIFO_1_7_LEN 512
821#define SMALL_BLK_CNT 30
822#define LARGE_BLK_CNT 100
823
824/*
825 * Structure to keep track of the MSI-X vectors and the corresponding
826 * argument registered against each vector
827 */
828#define MAX_REQUESTED_MSI_X 9
829struct s2io_msix_entry
830{
831 u16 vector;
832 u16 entry;
833 void *arg;
834
835 u8 type;
836#define MSIX_ALARM_TYPE 1
837#define MSIX_RING_TYPE 2
838
839 u8 in_use;
840#define MSIX_REGISTERED_SUCCESS 0xAA
841};
842
843struct msix_info_st {
844 u64 addr;
845 u64 data;
846};
847
848/* These flags represent the devices temporary state */
849enum s2io_device_state_t
850{
851 __S2IO_STATE_LINK_TASK=0,
852 __S2IO_STATE_CARD_UP
853};
854
855/* Structure representing one instance of the NIC */
856struct s2io_nic {
857 int rxd_mode;
858 /*
859 * Count of packets to be processed in a given iteration, it will be indicated
860 * by the quota field of the device structure when NAPI is enabled.
861 */
862 int pkts_to_process;
863 struct net_device *dev;
864 struct mac_info mac_control;
865 struct config_param config;
866 struct pci_dev *pdev;
867 void __iomem *bar0;
868 void __iomem *bar1;
869#define MAX_MAC_SUPPORTED 16
870#define MAX_SUPPORTED_MULTICASTS MAX_MAC_SUPPORTED
871
872 struct mac_addr def_mac_addr[256];
873
874 struct net_device_stats stats;
875 int high_dma_flag;
876 int device_enabled_once;
877
878 char name[60];
879
880 /* Timer that handles I/O errors/exceptions */
881 struct timer_list alarm_timer;
882
883 /* Space to back up the PCI config space */
884 u32 config_space[256 / sizeof(u32)];
885
886#define PROMISC 1
887#define ALL_MULTI 2
888
889#define MAX_ADDRS_SUPPORTED 64
890 u16 mc_addr_count;
891
892 u16 m_cast_flg;
893 u16 all_multi_pos;
894 u16 promisc_flg;
895
896 /* Restart timer, used to restart NIC if the device is stuck and
897 * a scheduled task that will set the correct Link state once the
898 * NIC's PHY has stabilized after a state change.
899 */
900 struct work_struct rst_timer_task;
901 struct work_struct set_link_task;
902
903 /* Flag that can be used to turn on or turn off the Rx checksum
904 * offload feature.
905 */
906 int rx_csum;
907
908 /* Below variables are used for fifo selection to transmit a packet */
909 u16 fifo_selector[MAX_TX_FIFOS];
910
911 /* Total fifos for tcp packets */
912 u8 total_tcp_fifos;
913
914 /*
915 * Beginning index of udp for udp packets
916 * Value will be equal to
917 * (tx_fifo_num - FIFO_UDP_MAX_NUM - FIFO_OTHER_MAX_NUM)
918 */
919 u8 udp_fifo_idx;
920
921 u8 total_udp_fifos;
922
923 /*
924 * Beginning index of fifo for all other packets
925 * Value will be equal to (tx_fifo_num - FIFO_OTHER_MAX_NUM)
926 */
927 u8 other_fifo_idx;
928
929 struct napi_struct napi;
930 /* after blink, the adapter must be restored with original
931 * values.
932 */
933 u64 adapt_ctrl_org;
934
935 /* Last known link state. */
936 u16 last_link_state;
937#define LINK_DOWN 1
938#define LINK_UP 2
939
940 int task_flag;
941 unsigned long long start_time;
942 int vlan_strip_flag;
943#define MSIX_FLG 0xA5
944 int num_entries;
945 struct msix_entry *entries;
946 int msi_detected;
947 wait_queue_head_t msi_wait;
948 struct s2io_msix_entry *s2io_entries;
949 char desc[MAX_REQUESTED_MSI_X][25];
950
951 int avail_msix_vectors; /* No. of MSI-X vectors granted by system */
952
953 struct msix_info_st msix_info[0x3f];
954
955#define XFRAME_I_DEVICE 1
956#define XFRAME_II_DEVICE 2
957 u8 device_type;
958
959 unsigned long clubbed_frms_cnt;
960 unsigned long sending_both;
961 u16 lro_max_aggr_per_sess;
962 volatile unsigned long state;
963 u64 general_int_mask;
964
965#define VPD_STRING_LEN 80
966 u8 product_name[VPD_STRING_LEN];
967 u8 serial_num[VPD_STRING_LEN];
968};
969
970#define RESET_ERROR 1
971#define CMD_ERROR 2
972
973/* OS related system calls */
974#ifndef readq
975static inline u64 readq(void __iomem *addr)
976{
977 u64 ret = 0;
978 ret = readl(addr + 4);
979 ret <<= 32;
980 ret |= readl(addr);
981
982 return ret;
983}
984#endif
985
986#ifndef writeq
987static inline void writeq(u64 val, void __iomem *addr)
988{
989 writel((u32) (val), addr);
990 writel((u32) (val >> 32), (addr + 4));
991}
992#endif
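/*
 * These fallbacks (used on platforms without native 64-bit MMIO accessors)
 * assume the 64-bit registers are laid out as two little-endian 32-bit
 * halves: the low dword at addr and the high dword at addr + 4.
 */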
993
994/*
995 * Some registers have to be written in a particular order to
996 * expect correct hardware operation. The macro SPECIAL_REG_WRITE
997 * is used to perform such ordered writes. Defines UF (Upper First)
998 * and LF (Lower First) will be used to specify the required write order.
999 */
1000#define UF 1
1001#define LF 2
1002static inline void SPECIAL_REG_WRITE(u64 val, void __iomem *addr, int order)
1003{
1004 if (order == LF) {
1005 writel((u32) (val), addr);
1006 (void) readl(addr);
1007 writel((u32) (val >> 32), (addr + 4));
1008 (void) readl(addr + 4);
1009 } else {
1010 writel((u32) (val >> 32), (addr + 4));
1011 (void) readl(addr + 4);
1012 writel((u32) (val), addr);
1013 (void) readl(addr);
1014 }
1015}
1016
1017/* Interrupt related values of Xena */
1018
1019#define ENABLE_INTRS 1
1020#define DISABLE_INTRS 2
1021
1022/* Highest level interrupt blocks */
1023#define TX_PIC_INTR (0x0001<<0)
1024#define TX_DMA_INTR (0x0001<<1)
1025#define TX_MAC_INTR (0x0001<<2)
1026#define TX_XGXS_INTR (0x0001<<3)
1027#define TX_TRAFFIC_INTR (0x0001<<4)
1028#define RX_PIC_INTR (0x0001<<5)
1029#define RX_DMA_INTR (0x0001<<6)
1030#define RX_MAC_INTR (0x0001<<7)
1031#define RX_XGXS_INTR (0x0001<<8)
1032#define RX_TRAFFIC_INTR (0x0001<<9)
1033#define MC_INTR (0x0001<<10)
1034#define ENA_ALL_INTRS ( TX_PIC_INTR | \
1035 TX_DMA_INTR | \
1036 TX_MAC_INTR | \
1037 TX_XGXS_INTR | \
1038 TX_TRAFFIC_INTR | \
1039 RX_PIC_INTR | \
1040 RX_DMA_INTR | \
1041 RX_MAC_INTR | \
1042 RX_XGXS_INTR | \
1043 RX_TRAFFIC_INTR | \
1044 MC_INTR )
1045
1046/* Interrupt masks for the general interrupt mask register */
1047#define DISABLE_ALL_INTRS 0xFFFFFFFFFFFFFFFFULL
1048
1049#define TXPIC_INT_M s2BIT(0)
1050#define TXDMA_INT_M s2BIT(1)
1051#define TXMAC_INT_M s2BIT(2)
1052#define TXXGXS_INT_M s2BIT(3)
1053#define TXTRAFFIC_INT_M s2BIT(8)
1054#define PIC_RX_INT_M s2BIT(32)
1055#define RXDMA_INT_M s2BIT(33)
1056#define RXMAC_INT_M s2BIT(34)
1057#define MC_INT_M s2BIT(35)
1058#define RXXGXS_INT_M s2BIT(36)
1059#define RXTRAFFIC_INT_M s2BIT(40)
1060
1061/* PIC level Interrupts TODO*/
1062
1063/* DMA level Interrupts */
1064#define TXDMA_PFC_INT_M s2BIT(0)
1065#define TXDMA_PCC_INT_M s2BIT(2)
1066
1067/* PFC block interrupts */
1068#define PFC_MISC_ERR_1 s2BIT(0) /* Interrupt to indicate FIFO full */
1069
1070/* PCC block interrupts. */
1071#define PCC_FB_ECC_ERR vBIT(0xff, 16, 8) /* Interrupt to indicate
1072 PCC_FB_ECC Error. */
1073
1074#define RXD_GET_VLAN_TAG(Control_2) (u16)(Control_2 & MASK_VLAN_TAG)
1075/*
1076 * Prototype declaration.
1077 */
1078static int __devinit s2io_init_nic(struct pci_dev *pdev,
1079 const struct pci_device_id *pre);
1080static void __devexit s2io_rem_nic(struct pci_dev *pdev);
1081static int init_shared_mem(struct s2io_nic *sp);
1082static void free_shared_mem(struct s2io_nic *sp);
1083static int init_nic(struct s2io_nic *nic);
1084static int rx_intr_handler(struct ring_info *ring_data, int budget);
1085static void s2io_txpic_intr_handle(struct s2io_nic *sp);
1086static void tx_intr_handler(struct fifo_info *fifo_data);
1087static void s2io_handle_errors(void * dev_id);
1088
1089static int s2io_starter(void);
1090static void s2io_closer(void);
1091static void s2io_tx_watchdog(struct net_device *dev);
1092static void s2io_set_multicast(struct net_device *dev);
1093static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp);
1094static void s2io_link(struct s2io_nic * sp, int link);
1095static void s2io_reset(struct s2io_nic * sp);
1096static int s2io_poll_msix(struct napi_struct *napi, int budget);
1097static int s2io_poll_inta(struct napi_struct *napi, int budget);
1098static void s2io_init_pci(struct s2io_nic * sp);
1099static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr);
1100static void s2io_alarm_handle(unsigned long data);
1101static irqreturn_t
1102s2io_msix_ring_handle(int irq, void *dev_id);
1103static irqreturn_t
1104s2io_msix_fifo_handle(int irq, void *dev_id);
1105static irqreturn_t s2io_isr(int irq, void *dev_id);
1106static int verify_xena_quiescence(struct s2io_nic *sp);
1107static const struct ethtool_ops netdev_ethtool_ops;
1108static void s2io_set_link(struct work_struct *work);
1109static int s2io_set_swapper(struct s2io_nic * sp);
1110static void s2io_card_down(struct s2io_nic *nic);
1111static int s2io_card_up(struct s2io_nic *nic);
1112static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
1113 int bit_state);
1114static int s2io_add_isr(struct s2io_nic * sp);
1115static void s2io_rem_isr(struct s2io_nic * sp);
1116
1117static void restore_xmsi_data(struct s2io_nic *nic);
1118static void do_s2io_store_unicast_mc(struct s2io_nic *sp);
1119static void do_s2io_restore_unicast_mc(struct s2io_nic *sp);
1120static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset);
1121static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr);
1122static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int offset);
1123static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr);
1124
1125static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
1126 u8 **tcp, u32 *tcp_len, struct lro **lro, struct RxD_t *rxdp,
1127 struct s2io_nic *sp);
1128static void clear_lro_session(struct lro *lro);
1129static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag);
1130static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro);
1131static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
1132 struct sk_buff *skb, u32 tcp_len);
1133static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring);
1134
1135static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
1136 pci_channel_state_t state);
1137static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev);
1138static void s2io_io_resume(struct pci_dev *pdev);
1139
1140#define s2io_tcp_mss(skb) skb_shinfo(skb)->gso_size
1141#define s2io_udp_mss(skb) skb_shinfo(skb)->gso_size
1142#define s2io_offload_type(skb) skb_shinfo(skb)->gso_type
1143
1144#define S2IO_PARM_INT(X, def_val) \
1145 static unsigned int X = def_val;\
1146 module_param(X , uint, 0);
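/*
 * Illustrative use: S2IO_PARM_INT(lro_max_pkts, 0xFFFF) declares a static
 * unsigned int named lro_max_pkts, initialized to 0xFFFF and exposed as a
 * module parameter of the same name.
 */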
1147
1148#endif /* _S2IO_H */
diff --git a/drivers/net/ethernet/neterion/vxge/Makefile b/drivers/net/ethernet/neterion/vxge/Makefile
new file mode 100644
index 000000000000..b625e2c503f5
--- /dev/null
+++ b/drivers/net/ethernet/neterion/vxge/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile for Exar Corp's X3100 Series 10 GbE PCIe I/O
3# Virtualized Server Adapter linux driver
4
5obj-$(CONFIG_VXGE) += vxge.o
6
7vxge-objs := vxge-config.o vxge-traffic.o vxge-ethtool.o vxge-main.o
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
new file mode 100644
index 000000000000..1520c574cb20
--- /dev/null
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -0,0 +1,5123 @@
1/******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
9 *
10 * vxge-config.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/
14#include <linux/vmalloc.h>
15#include <linux/etherdevice.h>
16#include <linux/pci.h>
17#include <linux/pci_hotplug.h>
18#include <linux/slab.h>
19
20#include "vxge-traffic.h"
21#include "vxge-config.h"
22#include "vxge-main.h"
23
24#define VXGE_HW_VPATH_STATS_PIO_READ(offset) { \
25 status = __vxge_hw_vpath_stats_access(vpath, \
26 VXGE_HW_STATS_OP_READ, \
27 offset, \
28 &val64); \
29 if (status != VXGE_HW_OK) \
30 return status; \
31}
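/* Note: this macro expects "vpath", "status" and "val64" to be in scope in
 * the caller and returns from the *calling* function on error; see
 * __vxge_hw_vpath_stats_get() below for how it is used.
 */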
32
33static void
34vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
35{
36 u64 val64;
37
38 val64 = readq(&vp_reg->rxmac_vcfg0);
39 val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
40 writeq(val64, &vp_reg->rxmac_vcfg0);
41 val64 = readq(&vp_reg->rxmac_vcfg0);
42}
43
44/*
45 * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle
46 */
47int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id)
48{
49 struct vxge_hw_vpath_reg __iomem *vp_reg;
50 struct __vxge_hw_virtualpath *vpath;
51 u64 val64, rxd_count, rxd_spat;
52 int count = 0, total_count = 0;
53
54 vpath = &hldev->virtual_paths[vp_id];
55 vp_reg = vpath->vp_reg;
56
57 vxge_hw_vpath_set_zero_rx_frm_len(vp_reg);
58
59 /* Check that the ring controller for this vpath has enough free RxDs
60 * to send frames to the host. This is done by reading the
61 * PRC_RXD_DOORBELL_VPn register and comparing the read value to the
62 * RXD_SPAT value for the vpath.
63 */
64 val64 = readq(&vp_reg->prc_cfg6);
65 rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1;
66 /* Use a factor of 2 when comparing rxd_count against rxd_spat for some
67 * leg room.
68 */
69 rxd_spat *= 2;
70
71 do {
72 mdelay(1);
73
74 rxd_count = readq(&vp_reg->prc_rxd_doorbell);
75
76 /* Check that the ring controller for this vpath does
77 * not have any frame in its pipeline.
78 */
79 val64 = readq(&vp_reg->frm_in_progress_cnt);
80 if ((rxd_count <= rxd_spat) || (val64 > 0))
81 count = 0;
82 else
83 count++;
84 total_count++;
85 } while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) &&
86 (total_count < VXGE_HW_MAX_POLLING_COUNT));
87
88 if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
89 printk(KERN_ALERT "%s: Still Receiving traffic. Abort wait\n",
90 __func__);
91
92 return total_count;
93}
94
95/* vxge_hw_device_wait_receive_idle - This function waits until all frames
96 * stored in the frame buffer for each vpath assigned to the given
97 * function (hldev) have been sent to the host.
98 */
99void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
100{
101 int i, total_count = 0;
102
103 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
104 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
105 continue;
106
107 total_count += vxge_hw_vpath_wait_receive_idle(hldev, i);
108 if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
109 break;
110 }
111}
112
113/*
114 * __vxge_hw_device_register_poll
115 * Will poll certain register for specified amount of time.
116 * Polls until the masked bit is cleared, or gives up after the timeout.
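 * (Two-phase: roughly 1 ms of 100 us busy-waits first, then 1 ms delays up
 * to max_millis, so fast completions return without long spins.)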
117 */
118static enum vxge_hw_status
119__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
120{
121 u64 val64;
122 u32 i = 0;
123 enum vxge_hw_status ret = VXGE_HW_FAIL;
124
125 udelay(10);
126
127 do {
128 val64 = readq(reg);
129 if (!(val64 & mask))
130 return VXGE_HW_OK;
131 udelay(100);
132 } while (++i <= 9);
133
134 i = 0;
135 do {
136 val64 = readq(reg);
137 if (!(val64 & mask))
138 return VXGE_HW_OK;
139 mdelay(1);
140 } while (++i <= max_millis);
141
142 return ret;
143}
144
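/*
 * __vxge_hw_pio_mem_write64 - Write a 64-bit register value as two 32-bit
 * PIO writes (lower half first) separated by write barriers, then poll the
 * register until the mask bit clears or the wait times out.
 */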
145static inline enum vxge_hw_status
146__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
147 u64 mask, u32 max_millis)
148{
149 __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
150 wmb();
151 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
152 wmb();
153
154 return __vxge_hw_device_register_poll(addr, mask, max_millis);
155}
156
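/*
 * vxge_hw_vpath_fw_api - Issue a firmware command through the RTS steering
 * interface. @data0 and @data1 carry the command operands in and the results
 * out; on success @steer_ctrl returns the raw steering control word. The
 * vpath lock is held across the operation only while the vpath is open.
 */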
157static enum vxge_hw_status
158vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
159 u32 fw_memo, u32 offset, u64 *data0, u64 *data1,
160 u64 *steer_ctrl)
161{
162 struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
163 enum vxge_hw_status status;
164 u64 val64;
165 u32 retry = 0, max_retry = 3;
166
167 spin_lock(&vpath->lock);
168 if (!vpath->vp_open) {
169 spin_unlock(&vpath->lock);
170 max_retry = 100;
171 }
172
173 writeq(*data0, &vp_reg->rts_access_steer_data0);
174 writeq(*data1, &vp_reg->rts_access_steer_data1);
175 wmb();
176
177 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
178 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) |
179 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
180 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
181 *steer_ctrl;
182
183 status = __vxge_hw_pio_mem_write64(val64,
184 &vp_reg->rts_access_steer_ctrl,
185 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
186 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
187
188 /* The __vxge_hw_device_register_poll can udelay for a significant
189 * amount of time, blocking other processes from the CPU. If it delays
190 * for ~5secs, an NMI error can occur. A way around this is to give up
191 * the processor via msleep, but this is not allowed while under lock.
192 * So, only allow it to sleep for ~4secs if open. Otherwise, delay for
193 * 1sec and sleep for 10ms until the firmware operation has completed
194 * or timed out.
195 */
196 while ((status != VXGE_HW_OK) && retry++ < max_retry) {
197 if (!vpath->vp_open)
198 msleep(20);
199 status = __vxge_hw_device_register_poll(
200 &vp_reg->rts_access_steer_ctrl,
201 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
202 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
203 }
204
205 if (status != VXGE_HW_OK)
206 goto out;
207
208 val64 = readq(&vp_reg->rts_access_steer_ctrl);
209 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
210 *data0 = readq(&vp_reg->rts_access_steer_data0);
211 *data1 = readq(&vp_reg->rts_access_steer_data1);
212 *steer_ctrl = val64;
213 } else
214 status = VXGE_HW_FAIL;
215
216out:
217 if (vpath->vp_open)
218 spin_unlock(&vpath->lock);
219 return status;
220}
221
222enum vxge_hw_status
223vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
224 u32 *minor, u32 *build)
225{
226 u64 data0 = 0, data1 = 0, steer_ctrl = 0;
227 struct __vxge_hw_virtualpath *vpath;
228 enum vxge_hw_status status;
229
230 vpath = &hldev->virtual_paths[hldev->first_vp_id];
231
232 status = vxge_hw_vpath_fw_api(vpath,
233 VXGE_HW_FW_UPGRADE_ACTION,
234 VXGE_HW_FW_UPGRADE_MEMO,
235 VXGE_HW_FW_UPGRADE_OFFSET_READ,
236 &data0, &data1, &steer_ctrl);
237 if (status != VXGE_HW_OK)
238 return status;
239
240 *major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
241 *minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
242 *build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
243
244 return status;
245}
246
247enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev)
248{
249 u64 data0 = 0, data1 = 0, steer_ctrl = 0;
250 struct __vxge_hw_virtualpath *vpath;
251 enum vxge_hw_status status;
252 u32 ret;
253
254 vpath = &hldev->virtual_paths[hldev->first_vp_id];
255
256 status = vxge_hw_vpath_fw_api(vpath,
257 VXGE_HW_FW_UPGRADE_ACTION,
258 VXGE_HW_FW_UPGRADE_MEMO,
259 VXGE_HW_FW_UPGRADE_OFFSET_COMMIT,
260 &data0, &data1, &steer_ctrl);
261 if (status != VXGE_HW_OK) {
262 vxge_debug_init(VXGE_ERR, "%s: FW upgrade failed", __func__);
263 goto exit;
264 }
265
266 ret = VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(steer_ctrl) & 0x7F;
267 if (ret != 1) {
268 vxge_debug_init(VXGE_ERR, "%s: FW commit failed with error %d",
269 __func__, ret);
270 status = VXGE_HW_FAIL;
271 }
272
273exit:
274 return status;
275}
276
277enum vxge_hw_status
278vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size)
279{
280 u64 data0 = 0, data1 = 0, steer_ctrl = 0;
281 struct __vxge_hw_virtualpath *vpath;
282 enum vxge_hw_status status;
283 int ret_code, sec_code;
284
285 vpath = &hldev->virtual_paths[hldev->first_vp_id];
286
287 /* send upgrade start command */
288 status = vxge_hw_vpath_fw_api(vpath,
289 VXGE_HW_FW_UPGRADE_ACTION,
290 VXGE_HW_FW_UPGRADE_MEMO,
291 VXGE_HW_FW_UPGRADE_OFFSET_START,
292 &data0, &data1, &steer_ctrl);
293 if (status != VXGE_HW_OK) {
294 vxge_debug_init(VXGE_ERR, " %s: Upgrade start cmd failed",
295 __func__);
296 return status;
297 }
298
299 /* Transfer fw image to adapter 16 bytes at a time */
300 for (; size > 0; size -= VXGE_HW_FW_UPGRADE_BLK_SIZE) {
301 steer_ctrl = 0;
302
303 /* The next 128bits of fwdata to be loaded onto the adapter */
304 data0 = *((u64 *)fwdata);
305 data1 = *((u64 *)fwdata + 1);
306
307 status = vxge_hw_vpath_fw_api(vpath,
308 VXGE_HW_FW_UPGRADE_ACTION,
309 VXGE_HW_FW_UPGRADE_MEMO,
310 VXGE_HW_FW_UPGRADE_OFFSET_SEND,
311 &data0, &data1, &steer_ctrl);
312 if (status != VXGE_HW_OK) {
313 vxge_debug_init(VXGE_ERR, "%s: Upgrade send failed",
314 __func__);
315 goto out;
316 }
317
318 ret_code = VXGE_HW_UPGRADE_GET_RET_ERR_CODE(data0);
319 switch (ret_code) {
320 case VXGE_HW_FW_UPGRADE_OK:
321 /* All OK, send next 16 bytes. */
322 break;
323 case VXGE_FW_UPGRADE_BYTES2SKIP:
324 /* skip bytes in the stream */
325 fwdata += (data0 >> 8) & 0xFFFFFFFF;
326 break;
327 case VXGE_HW_FW_UPGRADE_DONE:
328 goto out;
329 case VXGE_HW_FW_UPGRADE_ERR:
330 sec_code = VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(data0);
331 switch (sec_code) {
332 case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1:
333 case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7:
334 printk(KERN_ERR
335 "corrupted data from .ncf file\n");
336 break;
337 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3:
338 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4:
339 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5:
340 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6:
341 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8:
342 printk(KERN_ERR "invalid .ncf file\n");
343 break;
344 case VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW:
345 printk(KERN_ERR "buffer overflow\n");
346 break;
347 case VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH:
348 printk(KERN_ERR "failed to flash the image\n");
349 break;
350 case VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN:
351 printk(KERN_ERR
352 "generic error. Unknown error type\n");
353 break;
354 default:
355 printk(KERN_ERR "Unknown error of type %d\n",
356 sec_code);
357 break;
358 }
359 status = VXGE_HW_FAIL;
360 goto out;
361 default:
362 printk(KERN_ERR "Unknown FW error: %d\n", ret_code);
363 status = VXGE_HW_FAIL;
364 goto out;
365 }
366 /* point to next 16 bytes */
367 fwdata += VXGE_HW_FW_UPGRADE_BLK_SIZE;
368 }
369out:
370 return status;
371}
372
373enum vxge_hw_status
374vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
375 struct eprom_image *img)
376{
377 u64 data0 = 0, data1 = 0, steer_ctrl = 0;
378 struct __vxge_hw_virtualpath *vpath;
379 enum vxge_hw_status status;
380 int i;
381
382 vpath = &hldev->virtual_paths[hldev->first_vp_id];
383
384 for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
385 data0 = VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(i);
386 data1 = steer_ctrl = 0;
387
388 status = vxge_hw_vpath_fw_api(vpath,
389 VXGE_HW_FW_API_GET_EPROM_REV,
390 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
391 0, &data0, &data1, &steer_ctrl);
392 if (status != VXGE_HW_OK)
393 break;
394
395 img[i].is_valid = VXGE_HW_GET_EPROM_IMAGE_VALID(data0);
396 img[i].index = VXGE_HW_GET_EPROM_IMAGE_INDEX(data0);
397 img[i].type = VXGE_HW_GET_EPROM_IMAGE_TYPE(data0);
398 img[i].version = VXGE_HW_GET_EPROM_IMAGE_REV(data0);
399 }
400
401 return status;
402}
403
404/*
405 * __vxge_hw_channel_free - Free memory allocated for channel
406 * This function deallocates memory from the channel and various arrays
407 * in the channel
408 */
409static void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
410{
411 kfree(channel->work_arr);
412 kfree(channel->free_arr);
413 kfree(channel->reserve_arr);
414 kfree(channel->orig_arr);
415 kfree(channel);
416}
417
418/*
419 * __vxge_hw_channel_initialize - Initialize a channel
420 * This function initializes a channel by properly setting the
421 * various references
422 */
423static enum vxge_hw_status
424__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
425{
426 u32 i;
427 struct __vxge_hw_virtualpath *vpath;
428
429 vpath = channel->vph->vpath;
430
431 if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
432 for (i = 0; i < channel->length; i++)
433 channel->orig_arr[i] = channel->reserve_arr[i];
434 }
435
436 switch (channel->type) {
437 case VXGE_HW_CHANNEL_TYPE_FIFO:
438 vpath->fifoh = (struct __vxge_hw_fifo *)channel;
439 channel->stats = &((struct __vxge_hw_fifo *)
440 channel)->stats->common_stats;
441 break;
442 case VXGE_HW_CHANNEL_TYPE_RING:
443 vpath->ringh = (struct __vxge_hw_ring *)channel;
444 channel->stats = &((struct __vxge_hw_ring *)
445 channel)->stats->common_stats;
446 break;
447 default:
448 break;
449 }
450
451 return VXGE_HW_OK;
452}
453
454/*
455 * __vxge_hw_channel_reset - Resets a channel
456 * This function resets a channel by properly setting the various references
457 */
458static enum vxge_hw_status
459__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
460{
461 u32 i;
462
463 for (i = 0; i < channel->length; i++) {
464 if (channel->reserve_arr != NULL)
465 channel->reserve_arr[i] = channel->orig_arr[i];
466 if (channel->free_arr != NULL)
467 channel->free_arr[i] = NULL;
468 if (channel->work_arr != NULL)
469 channel->work_arr[i] = NULL;
470 }
471 channel->free_ptr = channel->length;
472 channel->reserve_ptr = channel->length;
473 channel->reserve_top = 0;
474 channel->post_index = 0;
475 channel->compl_index = 0;
476
477 return VXGE_HW_OK;
478}
479
480/*
481 * __vxge_hw_device_pci_e_init
482 * Initialize certain PCI/PCI-X configuration registers
483 * with recommended values. Save config space for future hw resets.
484 */
485static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
486{
487 u16 cmd = 0;
488
489 /* Set the PErr Response bit and SERR in the PCI command register. */
490 pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
491 cmd |= 0x140;
492 pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);
493
494 pci_save_state(hldev->pdev);
495}
496
497/* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
498 * in progress
499 * This routine checks whether the vpath reset-in-progress register has cleared to zero
500 */
501static enum vxge_hw_status
502__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
503{
504 enum vxge_hw_status status;
505 status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
506 VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
507 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
508 return status;
509}
510
511/*
512 * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
513 * Set the swapper bits appropriately for the legacy section.
514 */
515static enum vxge_hw_status
516__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
517{
518 u64 val64;
519 enum vxge_hw_status status = VXGE_HW_OK;
520
521 val64 = readq(&legacy_reg->toc_swapper_fb);
522
523 wmb();
524
525 switch (val64) {
526 case VXGE_HW_SWAPPER_INITIAL_VALUE:
527 return status;
528
529 case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
530 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
531 &legacy_reg->pifm_rd_swap_en);
532 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
533 &legacy_reg->pifm_rd_flip_en);
534 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
535 &legacy_reg->pifm_wr_swap_en);
536 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
537 &legacy_reg->pifm_wr_flip_en);
538 break;
539
540 case VXGE_HW_SWAPPER_BYTE_SWAPPED:
541 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
542 &legacy_reg->pifm_rd_swap_en);
543 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
544 &legacy_reg->pifm_wr_swap_en);
545 break;
546
547 case VXGE_HW_SWAPPER_BIT_FLIPPED:
548 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
549 &legacy_reg->pifm_rd_flip_en);
550 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
551 &legacy_reg->pifm_wr_flip_en);
552 break;
553 }
554
555 wmb();
556
557 val64 = readq(&legacy_reg->toc_swapper_fb);
558
559 if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
560 status = VXGE_HW_ERR_SWAPPER_CTRL;
561
562 return status;
563}
564
565/*
566 * __vxge_hw_device_toc_get
567 * This routine sets the swapper and reads the toc pointer and returns the
568 * memory mapped address of the toc
569 */
570static struct vxge_hw_toc_reg __iomem *
571__vxge_hw_device_toc_get(void __iomem *bar0)
572{
573 u64 val64;
574 struct vxge_hw_toc_reg __iomem *toc = NULL;
575 enum vxge_hw_status status;
576
577 struct vxge_hw_legacy_reg __iomem *legacy_reg =
578 (struct vxge_hw_legacy_reg __iomem *)bar0;
579
580 status = __vxge_hw_legacy_swapper_set(legacy_reg);
581 if (status != VXGE_HW_OK)
582 goto exit;
583
584 val64 = readq(&legacy_reg->toc_first_pointer);
585 toc = bar0 + val64;
586exit:
587 return toc;
588}
589
590/*
591 * __vxge_hw_device_reg_addr_get
592 * This routine sets the swapper and reads the toc pointer and initializes the
593 * register location pointers in the device object. It waits until the ric has
594 * completed initializing the registers.
595 */
596static enum vxge_hw_status
597__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
598{
599 u64 val64;
600 u32 i;
601 enum vxge_hw_status status = VXGE_HW_OK;
602
603 hldev->legacy_reg = hldev->bar0;
604
605 hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
606 if (hldev->toc_reg == NULL) {
607 status = VXGE_HW_FAIL;
608 goto exit;
609 }
610
611 val64 = readq(&hldev->toc_reg->toc_common_pointer);
612 hldev->common_reg = hldev->bar0 + val64;
613
614 val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
615 hldev->mrpcim_reg = hldev->bar0 + val64;
616
617 for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
618 val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
619 hldev->srpcim_reg[i] = hldev->bar0 + val64;
620 }
621
622 for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
623 val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
624 hldev->vpmgmt_reg[i] = hldev->bar0 + val64;
625 }
626
627 for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
628 val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
629 hldev->vpath_reg[i] = hldev->bar0 + val64;
630 }
631
632 val64 = readq(&hldev->toc_reg->toc_kdfc);
633
634 switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
635 case 0:
636  hldev->kdfc = hldev->bar0 + VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64);
637 break;
638 default:
639 break;
640 }
641
642 status = __vxge_hw_device_vpath_reset_in_prog_check(
643 (u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
644exit:
645 return status;
646}
647
648/*
649 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
650 * This routine returns the Access Rights of the driver
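 * Every function starts with VPATH access; MRPCIM and/or SRPCIM rights are
 * added depending on the host type and, for a normal function, on whether
 * func_id is 0.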
651 */
652static u32
653__vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
654{
655 u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;
656
657 switch (host_type) {
658 case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
659 if (func_id == 0) {
660 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
661 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
662 }
663 break;
664 case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
665 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
666 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
667 break;
668 case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
669 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
670 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
671 break;
672 case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
673 case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
674 case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
675 break;
676 case VXGE_HW_SR_VH_FUNCTION0:
677 case VXGE_HW_VH_NORMAL_FUNCTION:
678 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
679 break;
680 }
681
682 return access_rights;
683}
684/*
685 * __vxge_hw_device_is_privilaged
686 * This routine checks whether the device function is privileged or not
687 */
688
689enum vxge_hw_status
690__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
691{
692 if (__vxge_hw_device_access_rights_get(host_type,
693 func_id) &
694 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)
695 return VXGE_HW_OK;
696 else
697 return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
698}
699
700/*
701 * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
702 * Returns the function number of the vpath.
703 */
704static u32
705__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
706{
707 u64 val64;
708
709 val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
710
711 return
712 (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
713}
714
715/*
716 * __vxge_hw_device_host_info_get
717 * This routine returns the host type assignments
718 */
719static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
720{
721 u64 val64;
722 u32 i;
723
724 val64 = readq(&hldev->common_reg->host_type_assignments);
725
726 hldev->host_type =
727 (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
728
729 hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);
730
731 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
732 if (!(hldev->vpath_assignments & vxge_mBIT(i)))
733 continue;
734
735 hldev->func_id =
736 __vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);
737
738 hldev->access_rights = __vxge_hw_device_access_rights_get(
739 hldev->host_type, hldev->func_id);
740
741 hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN;
742 hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i];
743
744 hldev->first_vp_id = i;
745 break;
746 }
747}
748
749/*
750 * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as
751 * link width and signalling rate.
752 */
753static enum vxge_hw_status
754__vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
755{
756 struct pci_dev *dev = hldev->pdev;
757 u16 lnk;
758
759 /* Get the negotiated link width and speed from PCI config space */
760 pci_read_config_word(dev, dev->pcie_cap + PCI_EXP_LNKSTA, &lnk);
761
762 if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
763 return VXGE_HW_ERR_INVALID_PCI_INFO;
764
765 switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) {
766 case PCIE_LNK_WIDTH_RESRV:
767 case PCIE_LNK_X1:
768 case PCIE_LNK_X2:
769 case PCIE_LNK_X4:
770 case PCIE_LNK_X8:
771 break;
772 default:
773 return VXGE_HW_ERR_INVALID_PCI_INFO;
774 }
775
776 return VXGE_HW_OK;
777}
778
779/*
780 * __vxge_hw_device_initialize
781 * Initialize Titan-V hardware.
782 */
783static enum vxge_hw_status
784__vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
785{
786 enum vxge_hw_status status = VXGE_HW_OK;
787
788 if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
789 hldev->func_id)) {
790 /* Validate the pci-e link width and speed */
791 status = __vxge_hw_verify_pci_e_info(hldev);
792 if (status != VXGE_HW_OK)
793 goto exit;
794 }
795
796exit:
797 return status;
798}
799
800/*
801 * __vxge_hw_vpath_fw_ver_get - Get the fw version
802 * Returns FW Version
803 */
804static enum vxge_hw_status
805__vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
806 struct vxge_hw_device_hw_info *hw_info)
807{
808 struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
809 struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
810 struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
811 struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
812 u64 data0, data1 = 0, steer_ctrl = 0;
813 enum vxge_hw_status status;
814
815 status = vxge_hw_vpath_fw_api(vpath,
816 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
817 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
818 0, &data0, &data1, &steer_ctrl);
819 if (status != VXGE_HW_OK)
820 goto exit;
821
822 fw_date->day =
823 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0);
824 fw_date->month =
825 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0);
826 fw_date->year =
827 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0);
828
829 snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
830 fw_date->month, fw_date->day, fw_date->year);
831
832 fw_version->major =
833 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
834 fw_version->minor =
835 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
836 fw_version->build =
837 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
838
839 snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
840 fw_version->major, fw_version->minor, fw_version->build);
841
842 flash_date->day =
843 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1);
844 flash_date->month =
845 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1);
846 flash_date->year =
847 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1);
848
849 snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
850 flash_date->month, flash_date->day, flash_date->year);
851
852 flash_version->major =
853 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1);
854 flash_version->minor =
855 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1);
856 flash_version->build =
857 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1);
858
859 snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
860 flash_version->major, flash_version->minor,
861 flash_version->build);
862
863exit:
864 return status;
865}
866
867/*
868 * __vxge_hw_vpath_card_info_get - Get the serial numbers,
869 * part number and product description.
870 */
871static enum vxge_hw_status
872__vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath,
873 struct vxge_hw_device_hw_info *hw_info)
874{
875 enum vxge_hw_status status;
876 u64 data0, data1 = 0, steer_ctrl = 0;
877 u8 *serial_number = hw_info->serial_number;
878 u8 *part_number = hw_info->part_number;
879 u8 *product_desc = hw_info->product_desc;
880 u32 i, j = 0;
881
882 data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER;
883
884 status = vxge_hw_vpath_fw_api(vpath,
885 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
886 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
887 0, &data0, &data1, &steer_ctrl);
888 if (status != VXGE_HW_OK)
889 return status;
890
891 ((u64 *)serial_number)[0] = be64_to_cpu(data0);
892 ((u64 *)serial_number)[1] = be64_to_cpu(data1);
893
894 data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER;
895 data1 = steer_ctrl = 0;
896
897 status = vxge_hw_vpath_fw_api(vpath,
898 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
899 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
900 0, &data0, &data1, &steer_ctrl);
901 if (status != VXGE_HW_OK)
902 return status;
903
904 ((u64 *)part_number)[0] = be64_to_cpu(data0);
905 ((u64 *)part_number)[1] = be64_to_cpu(data1);
906
907 for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
908 i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
909 data0 = i;
910 data1 = steer_ctrl = 0;
911
912 status = vxge_hw_vpath_fw_api(vpath,
913 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
914 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
915 0, &data0, &data1, &steer_ctrl);
916 if (status != VXGE_HW_OK)
917 return status;
918
919 ((u64 *)product_desc)[j++] = be64_to_cpu(data0);
920 ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
921 }
922
923 return status;
924}
925
926/*
927 * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
928 * Returns pci function mode
929 */
930static enum vxge_hw_status
931__vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath,
932 struct vxge_hw_device_hw_info *hw_info)
933{
934 u64 data0, data1 = 0, steer_ctrl = 0;
935 enum vxge_hw_status status;
936
937 data0 = 0;
938
939 status = vxge_hw_vpath_fw_api(vpath,
940 VXGE_HW_FW_API_GET_FUNC_MODE,
941 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
942 0, &data0, &data1, &steer_ctrl);
943 if (status != VXGE_HW_OK)
944 return status;
945
946 hw_info->function_mode = VXGE_HW_GET_FUNC_MODE_VAL(data0);
947 return status;
948}
949
950/*
951 * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
952 * from MAC address table.
953 */
954static enum vxge_hw_status
955__vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath,
956 u8 *macaddr, u8 *macaddr_mask)
957{
958 u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
959 data0 = 0, data1 = 0, steer_ctrl = 0;
960 enum vxge_hw_status status;
961 int i;
962
963 do {
964 status = vxge_hw_vpath_fw_api(vpath, action,
965 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
966 0, &data0, &data1, &steer_ctrl);
967 if (status != VXGE_HW_OK)
968 goto exit;
969
970 data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0);
971 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
972 data1);
973
974 for (i = ETH_ALEN; i > 0; i--) {
975 macaddr[i - 1] = (u8) (data0 & 0xFF);
976 data0 >>= 8;
977
978 macaddr_mask[i - 1] = (u8) (data1 & 0xFF);
979 data1 >>= 8;
980 }
981
982 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY;
983 data0 = 0, data1 = 0, steer_ctrl = 0;
984
985 } while (!is_valid_ether_addr(macaddr));
986exit:
987 return status;
988}
989
990/**
991 * vxge_hw_device_hw_info_get - Get the hw information
992 * Returns the vpath mask that has the bits set for each vpath allocated
993 * for the driver, FW version information, and the first mac address for
994 * each vpath
995 */
996enum vxge_hw_status __devinit
997vxge_hw_device_hw_info_get(void __iomem *bar0,
998 struct vxge_hw_device_hw_info *hw_info)
999{
1000 u32 i;
1001 u64 val64;
1002 struct vxge_hw_toc_reg __iomem *toc;
1003 struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
1004 struct vxge_hw_common_reg __iomem *common_reg;
1005 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
1006 enum vxge_hw_status status;
1007 struct __vxge_hw_virtualpath vpath;
1008
1009 memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));
1010
1011 toc = __vxge_hw_device_toc_get(bar0);
1012 if (toc == NULL) {
1013 status = VXGE_HW_ERR_CRITICAL;
1014 goto exit;
1015 }
1016
1017 val64 = readq(&toc->toc_common_pointer);
1018 common_reg = bar0 + val64;
1019
1020 status = __vxge_hw_device_vpath_reset_in_prog_check(
1021 (u64 __iomem *)&common_reg->vpath_rst_in_prog);
1022 if (status != VXGE_HW_OK)
1023 goto exit;
1024
1025 hw_info->vpath_mask = readq(&common_reg->vpath_assignments);
1026
1027 val64 = readq(&common_reg->host_type_assignments);
1028
1029 hw_info->host_type =
1030 (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
1031
1032 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1033 if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
1034 continue;
1035
1036 val64 = readq(&toc->toc_vpmgmt_pointer[i]);
1037
1038 vpmgmt_reg = bar0 + val64;
1039
1040 hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
1041 if (__vxge_hw_device_access_rights_get(hw_info->host_type,
1042 hw_info->func_id) &
1043 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {
1044
1045 val64 = readq(&toc->toc_mrpcim_pointer);
1046
1047 mrpcim_reg = bar0 + val64;
1048
1049 writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
1050 wmb();
1051 }
1052
1053 val64 = readq(&toc->toc_vpath_pointer[i]);
1054
1055 spin_lock_init(&vpath.lock);
1056 vpath.vp_reg = bar0 + val64;
1057 vpath.vp_open = VXGE_HW_VP_NOT_OPEN;
1058
1059 status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info);
1060 if (status != VXGE_HW_OK)
1061 goto exit;
1062
1063 status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info);
1064 if (status != VXGE_HW_OK)
1065 goto exit;
1066
1067 status = __vxge_hw_vpath_card_info_get(&vpath, hw_info);
1068 if (status != VXGE_HW_OK)
1069 goto exit;
1070
1071 break;
1072 }
1073
1074 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1075 if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
1076 continue;
1077
1078 val64 = readq(&toc->toc_vpath_pointer[i]);
1079 vpath.vp_reg = bar0 + val64;
1080 vpath.vp_open = VXGE_HW_VP_NOT_OPEN;
1081
1082 status = __vxge_hw_vpath_addr_get(&vpath,
1083 hw_info->mac_addrs[i],
1084 hw_info->mac_addr_masks[i]);
1085 if (status != VXGE_HW_OK)
1086 goto exit;
1087 }
1088exit:
1089 return status;
1090}
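/* A minimal usage sketch (hypothetical caller; local names are assumed):
 *
 *	struct vxge_hw_device_hw_info hw_info;
 *
 *	status = vxge_hw_device_hw_info_get(bar0, &hw_info);
 *	if (status != VXGE_HW_OK)
 *		return status;
 *	... hw_info.vpath_mask, hw_info.fw_version and hw_info.mac_addrs[]
 *	are now populated and can drive the rest of device probe ...
 */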
1091
1092/*
1093 * __vxge_hw_blockpool_destroy - Deallocates the block pool
1094 */
1095static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
1096{
1097 struct __vxge_hw_device *hldev;
1098 struct list_head *p, *n;
1099 u16 ret;
1100
1101 if (blockpool == NULL) {
1102 ret = 1;
1103 goto exit;
1104 }
1105
1106 hldev = blockpool->hldev;
1107
1108 list_for_each_safe(p, n, &blockpool->free_block_list) {
1109 pci_unmap_single(hldev->pdev,
1110 ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
1111 ((struct __vxge_hw_blockpool_entry *)p)->length,
1112 PCI_DMA_BIDIRECTIONAL);
1113
1114 vxge_os_dma_free(hldev->pdev,
1115 ((struct __vxge_hw_blockpool_entry *)p)->memblock,
1116 &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
1117
1118 list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
1119 kfree(p);
1120 blockpool->pool_size--;
1121 }
1122
1123 list_for_each_safe(p, n, &blockpool->free_entry_list) {
1124 list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
1125 kfree((void *)p);
1126 }
1127 ret = 0;
1128exit:
1129 return;
1130}
1131
1132/*
1133 * __vxge_hw_blockpool_create - Create block pool
1134 */
1135static enum vxge_hw_status
1136__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
1137 struct __vxge_hw_blockpool *blockpool,
1138 u32 pool_size,
1139 u32 pool_max)
1140{
1141 u32 i;
1142 struct __vxge_hw_blockpool_entry *entry = NULL;
1143 void *memblock;
1144 dma_addr_t dma_addr;
1145 struct pci_dev *dma_handle;
1146 struct pci_dev *acc_handle;
1147 enum vxge_hw_status status = VXGE_HW_OK;
1148
1149 if (blockpool == NULL) {
1150 status = VXGE_HW_FAIL;
1151 goto blockpool_create_exit;
1152 }
1153
1154 blockpool->hldev = hldev;
1155 blockpool->block_size = VXGE_HW_BLOCK_SIZE;
1156 blockpool->pool_size = 0;
1157 blockpool->pool_max = pool_max;
1158 blockpool->req_out = 0;
1159
1160 INIT_LIST_HEAD(&blockpool->free_block_list);
1161 INIT_LIST_HEAD(&blockpool->free_entry_list);
1162
1163 for (i = 0; i < pool_size + pool_max; i++) {
1164 entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
1165 GFP_KERNEL);
1166 if (entry == NULL) {
1167 __vxge_hw_blockpool_destroy(blockpool);
1168 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1169 goto blockpool_create_exit;
1170 }
1171 list_add(&entry->item, &blockpool->free_entry_list);
1172 }
1173
1174 for (i = 0; i < pool_size; i++) {
1175 memblock = vxge_os_dma_malloc(
1176 hldev->pdev,
1177 VXGE_HW_BLOCK_SIZE,
1178 &dma_handle,
1179 &acc_handle);
1180 if (memblock == NULL) {
1181 __vxge_hw_blockpool_destroy(blockpool);
1182 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1183 goto blockpool_create_exit;
1184 }
1185
1186 dma_addr = pci_map_single(hldev->pdev, memblock,
1187 VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
1188 if (unlikely(pci_dma_mapping_error(hldev->pdev,
1189 dma_addr))) {
1190 vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
1191 __vxge_hw_blockpool_destroy(blockpool);
1192 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1193 goto blockpool_create_exit;
1194 }
1195
1196 if (!list_empty(&blockpool->free_entry_list))
1197 entry = (struct __vxge_hw_blockpool_entry *)
1198 list_first_entry(&blockpool->free_entry_list,
1199 struct __vxge_hw_blockpool_entry,
1200 item);
1201
1202 if (entry == NULL)
1203 entry =
1204 kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
1205 GFP_KERNEL);
1206 if (entry != NULL) {
1207 list_del(&entry->item);
1208 entry->length = VXGE_HW_BLOCK_SIZE;
1209 entry->memblock = memblock;
1210 entry->dma_addr = dma_addr;
1211 entry->acc_handle = acc_handle;
1212 entry->dma_handle = dma_handle;
1213 list_add(&entry->item,
1214 &blockpool->free_block_list);
1215 blockpool->pool_size++;
1216 } else {
1217 __vxge_hw_blockpool_destroy(blockpool);
1218 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1219 goto blockpool_create_exit;
1220 }
1221 }
1222
1223blockpool_create_exit:
1224 return status;
1225}
1226
1227/*
1228 * __vxge_hw_device_fifo_config_check - Check fifo configuration.
1229 * Check the fifo configuration
1230 */
1231static enum vxge_hw_status
1232__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
1233{
1234 if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
1235 (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
1236 return VXGE_HW_BADCFG_FIFO_BLOCKS;
1237
1238 return VXGE_HW_OK;
1239}
1240
1241/*
1242 * __vxge_hw_device_vpath_config_check - Check vpath configuration.
1243 * Check the vpath configuration
1244 */
1245static enum vxge_hw_status
1246__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
1247{
1248 enum vxge_hw_status status;
1249
1250 if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
1251 (vp_config->min_bandwidth > VXGE_HW_VPATH_BANDWIDTH_MAX))
1252 return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;
1253
1254 status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
1255 if (status != VXGE_HW_OK)
1256 return status;
1257
1258 if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
1259 ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
1260 (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
1261 return VXGE_HW_BADCFG_VPATH_MTU;
1262
1263 if ((vp_config->rpa_strip_vlan_tag !=
1264 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
1265 (vp_config->rpa_strip_vlan_tag !=
1266 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
1267 (vp_config->rpa_strip_vlan_tag !=
1268 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
1269 return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
1270
1271 return VXGE_HW_OK;
1272}
1273
1274/*
1275 * __vxge_hw_device_config_check - Check device configuration.
1276 * Check the device configuration
1277 */
1278static enum vxge_hw_status
1279__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
1280{
1281 u32 i;
1282 enum vxge_hw_status status;
1283
1284 if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
1285 (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
1286 (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
1287 (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
1288 return VXGE_HW_BADCFG_INTR_MODE;
1289
1290 if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
1291 (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
1292 return VXGE_HW_BADCFG_RTS_MAC_EN;
1293
1294 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1295 status = __vxge_hw_device_vpath_config_check(
1296 &new_config->vp_config[i]);
1297 if (status != VXGE_HW_OK)
1298 return status;
1299 }
1300
1301 return VXGE_HW_OK;
1302}
1303
1304/*
1305 * vxge_hw_device_initialize - Initialize Titan device.
1306 * Initialize Titan device. Note that all the arguments of this public API
1307 * are 'IN' except @devh, which is filled in with the new device handle.
1308 * The driver cooperates with the OS to find the new Titan device and locate its PCI and memory spaces.
1309 *
1310 * When done, the driver allocates sizeof(struct __vxge_hw_device) bytes for HW
1311 * to enable the latter to perform Titan hardware initialization.
1312 */
1313enum vxge_hw_status __devinit
1314vxge_hw_device_initialize(
1315 struct __vxge_hw_device **devh,
1316 struct vxge_hw_device_attr *attr,
1317 struct vxge_hw_device_config *device_config)
1318{
1319 u32 i;
1320 u32 nblocks = 0;
1321 struct __vxge_hw_device *hldev = NULL;
1322 enum vxge_hw_status status = VXGE_HW_OK;
1323
1324 status = __vxge_hw_device_config_check(device_config);
1325 if (status != VXGE_HW_OK)
1326 goto exit;
1327
1328 hldev = vzalloc(sizeof(struct __vxge_hw_device));
1329 if (hldev == NULL) {
1330 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1331 goto exit;
1332 }
1333
1334 hldev->magic = VXGE_HW_DEVICE_MAGIC;
1335
1336 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);
1337
1338 /* apply config */
1339 memcpy(&hldev->config, device_config,
1340 sizeof(struct vxge_hw_device_config));
1341
1342 hldev->bar0 = attr->bar0;
1343 hldev->pdev = attr->pdev;
1344
1345 hldev->uld_callbacks.link_up = attr->uld_callbacks.link_up;
1346 hldev->uld_callbacks.link_down = attr->uld_callbacks.link_down;
1347 hldev->uld_callbacks.crit_err = attr->uld_callbacks.crit_err;
1348
1349 __vxge_hw_device_pci_e_init(hldev);
1350
1351 status = __vxge_hw_device_reg_addr_get(hldev);
1352 if (status != VXGE_HW_OK) {
1353 vfree(hldev);
1354 goto exit;
1355 }
1356
1357 __vxge_hw_device_host_info_get(hldev);
1358
1359 /* Incrementing for stats blocks */
1360 nblocks++;
1361
1362 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1363 if (!(hldev->vpath_assignments & vxge_mBIT(i)))
1364 continue;
1365
1366 if (device_config->vp_config[i].ring.enable ==
1367 VXGE_HW_RING_ENABLE)
1368 nblocks += device_config->vp_config[i].ring.ring_blocks;
1369
1370 if (device_config->vp_config[i].fifo.enable ==
1371 VXGE_HW_FIFO_ENABLE)
1372 nblocks += device_config->vp_config[i].fifo.fifo_blocks;
1373 nblocks++;
1374 }
1375
1376 if (__vxge_hw_blockpool_create(hldev,
1377 &hldev->block_pool,
1378 device_config->dma_blockpool_initial + nblocks,
1379 device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) {
1380
1381 vxge_hw_device_terminate(hldev);
1382 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1383 goto exit;
1384 }
1385
1386 status = __vxge_hw_device_initialize(hldev);
1387 if (status != VXGE_HW_OK) {
1388 vxge_hw_device_terminate(hldev);
1389 goto exit;
1390 }
1391
1392 *devh = hldev;
1393exit:
1394 return status;
1395}
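/* A minimal usage sketch (hypothetical caller; uld_callbacks setup omitted):
 *
 *	struct __vxge_hw_device *hldev;
 *	struct vxge_hw_device_attr attr = { .bar0 = bar0, .pdev = pdev };
 *
 *	status = vxge_hw_device_initialize(&hldev, &attr, device_config);
 *	if (status != VXGE_HW_OK)
 *		return status;
 *	...
 *	vxge_hw_device_terminate(hldev);
 */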
1396
1397/*
1398 * vxge_hw_device_terminate - Terminate Titan device.
1399 * Terminate HW device.
1400 */
1401void
1402vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
1403{
1404 vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);
1405
1406 hldev->magic = VXGE_HW_DEVICE_DEAD;
1407 __vxge_hw_blockpool_destroy(&hldev->block_pool);
1408 vfree(hldev);
1409}
1410
1411/*
1412 * __vxge_hw_vpath_stats_access - Get the statistics from the given location
1413 * and offset and perform an operation
1414 */
1415static enum vxge_hw_status
1416__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
1417 u32 operation, u32 offset, u64 *stat)
1418{
1419 u64 val64;
1420 enum vxge_hw_status status = VXGE_HW_OK;
1421 struct vxge_hw_vpath_reg __iomem *vp_reg;
1422
1423 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1424 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1425 goto vpath_stats_access_exit;
1426 }
1427
1428 vp_reg = vpath->vp_reg;
1429
1430 val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
1431 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
1432 VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
1433
1434 status = __vxge_hw_pio_mem_write64(val64,
1435 &vp_reg->xmac_stats_access_cmd,
1436 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
1437 vpath->hldev->config.device_poll_millis);
1438 if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
1439 *stat = readq(&vp_reg->xmac_stats_access_data);
1440 else
1441 *stat = 0;
1442
1443vpath_stats_access_exit:
1444 return status;
1445}
1446
1447/*
1448 * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
1449 */
1450static enum vxge_hw_status
1451__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
1452 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
1453{
1454 u64 *val64;
1455 int i;
1456 u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
1457 enum vxge_hw_status status = VXGE_HW_OK;
1458
1459 val64 = (u64 *)vpath_tx_stats;
1460
1461 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1462 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1463 goto exit;
1464 }
1465
1466 for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
1467 status = __vxge_hw_vpath_stats_access(vpath,
1468 VXGE_HW_STATS_OP_READ,
1469 offset, val64);
1470 if (status != VXGE_HW_OK)
1471 goto exit;
1472 offset++;
1473 val64++;
1474 }
1475exit:
1476 return status;
1477}
1478
1479/*
1480 * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
1481 */
1482static enum vxge_hw_status
1483__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
1484 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
1485{
1486 u64 *val64;
1487 enum vxge_hw_status status = VXGE_HW_OK;
1488 int i;
1489 u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
1490 val64 = (u64 *) vpath_rx_stats;
1491
1492 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1493 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1494 goto exit;
1495 }
1496 for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
1497 status = __vxge_hw_vpath_stats_access(vpath,
1498 VXGE_HW_STATS_OP_READ,
1499 offset >> 3, val64);
1500 if (status != VXGE_HW_OK)
1501 goto exit;
1502
1503 offset += 8;
1504 val64++;
1505 }
1506exit:
1507 return status;
1508}
1509
1510/*
1511 * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
1512 */
1513static enum vxge_hw_status
1514__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
1515 struct vxge_hw_vpath_stats_hw_info *hw_stats)
1516{
1517 u64 val64;
1518 enum vxge_hw_status status = VXGE_HW_OK;
1519 struct vxge_hw_vpath_reg __iomem *vp_reg;
1520
1521 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1522 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1523 goto exit;
1524 }
1525 vp_reg = vpath->vp_reg;
1526
1527 val64 = readq(&vp_reg->vpath_debug_stats0);
1528 hw_stats->ini_num_mwr_sent =
1529 (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
1530
1531 val64 = readq(&vp_reg->vpath_debug_stats1);
1532 hw_stats->ini_num_mrd_sent =
1533 (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
1534
1535 val64 = readq(&vp_reg->vpath_debug_stats2);
1536 hw_stats->ini_num_cpl_rcvd =
1537 (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
1538
1539 val64 = readq(&vp_reg->vpath_debug_stats3);
1540 hw_stats->ini_num_mwr_byte_sent =
1541 VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
1542
1543 val64 = readq(&vp_reg->vpath_debug_stats4);
1544 hw_stats->ini_num_cpl_byte_rcvd =
1545 VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
1546
1547 val64 = readq(&vp_reg->vpath_debug_stats5);
1548 hw_stats->wrcrdtarb_xoff =
1549 (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
1550
1551 val64 = readq(&vp_reg->vpath_debug_stats6);
1552 hw_stats->rdcrdtarb_xoff =
1553 (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
1554
1555 val64 = readq(&vp_reg->vpath_genstats_count01);
1556 hw_stats->vpath_genstats_count0 =
1557 (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
1558 val64);
1559
1560 val64 = readq(&vp_reg->vpath_genstats_count01);
1561 hw_stats->vpath_genstats_count1 =
1562 (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
1563 val64);
1564
1565 val64 = readq(&vp_reg->vpath_genstats_count23);
1566 hw_stats->vpath_genstats_count2 =
1567 (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
1568 val64);
1569
1570 val64 = readq(&vp_reg->vpath_genstats_count23);
1571 hw_stats->vpath_genstats_count3 =
1572 (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
1573 val64);
1574
1575 val64 = readq(&vp_reg->vpath_genstats_count4);
1576 hw_stats->vpath_genstats_count4 =
1577 (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
1578 val64);
1579
1580 val64 = readq(&vp_reg->vpath_genstats_count5);
1581 hw_stats->vpath_genstats_count5 =
1582 (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
1583 val64);
1584
1585 status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
1586 if (status != VXGE_HW_OK)
1587 goto exit;
1588
1589 status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
1590 if (status != VXGE_HW_OK)
1591 goto exit;
1592
1593 VXGE_HW_VPATH_STATS_PIO_READ(
1594 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
1595
1596 hw_stats->prog_event_vnum0 =
1597 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
1598
1599 hw_stats->prog_event_vnum1 =
1600 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
1601
1602 VXGE_HW_VPATH_STATS_PIO_READ(
1603 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
1604
1605 hw_stats->prog_event_vnum2 =
1606 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
1607
1608 hw_stats->prog_event_vnum3 =
1609 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
1610
1611 val64 = readq(&vp_reg->rx_multi_cast_stats);
1612 hw_stats->rx_multi_cast_frame_discard =
1613 (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
1614
1615 val64 = readq(&vp_reg->rx_frm_transferred);
1616 hw_stats->rx_frm_transferred =
1617 (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
1618
1619 val64 = readq(&vp_reg->rxd_returned);
1620 hw_stats->rxd_returned =
1621 (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
1622
1623 val64 = readq(&vp_reg->dbg_stats_rx_mpa);
1624 hw_stats->rx_mpa_len_fail_frms =
1625 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
1626 hw_stats->rx_mpa_mrk_fail_frms =
1627 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
1628 hw_stats->rx_mpa_crc_fail_frms =
1629 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
1630
1631 val64 = readq(&vp_reg->dbg_stats_rx_fau);
1632 hw_stats->rx_permitted_frms =
1633 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
1634 hw_stats->rx_vp_reset_discarded_frms =
1635 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
1636 hw_stats->rx_wol_frms =
1637 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
1638
1639 val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
1640 hw_stats->tx_vp_reset_discarded_frms =
1641 (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
1642 val64);
1643exit:
1644 return status;
1645}
1646
1647/*
1648 * vxge_hw_device_stats_get - Get the device hw statistics.
1649 * Returns the vpath h/w stats for the device.
1650 */
1651enum vxge_hw_status
1652vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
1653 struct vxge_hw_device_stats_hw_info *hw_stats)
1654{
1655 u32 i;
1656 enum vxge_hw_status status = VXGE_HW_OK;
1657
1658 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1659 if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
1660 (hldev->virtual_paths[i].vp_open ==
1661 VXGE_HW_VP_NOT_OPEN))
1662 continue;
1663
1664 memcpy(hldev->virtual_paths[i].hw_stats_sav,
1665 hldev->virtual_paths[i].hw_stats,
1666 sizeof(struct vxge_hw_vpath_stats_hw_info));
1667
1668 status = __vxge_hw_vpath_stats_get(
1669 &hldev->virtual_paths[i],
1670 hldev->virtual_paths[i].hw_stats);
1671 }
1672
1673 memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
1674 sizeof(struct vxge_hw_device_stats_hw_info));
1675
1676 return status;
1677}
1678
1679/*
1680 * vxge_hw_driver_stats_get - Get the device sw statistics.
1681 * Returns the vpath s/w stats for the device.
1682 */
1683enum vxge_hw_status vxge_hw_driver_stats_get(
1684 struct __vxge_hw_device *hldev,
1685 struct vxge_hw_device_stats_sw_info *sw_stats)
1686{
1687 enum vxge_hw_status status = VXGE_HW_OK;
1688
1689 memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
1690 sizeof(struct vxge_hw_device_stats_sw_info));
1691
1692 return status;
1693}
1694
1695/*
1696 * vxge_hw_mrpcim_stats_access - Access the statistics from the given location
1697 * and offset and perform an operation
1698 * Get the statistics from the given location and offset.
1699 */
1700enum vxge_hw_status
1701vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
1702 u32 operation, u32 location, u32 offset, u64 *stat)
1703{
1704 u64 val64;
1705 enum vxge_hw_status status = VXGE_HW_OK;
1706
1707 status = __vxge_hw_device_is_privilaged(hldev->host_type,
1708 hldev->func_id);
1709 if (status != VXGE_HW_OK)
1710 goto exit;
1711
1712 val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) |
1713 VXGE_HW_XMAC_STATS_SYS_CMD_STROBE |
1714 VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) |
1715 VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset);
1716
1717 status = __vxge_hw_pio_mem_write64(val64,
1718 &hldev->mrpcim_reg->xmac_stats_sys_cmd,
1719 VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
1720 hldev->config.device_poll_millis);
1721
1722 if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
1723 *stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
1724 else
1725 *stat = 0;
1726exit:
1727 return status;
1728}
1729
1730/*
1731 * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
1732 * Get the Statistics on aggregate port
1733 */
1734static enum vxge_hw_status
1735vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
1736 struct vxge_hw_xmac_aggr_stats *aggr_stats)
1737{
1738 u64 *val64;
1739 int i;
1740 u32 offset = VXGE_HW_STATS_AGGRn_OFFSET;
1741 enum vxge_hw_status status = VXGE_HW_OK;
1742
1743 val64 = (u64 *)aggr_stats;
1744
1745 status = __vxge_hw_device_is_privilaged(hldev->host_type,
1746 hldev->func_id);
1747 if (status != VXGE_HW_OK)
1748 goto exit;
1749
1750 for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) {
1751 status = vxge_hw_mrpcim_stats_access(hldev,
1752 VXGE_HW_STATS_OP_READ,
1753 VXGE_HW_STATS_LOC_AGGR,
1754 ((offset + (104 * port)) >> 3), val64);
1755 if (status != VXGE_HW_OK)
1756 goto exit;
1757
1758 offset += 8;
1759 val64++;
1760 }
1761exit:
1762 return status;
1763}
1764
1765/*
1766 * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
1767 * Get the Statistics on port
1768 */
1769static enum vxge_hw_status
1770vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
1771 struct vxge_hw_xmac_port_stats *port_stats)
1772{
1773 u64 *val64;
1774 enum vxge_hw_status status = VXGE_HW_OK;
1775 int i;
1776 u32 offset = 0x0;
1777 val64 = (u64 *) port_stats;
1778
1779 status = __vxge_hw_device_is_privilaged(hldev->host_type,
1780 hldev->func_id);
1781 if (status != VXGE_HW_OK)
1782 goto exit;
1783
1784 for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) {
1785 status = vxge_hw_mrpcim_stats_access(hldev,
1786 VXGE_HW_STATS_OP_READ,
1787 VXGE_HW_STATS_LOC_AGGR,
1788 ((offset + (608 * port)) >> 3), val64);
1789 if (status != VXGE_HW_OK)
1790 goto exit;
1791
1792 offset += 8;
1793 val64++;
1794 }
1795
1796exit:
1797 return status;
1798}
1799
1800/*
1801 * vxge_hw_device_xmac_stats_get - Get the XMAC Statistics
1802 * Get the XMAC Statistics
1803 */
1804enum vxge_hw_status
1805vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
1806 struct vxge_hw_xmac_stats *xmac_stats)
1807{
1808 enum vxge_hw_status status = VXGE_HW_OK;
1809 u32 i;
1810
1811 status = vxge_hw_device_xmac_aggr_stats_get(hldev,
1812 0, &xmac_stats->aggr_stats[0]);
1813 if (status != VXGE_HW_OK)
1814 goto exit;
1815
1816 status = vxge_hw_device_xmac_aggr_stats_get(hldev,
1817 1, &xmac_stats->aggr_stats[1]);
1818 if (status != VXGE_HW_OK)
1819 goto exit;
1820
1821 for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
1822
1823 status = vxge_hw_device_xmac_port_stats_get(hldev,
1824 i, &xmac_stats->port_stats[i]);
1825 if (status != VXGE_HW_OK)
1826 goto exit;
1827 }
1828
1829 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1830
1831 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
1832 continue;
1833
1834 status = __vxge_hw_vpath_xmac_tx_stats_get(
1835 &hldev->virtual_paths[i],
1836 &xmac_stats->vpath_tx_stats[i]);
1837 if (status != VXGE_HW_OK)
1838 goto exit;
1839
1840 status = __vxge_hw_vpath_xmac_rx_stats_get(
1841 &hldev->virtual_paths[i],
1842 &xmac_stats->vpath_rx_stats[i]);
1843 if (status != VXGE_HW_OK)
1844 goto exit;
1845 }
1846exit:
1847 return status;
1848}
1849
1850/*
1851 * vxge_hw_device_debug_set - Set the debug module, level and timestamp
1852 * This routine is used to dynamically change the debug output
1853 */
1854void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
1855 enum vxge_debug_level level, u32 mask)
1856{
1857 if (hldev == NULL)
1858 return;
1859
1860#if defined(VXGE_DEBUG_TRACE_MASK) || \
1861 defined(VXGE_DEBUG_ERR_MASK)
1862 hldev->debug_module_mask = mask;
1863 hldev->debug_level = level;
1864#endif
1865
1866#if defined(VXGE_DEBUG_ERR_MASK)
1867 hldev->level_err = level & VXGE_ERR;
1868#endif
1869
1870#if defined(VXGE_DEBUG_TRACE_MASK)
1871 hldev->level_trace = level & VXGE_TRACE;
1872#endif
1873}
1874
1875/*
1876 * vxge_hw_device_error_level_get - Get the error level
1877 * This routine returns the current error level set
1878 */
1879u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
1880{
1881#if defined(VXGE_DEBUG_ERR_MASK)
1882 if (hldev == NULL)
1883 return VXGE_ERR;
1884 else
1885 return hldev->level_err;
1886#else
1887 return 0;
1888#endif
1889}
1890
1891/*
1892 * vxge_hw_device_trace_level_get - Get the trace level
1893 * This routine returns the current trace level set
1894 */
1895u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
1896{
1897#if defined(VXGE_DEBUG_TRACE_MASK)
1898 if (hldev == NULL)
1899 return VXGE_TRACE;
1900 else
1901 return hldev->level_trace;
1902#else
1903 return 0;
1904#endif
1905}
1906
1907/*
1908 * vxge_hw_device_getpause_data - Pause frame generation and reception.
1909 * Returns the Pause frame generation and reception capability of the NIC.
1910 */
1911enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
1912 u32 port, u32 *tx, u32 *rx)
1913{
1914 u64 val64;
1915 enum vxge_hw_status status = VXGE_HW_OK;
1916
1917 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
1918 status = VXGE_HW_ERR_INVALID_DEVICE;
1919 goto exit;
1920 }
1921
1922 if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
1923 status = VXGE_HW_ERR_INVALID_PORT;
1924 goto exit;
1925 }
1926
1927 if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
1928 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
1929 goto exit;
1930 }
1931
1932 val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
1933 if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN)
1934 *tx = 1;
1935 if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN)
1936 *rx = 1;
1937exit:
1938 return status;
1939}
1940
1941/*
1942 * vxge_hw_device_setpause_data - set/reset pause frame generation.
1943 * It can be used to set or reset Pause frame generation or reception
1944 * support of the NIC.
1945 */
1946enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
1947 u32 port, u32 tx, u32 rx)
1948{
1949 u64 val64;
1950 enum vxge_hw_status status = VXGE_HW_OK;
1951
1952 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
1953 status = VXGE_HW_ERR_INVALID_DEVICE;
1954 goto exit;
1955 }
1956
1957 if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
1958 status = VXGE_HW_ERR_INVALID_PORT;
1959 goto exit;
1960 }
1961
1962 status = __vxge_hw_device_is_privilaged(hldev->host_type,
1963 hldev->func_id);
1964 if (status != VXGE_HW_OK)
1965 goto exit;
1966
1967 val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
1968 if (tx)
1969 val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
1970 else
1971 val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
1972 if (rx)
1973 val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
1974 else
1975 val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
1976
1977 writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
1978exit:
1979 return status;
1980}
1981
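/*
 * Illustrative sketch (editor's example, not part of the driver): enable pause
 * frame generation and reception on a wire port and read the setting back.
 * Both calls require mrpcim (privileged) access, as enforced above.
 */
static enum vxge_hw_status __maybe_unused
vxge_example_enable_pause(struct __vxge_hw_device *hldev, u32 port)
{
	u32 tx = 0, rx = 0;
	enum vxge_hw_status status;

	status = vxge_hw_device_setpause_data(hldev, port, 1, 1);
	if (status != VXGE_HW_OK)
		return status;

	return vxge_hw_device_getpause_data(hldev, port, &tx, &rx);
}
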
1982u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
1983{
1984 struct pci_dev *dev = hldev->pdev;
1985 u16 lnk;
1986
1987 pci_read_config_word(dev, dev->pcie_cap + PCI_EXP_LNKSTA, &lnk);
1988 return (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
1989}
1990
1991/*
1992 * __vxge_hw_ring_block_memblock_idx - Return the memblock index
1993 * This function returns the index of memory block
1994 */
1995static inline u32
1996__vxge_hw_ring_block_memblock_idx(u8 *block)
1997{
1998 return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
1999}
2000
2001/*
2002 * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index
2003 * This function sets index to a memory block
2004 */
2005static inline void
2006__vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)
2007{
2008 *((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx;
2009}
2010
2011/*
2012 * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer
2013 * in RxD block
2014 * Sets the next block pointer in RxD block
2015 */
2016static inline void
2017__vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
2018{
2019 *((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
2020}
2021
2022/*
2023 * __vxge_hw_ring_first_block_address_get - Returns the dma address of the
2024 * first block
2025 * Returns the dma address of the first RxD block
2026 */
2027static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
2028{
2029 struct vxge_hw_mempool_dma *dma_object;
2030
2031 dma_object = ring->mempool->memblocks_dma_arr;
2032 vxge_assert(dma_object != NULL);
2033
2034 return dma_object->addr;
2035}
2036
2037/*
2038 * __vxge_hw_ring_item_dma_addr - Return the dma address of an item
2039 * This function returns the dma address of a given item
2040 */
2041static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh,
2042 void *item)
2043{
2044 u32 memblock_idx;
2045 void *memblock;
2046 struct vxge_hw_mempool_dma *memblock_dma_object;
2047 ptrdiff_t dma_item_offset;
2048
2049 /* get owner memblock index */
2050 memblock_idx = __vxge_hw_ring_block_memblock_idx(item);
2051
2052 /* get owner memblock by memblock index */
2053 memblock = mempoolh->memblocks_arr[memblock_idx];
2054
2055 /* get memblock DMA object by memblock index */
2056 memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx;
2057
2058 /* calculate offset in the memblock of this item */
2059 dma_item_offset = (u8 *)item - (u8 *)memblock;
2060
2061 return memblock_dma_object->addr + dma_item_offset;
2062}
2063
2064/*
2065 * __vxge_hw_ring_rxdblock_link - Link the RxD blocks
2066 * This function links the 'from' RxD block to the 'to' RxD block
2067 */
2068static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh,
2069 struct __vxge_hw_ring *ring, u32 from,
2070 u32 to)
2071{
2072 u8 *to_item , *from_item;
2073 dma_addr_t to_dma;
2074
2075 /* get "from" RxD block */
2076 from_item = mempoolh->items_arr[from];
2077 vxge_assert(from_item);
2078
2079 /* get "to" RxD block */
2080 to_item = mempoolh->items_arr[to];
2081 vxge_assert(to_item);
2082
2083 /* return address of the beginning of previous RxD block */
2084 to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item);
2085
2086	/* set the next pointer for this RxD block to point to the
2087	 * previous item's DMA start address */
2088 __vxge_hw_ring_block_next_pointer_set(from_item, to_dma);
2089}
2090
2091/*
2092 * __vxge_hw_ring_mempool_item_alloc - Allocate List blocks for RxD
2093 * block callback
2094 * This function is the callback passed to __vxge_hw_mempool_create to create
2095 * the memory pool for RxD blocks
2096 */
2097static void
2098__vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
2099 u32 memblock_index,
2100 struct vxge_hw_mempool_dma *dma_object,
2101 u32 index, u32 is_last)
2102{
2103 u32 i;
2104 void *item = mempoolh->items_arr[index];
2105 struct __vxge_hw_ring *ring =
2106 (struct __vxge_hw_ring *)mempoolh->userdata;
2107
2108 /* format rxds array */
2109 for (i = 0; i < ring->rxds_per_block; i++) {
2110 void *rxdblock_priv;
2111 void *uld_priv;
2112 struct vxge_hw_ring_rxd_1 *rxdp;
2113
2114 u32 reserve_index = ring->channel.reserve_ptr -
2115 (index * ring->rxds_per_block + i + 1);
2116 u32 memblock_item_idx;
2117
2118 ring->channel.reserve_arr[reserve_index] = ((u8 *)item) +
2119 i * ring->rxd_size;
2120
2121 /* Note: memblock_item_idx is index of the item within
2122 * the memblock. For instance, in case of three RxD-blocks
2123 * per memblock this value can be 0, 1 or 2. */
2124 rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh,
2125 memblock_index, item,
2126 &memblock_item_idx);
2127
2128 rxdp = ring->channel.reserve_arr[reserve_index];
2129
2130 uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);
2131
2132 /* pre-format Host_Control */
2133 rxdp->host_control = (u64)(size_t)uld_priv;
2134 }
2135
2136 __vxge_hw_ring_block_memblock_idx_set(item, memblock_index);
2137
2138 if (is_last) {
2139 /* link last one with first one */
2140 __vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0);
2141 }
2142
2143 if (index > 0) {
2144 /* link this RxD block with previous one */
2145 __vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
2146 }
2147}
2148
2149/*
2150 * vxge_hw_ring_replenish - Initial replenish of RxDs
2151 * This function replenishes the RxDs from the reserve array to the work array
2152 */
2153enum vxge_hw_status
2154vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
2155{
2156 void *rxd;
2157 struct __vxge_hw_channel *channel;
2158 enum vxge_hw_status status = VXGE_HW_OK;
2159
2160 channel = &ring->channel;
2161
2162 while (vxge_hw_channel_dtr_count(channel) > 0) {
2163
2164 status = vxge_hw_ring_rxd_reserve(ring, &rxd);
2165
2166 vxge_assert(status == VXGE_HW_OK);
2167
2168 if (ring->rxd_init) {
2169 status = ring->rxd_init(rxd, channel->userdata);
2170 if (status != VXGE_HW_OK) {
2171 vxge_hw_ring_rxd_free(ring, rxd);
2172 goto exit;
2173 }
2174 }
2175
2176 vxge_hw_ring_rxd_post(ring, rxd);
2177 }
2178 status = VXGE_HW_OK;
2179exit:
2180 return status;
2181}
2182
2183/*
2184 * __vxge_hw_channel_allocate - Allocate memory for channel
2185 * This function allocates required memory for the channel and various arrays
2186 * in the channel
2187 */
2188static struct __vxge_hw_channel *
2189__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
2190 enum __vxge_hw_channel_type type,
2191 u32 length, u32 per_dtr_space,
2192 void *userdata)
2193{
2194 struct __vxge_hw_channel *channel;
2195 struct __vxge_hw_device *hldev;
2196 int size = 0;
2197 u32 vp_id;
2198
2199 hldev = vph->vpath->hldev;
2200 vp_id = vph->vpath->vp_id;
2201
2202 switch (type) {
2203 case VXGE_HW_CHANNEL_TYPE_FIFO:
2204 size = sizeof(struct __vxge_hw_fifo);
2205 break;
2206 case VXGE_HW_CHANNEL_TYPE_RING:
2207 size = sizeof(struct __vxge_hw_ring);
2208 break;
2209 default:
2210 break;
2211 }
2212
2213 channel = kzalloc(size, GFP_KERNEL);
2214 if (channel == NULL)
2215 goto exit0;
2216 INIT_LIST_HEAD(&channel->item);
2217
2218 channel->common_reg = hldev->common_reg;
2219 channel->first_vp_id = hldev->first_vp_id;
2220 channel->type = type;
2221 channel->devh = hldev;
2222 channel->vph = vph;
2223 channel->userdata = userdata;
2224 channel->per_dtr_space = per_dtr_space;
2225 channel->length = length;
2226 channel->vp_id = vp_id;
2227
2228 channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
2229 if (channel->work_arr == NULL)
2230 goto exit1;
2231
2232 channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
2233 if (channel->free_arr == NULL)
2234 goto exit1;
2235 channel->free_ptr = length;
2236
2237 channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
2238 if (channel->reserve_arr == NULL)
2239 goto exit1;
2240 channel->reserve_ptr = length;
2241 channel->reserve_top = 0;
2242
2243 channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
2244 if (channel->orig_arr == NULL)
2245 goto exit1;
2246
2247 return channel;
2248exit1:
2249 __vxge_hw_channel_free(channel);
2250
2251exit0:
2252 return NULL;
2253}
2254
2255/*
2256 * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
2257 * Adds a block to the block pool
2258 */
2259static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
2260 void *block_addr,
2261 u32 length,
2262 struct pci_dev *dma_h,
2263 struct pci_dev *acc_handle)
2264{
2265 struct __vxge_hw_blockpool *blockpool;
2266 struct __vxge_hw_blockpool_entry *entry = NULL;
2267 dma_addr_t dma_addr;
2268 enum vxge_hw_status status = VXGE_HW_OK;
2269 u32 req_out;
2270
2271 blockpool = &devh->block_pool;
2272
2273 if (block_addr == NULL) {
2274 blockpool->req_out--;
2275 status = VXGE_HW_FAIL;
2276 goto exit;
2277 }
2278
2279 dma_addr = pci_map_single(devh->pdev, block_addr, length,
2280 PCI_DMA_BIDIRECTIONAL);
2281
2282 if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
2283 vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
2284 blockpool->req_out--;
2285 status = VXGE_HW_FAIL;
2286 goto exit;
2287 }
2288
2289 if (!list_empty(&blockpool->free_entry_list))
2290 entry = (struct __vxge_hw_blockpool_entry *)
2291 list_first_entry(&blockpool->free_entry_list,
2292 struct __vxge_hw_blockpool_entry,
2293 item);
2294
2295 if (entry == NULL)
2296 entry = vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
2297 else
2298 list_del(&entry->item);
2299
2300 if (entry != NULL) {
2301 entry->length = length;
2302 entry->memblock = block_addr;
2303 entry->dma_addr = dma_addr;
2304 entry->acc_handle = acc_handle;
2305 entry->dma_handle = dma_h;
2306 list_add(&entry->item, &blockpool->free_block_list);
2307 blockpool->pool_size++;
2308 status = VXGE_HW_OK;
2309 } else
2310 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2311
2312 blockpool->req_out--;
2313
2314 req_out = blockpool->req_out;
2315exit:
2316 return;
2317}
2318
2319static inline void
2320vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, unsigned long size)
2321{
2322 gfp_t flags;
2323 void *vaddr;
2324
2325 if (in_interrupt())
2326 flags = GFP_ATOMIC | GFP_DMA;
2327 else
2328 flags = GFP_KERNEL | GFP_DMA;
2329
2330 vaddr = kmalloc((size), flags);
2331
2332 vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
2333}
2334
2335/*
2336 * __vxge_hw_blockpool_blocks_add - Request additional blocks
2337 */
2338static
2339void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
2340{
2341 u32 nreq = 0, i;
2342
2343 if ((blockpool->pool_size + blockpool->req_out) <
2344 VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
2345 nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
2346 blockpool->req_out += nreq;
2347 }
2348
2349 for (i = 0; i < nreq; i++)
2350 vxge_os_dma_malloc_async(
2351 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
2352 blockpool->hldev, VXGE_HW_BLOCK_SIZE);
2353}
2354
2355/*
2356 * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
2357 * Allocates a block of memory of given size, either from block pool
2358 * or by calling vxge_os_dma_malloc()
2359 */
2360static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
2361 struct vxge_hw_mempool_dma *dma_object)
2362{
2363 struct __vxge_hw_blockpool_entry *entry = NULL;
2364 struct __vxge_hw_blockpool *blockpool;
2365 void *memblock = NULL;
2366 enum vxge_hw_status status = VXGE_HW_OK;
2367
2368 blockpool = &devh->block_pool;
2369
2370 if (size != blockpool->block_size) {
2371
2372 memblock = vxge_os_dma_malloc(devh->pdev, size,
2373 &dma_object->handle,
2374 &dma_object->acc_handle);
2375
2376 if (memblock == NULL) {
2377 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2378 goto exit;
2379 }
2380
2381 dma_object->addr = pci_map_single(devh->pdev, memblock, size,
2382 PCI_DMA_BIDIRECTIONAL);
2383
2384 if (unlikely(pci_dma_mapping_error(devh->pdev,
2385 dma_object->addr))) {
2386 vxge_os_dma_free(devh->pdev, memblock,
2387 &dma_object->acc_handle);
2388 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2389 goto exit;
2390 }
2391
2392 } else {
2393
2394 if (!list_empty(&blockpool->free_block_list))
2395 entry = (struct __vxge_hw_blockpool_entry *)
2396 list_first_entry(&blockpool->free_block_list,
2397 struct __vxge_hw_blockpool_entry,
2398 item);
2399
2400 if (entry != NULL) {
2401 list_del(&entry->item);
2402 dma_object->addr = entry->dma_addr;
2403 dma_object->handle = entry->dma_handle;
2404 dma_object->acc_handle = entry->acc_handle;
2405 memblock = entry->memblock;
2406
2407 list_add(&entry->item,
2408 &blockpool->free_entry_list);
2409 blockpool->pool_size--;
2410 }
2411
2412 if (memblock != NULL)
2413 __vxge_hw_blockpool_blocks_add(blockpool);
2414 }
2415exit:
2416 return memblock;
2417}
2418
2419/*
2420 * __vxge_hw_blockpool_blocks_remove - Free additional blocks
2421 */
2422static void
2423__vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
2424{
2425 struct list_head *p, *n;
2426
2427 list_for_each_safe(p, n, &blockpool->free_block_list) {
2428
2429 if (blockpool->pool_size < blockpool->pool_max)
2430 break;
2431
2432 pci_unmap_single(
2433 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
2434 ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
2435 ((struct __vxge_hw_blockpool_entry *)p)->length,
2436 PCI_DMA_BIDIRECTIONAL);
2437
2438 vxge_os_dma_free(
2439 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
2440 ((struct __vxge_hw_blockpool_entry *)p)->memblock,
2441 &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
2442
2443 list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
2444
2445 list_add(p, &blockpool->free_entry_list);
2446
2447 blockpool->pool_size--;
2448
2449 }
2450}
2451
2452/*
2453 * __vxge_hw_blockpool_free - Frees the memory allocated with
2454 * __vxge_hw_blockpool_malloc
2455 */
2456static void __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
2457 void *memblock, u32 size,
2458 struct vxge_hw_mempool_dma *dma_object)
2459{
2460 struct __vxge_hw_blockpool_entry *entry = NULL;
2461 struct __vxge_hw_blockpool *blockpool;
2462 enum vxge_hw_status status = VXGE_HW_OK;
2463
2464 blockpool = &devh->block_pool;
2465
2466 if (size != blockpool->block_size) {
2467 pci_unmap_single(devh->pdev, dma_object->addr, size,
2468 PCI_DMA_BIDIRECTIONAL);
2469 vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
2470 } else {
2471
2472 if (!list_empty(&blockpool->free_entry_list))
2473 entry = (struct __vxge_hw_blockpool_entry *)
2474 list_first_entry(&blockpool->free_entry_list,
2475 struct __vxge_hw_blockpool_entry,
2476 item);
2477
2478 if (entry == NULL)
2479 entry = vmalloc(sizeof(
2480 struct __vxge_hw_blockpool_entry));
2481 else
2482 list_del(&entry->item);
2483
2484 if (entry != NULL) {
2485 entry->length = size;
2486 entry->memblock = memblock;
2487 entry->dma_addr = dma_object->addr;
2488 entry->acc_handle = dma_object->acc_handle;
2489 entry->dma_handle = dma_object->handle;
2490 list_add(&entry->item,
2491 &blockpool->free_block_list);
2492 blockpool->pool_size++;
2493 status = VXGE_HW_OK;
2494 } else
2495 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2496
2497 if (status == VXGE_HW_OK)
2498 __vxge_hw_blockpool_blocks_remove(blockpool);
2499 }
2500}
2501
2502/*
2503 * __vxge_hw_mempool_destroy - Destroy the memory pool
2504 */
2505static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
2506{
2507 u32 i, j;
2508 struct __vxge_hw_device *devh = mempool->devh;
2509
2510 for (i = 0; i < mempool->memblocks_allocated; i++) {
2511 struct vxge_hw_mempool_dma *dma_object;
2512
2513 vxge_assert(mempool->memblocks_arr[i]);
2514 vxge_assert(mempool->memblocks_dma_arr + i);
2515
2516 dma_object = mempool->memblocks_dma_arr + i;
2517
2518 for (j = 0; j < mempool->items_per_memblock; j++) {
2519 u32 index = i * mempool->items_per_memblock + j;
2520
2521 /* to skip last partially filled(if any) memblock */
2522 if (index >= mempool->items_current)
2523 break;
2524 }
2525
2526 vfree(mempool->memblocks_priv_arr[i]);
2527
2528 __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
2529 mempool->memblock_size, dma_object);
2530 }
2531
2532 vfree(mempool->items_arr);
2533 vfree(mempool->memblocks_dma_arr);
2534 vfree(mempool->memblocks_priv_arr);
2535 vfree(mempool->memblocks_arr);
2536 vfree(mempool);
2537}
2538
2539/*
2540 * __vxge_hw_mempool_grow
2541 * Grows the mempool by allocating up to %num_allocate additional memblocks.
2542 */
2543static enum vxge_hw_status
2544__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
2545 u32 *num_allocated)
2546{
2547 u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
2548 u32 n_items = mempool->items_per_memblock;
2549 u32 start_block_idx = mempool->memblocks_allocated;
2550 u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
2551 enum vxge_hw_status status = VXGE_HW_OK;
2552
2553 *num_allocated = 0;
2554
2555 if (end_block_idx > mempool->memblocks_max) {
2556 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2557 goto exit;
2558 }
2559
2560 for (i = start_block_idx; i < end_block_idx; i++) {
2561 u32 j;
2562 u32 is_last = ((end_block_idx - 1) == i);
2563 struct vxge_hw_mempool_dma *dma_object =
2564 mempool->memblocks_dma_arr + i;
2565 void *the_memblock;
2566
2567 /* allocate memblock's private part. Each DMA memblock
2568 * has a space allocated for item's private usage upon
2569 * mempool's user request. Each time mempool grows, it will
2570 * allocate new memblock and its private part at once.
2571 * This helps to minimize memory usage a lot. */
2572 mempool->memblocks_priv_arr[i] =
2573 vzalloc(mempool->items_priv_size * n_items);
2574 if (mempool->memblocks_priv_arr[i] == NULL) {
2575 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2576 goto exit;
2577 }
2578
2579 /* allocate DMA-capable memblock */
2580 mempool->memblocks_arr[i] =
2581 __vxge_hw_blockpool_malloc(mempool->devh,
2582 mempool->memblock_size, dma_object);
2583 if (mempool->memblocks_arr[i] == NULL) {
2584 vfree(mempool->memblocks_priv_arr[i]);
2585 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2586 goto exit;
2587 }
2588
2589 (*num_allocated)++;
2590 mempool->memblocks_allocated++;
2591
2592 memset(mempool->memblocks_arr[i], 0, mempool->memblock_size);
2593
2594 the_memblock = mempool->memblocks_arr[i];
2595
2596 /* fill the items hash array */
2597 for (j = 0; j < n_items; j++) {
2598 u32 index = i * n_items + j;
2599
2600 if (first_time && index >= mempool->items_initial)
2601 break;
2602
2603 mempool->items_arr[index] =
2604 ((char *)the_memblock + j*mempool->item_size);
2605
2606			/* let the caller do more work on each item */
2607 if (mempool->item_func_alloc != NULL)
2608 mempool->item_func_alloc(mempool, i,
2609 dma_object, index, is_last);
2610
2611 mempool->items_current = index + 1;
2612 }
2613
2614 if (first_time && mempool->items_current ==
2615 mempool->items_initial)
2616 break;
2617 }
2618exit:
2619 return status;
2620}
2621
2622/*
2623 * __vxge_hw_mempool_create
2624 * This function creates a memory pool object. The pool may grow but will
2625 * never shrink. The pool consists of a number of dynamically allocated blocks
2626 * with a size large enough to hold %items_initial number of items. Memory is
2627 * DMA-able, but the client must map/unmap before interoperating with the device.
2628 */
2629static struct vxge_hw_mempool *
2630__vxge_hw_mempool_create(struct __vxge_hw_device *devh,
2631 u32 memblock_size,
2632 u32 item_size,
2633 u32 items_priv_size,
2634 u32 items_initial,
2635 u32 items_max,
2636 struct vxge_hw_mempool_cbs *mp_callback,
2637 void *userdata)
2638{
2639 enum vxge_hw_status status = VXGE_HW_OK;
2640 u32 memblocks_to_allocate;
2641 struct vxge_hw_mempool *mempool = NULL;
2642 u32 allocated;
2643
2644 if (memblock_size < item_size) {
2645 status = VXGE_HW_FAIL;
2646 goto exit;
2647 }
2648
2649 mempool = vzalloc(sizeof(struct vxge_hw_mempool));
2650 if (mempool == NULL) {
2651 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2652 goto exit;
2653 }
2654
2655 mempool->devh = devh;
2656 mempool->memblock_size = memblock_size;
2657 mempool->items_max = items_max;
2658 mempool->items_initial = items_initial;
2659 mempool->item_size = item_size;
2660 mempool->items_priv_size = items_priv_size;
2661 mempool->item_func_alloc = mp_callback->item_func_alloc;
2662 mempool->userdata = userdata;
2663
2664 mempool->memblocks_allocated = 0;
2665
2666 mempool->items_per_memblock = memblock_size / item_size;
2667
2668 mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
2669 mempool->items_per_memblock;
2670
2671 /* allocate array of memblocks */
2672 mempool->memblocks_arr =
2673 vzalloc(sizeof(void *) * mempool->memblocks_max);
2674 if (mempool->memblocks_arr == NULL) {
2675 __vxge_hw_mempool_destroy(mempool);
2676 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2677 mempool = NULL;
2678 goto exit;
2679 }
2680
2681 /* allocate array of private parts of items per memblocks */
2682 mempool->memblocks_priv_arr =
2683 vzalloc(sizeof(void *) * mempool->memblocks_max);
2684 if (mempool->memblocks_priv_arr == NULL) {
2685 __vxge_hw_mempool_destroy(mempool);
2686 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2687 mempool = NULL;
2688 goto exit;
2689 }
2690
2691 /* allocate array of memblocks DMA objects */
2692 mempool->memblocks_dma_arr =
2693 vzalloc(sizeof(struct vxge_hw_mempool_dma) *
2694 mempool->memblocks_max);
2695 if (mempool->memblocks_dma_arr == NULL) {
2696 __vxge_hw_mempool_destroy(mempool);
2697 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2698 mempool = NULL;
2699 goto exit;
2700 }
2701
2702 /* allocate hash array of items */
2703 mempool->items_arr = vzalloc(sizeof(void *) * mempool->items_max);
2704 if (mempool->items_arr == NULL) {
2705 __vxge_hw_mempool_destroy(mempool);
2706 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2707 mempool = NULL;
2708 goto exit;
2709 }
2710
2711 /* calculate initial number of memblocks */
2712 memblocks_to_allocate = (mempool->items_initial +
2713 mempool->items_per_memblock - 1) /
2714 mempool->items_per_memblock;
2715
2716 /* pre-allocate the mempool */
2717 status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,
2718 &allocated);
2719 if (status != VXGE_HW_OK) {
2720 __vxge_hw_mempool_destroy(mempool);
2721 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2722 mempool = NULL;
2723 goto exit;
2724 }
2725
2726exit:
2727 return mempool;
2728}
2729
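/*
 * Illustrative sketch (editor's example, not part of the driver): the ring and
 * fifo code below use __vxge_hw_mempool_create() in this basic pattern -- pick
 * a block size, an item size, initial/maximum item counts and an
 * item_func_alloc callback that pre-formats each item.  The my_item_alloc
 * parameter and the counts used here are hypothetical.
 */
static struct vxge_hw_mempool * __maybe_unused
vxge_example_mempool_create(struct __vxge_hw_device *devh,
			    void (*my_item_alloc)(struct vxge_hw_mempool *, u32,
						  struct vxge_hw_mempool_dma *,
						  u32, u32))
{
	struct vxge_hw_mempool_cbs cbs;

	cbs.item_func_alloc = my_item_alloc;

	/* 16 items, one per memory block, with 64 bytes of private space each */
	return __vxge_hw_mempool_create(devh, VXGE_HW_BLOCK_SIZE,
					VXGE_HW_BLOCK_SIZE, 64, 16, 16,
					&cbs, NULL);
}
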
2730/*
2731 * __vxge_hw_ring_abort - Abort the outstanding RxDs
2732 * This function terminates the RxDs of the ring
2733 */
2734static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
2735{
2736 void *rxdh;
2737 struct __vxge_hw_channel *channel;
2738
2739 channel = &ring->channel;
2740
2741 for (;;) {
2742 vxge_hw_channel_dtr_try_complete(channel, &rxdh);
2743
2744 if (rxdh == NULL)
2745 break;
2746
2747 vxge_hw_channel_dtr_complete(channel);
2748
2749 if (ring->rxd_term)
2750 ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
2751 channel->userdata);
2752
2753 vxge_hw_channel_dtr_free(channel, rxdh);
2754 }
2755
2756 return VXGE_HW_OK;
2757}
2758
2759/*
2760 * __vxge_hw_ring_reset - Resets the ring
2761 * This function resets the ring during vpath reset operation
2762 */
2763static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
2764{
2765 enum vxge_hw_status status = VXGE_HW_OK;
2766 struct __vxge_hw_channel *channel;
2767
2768 channel = &ring->channel;
2769
2770 __vxge_hw_ring_abort(ring);
2771
2772 status = __vxge_hw_channel_reset(channel);
2773
2774 if (status != VXGE_HW_OK)
2775 goto exit;
2776
2777 if (ring->rxd_init) {
2778 status = vxge_hw_ring_replenish(ring);
2779 if (status != VXGE_HW_OK)
2780 goto exit;
2781 }
2782exit:
2783 return status;
2784}
2785
2786/*
2787 * __vxge_hw_ring_delete - Removes the ring
2788 * This function frees up the memory pool and removes the ring
2789 */
2790static enum vxge_hw_status
2791__vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
2792{
2793 struct __vxge_hw_ring *ring = vp->vpath->ringh;
2794
2795 __vxge_hw_ring_abort(ring);
2796
2797 if (ring->mempool)
2798 __vxge_hw_mempool_destroy(ring->mempool);
2799
2800 vp->vpath->ringh = NULL;
2801 __vxge_hw_channel_free(&ring->channel);
2802
2803 return VXGE_HW_OK;
2804}
2805
2806/*
2807 * __vxge_hw_ring_create - Create a Ring
2808 * This function creates a Ring and initializes it.
2809 */
2810static enum vxge_hw_status
2811__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
2812 struct vxge_hw_ring_attr *attr)
2813{
2814 enum vxge_hw_status status = VXGE_HW_OK;
2815 struct __vxge_hw_ring *ring;
2816 u32 ring_length;
2817 struct vxge_hw_ring_config *config;
2818 struct __vxge_hw_device *hldev;
2819 u32 vp_id;
2820 struct vxge_hw_mempool_cbs ring_mp_callback;
2821
2822 if ((vp == NULL) || (attr == NULL)) {
2823 status = VXGE_HW_FAIL;
2824 goto exit;
2825 }
2826
2827 hldev = vp->vpath->hldev;
2828 vp_id = vp->vpath->vp_id;
2829
2830 config = &hldev->config.vp_config[vp_id].ring;
2831
2832 ring_length = config->ring_blocks *
2833 vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
2834
2835 ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
2836 VXGE_HW_CHANNEL_TYPE_RING,
2837 ring_length,
2838 attr->per_rxd_space,
2839 attr->userdata);
2840 if (ring == NULL) {
2841 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2842 goto exit;
2843 }
2844
2845 vp->vpath->ringh = ring;
2846 ring->vp_id = vp_id;
2847 ring->vp_reg = vp->vpath->vp_reg;
2848 ring->common_reg = hldev->common_reg;
2849 ring->stats = &vp->vpath->sw_stats->ring_stats;
2850 ring->config = config;
2851 ring->callback = attr->callback;
2852 ring->rxd_init = attr->rxd_init;
2853 ring->rxd_term = attr->rxd_term;
2854 ring->buffer_mode = config->buffer_mode;
2855 ring->tim_rti_cfg1_saved = vp->vpath->tim_rti_cfg1_saved;
2856 ring->tim_rti_cfg3_saved = vp->vpath->tim_rti_cfg3_saved;
2857 ring->rxds_limit = config->rxds_limit;
2858
2859 ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
2860 ring->rxd_priv_size =
2861 sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
2862 ring->per_rxd_space = attr->per_rxd_space;
2863
2864 ring->rxd_priv_size =
2865 ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
2866 VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
2867
2868 /* how many RxDs can fit into one block. Depends on configured
2869 * buffer_mode. */
2870 ring->rxds_per_block =
2871 vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
2872
2873 /* calculate actual RxD block private size */
2874 ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
2875 ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
2876 ring->mempool = __vxge_hw_mempool_create(hldev,
2877 VXGE_HW_BLOCK_SIZE,
2878 VXGE_HW_BLOCK_SIZE,
2879 ring->rxdblock_priv_size,
2880 ring->config->ring_blocks,
2881 ring->config->ring_blocks,
2882 &ring_mp_callback,
2883 ring);
2884 if (ring->mempool == NULL) {
2885 __vxge_hw_ring_delete(vp);
2886 return VXGE_HW_ERR_OUT_OF_MEMORY;
2887 }
2888
2889 status = __vxge_hw_channel_initialize(&ring->channel);
2890 if (status != VXGE_HW_OK) {
2891 __vxge_hw_ring_delete(vp);
2892 goto exit;
2893 }
2894
2895 /* Note:
2896 * Specifying rxd_init callback means two things:
2897 * 1) rxds need to be initialized by driver at channel-open time;
2898 * 2) rxds need to be posted at channel-open time
2899 * (that's what the initial_replenish() below does)
2900 * Currently we don't have a case when the 1) is done without the 2).
2901 */
2902 if (ring->rxd_init) {
2903 status = vxge_hw_ring_replenish(ring);
2904 if (status != VXGE_HW_OK) {
2905 __vxge_hw_ring_delete(vp);
2906 goto exit;
2907 }
2908 }
2909
2910 /* initial replenish will increment the counter in its post() routine,
2911 * we have to reset it */
2912 ring->stats->common_stats.usage_cnt = 0;
2913exit:
2914 return status;
2915}
2916
2917/*
2918 * vxge_hw_device_config_default_get - Initialize device config with defaults.
2919 * Initialize Titan device config with default values.
2920 */
2921enum vxge_hw_status __devinit
2922vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
2923{
2924 u32 i;
2925
2926 device_config->dma_blockpool_initial =
2927 VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
2928 device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
2929 device_config->intr_mode = VXGE_HW_INTR_MODE_DEF;
2930 device_config->rth_en = VXGE_HW_RTH_DEFAULT;
2931 device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT;
2932 device_config->device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS;
2933 device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT;
2934
2935 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
2936 device_config->vp_config[i].vp_id = i;
2937
2938 device_config->vp_config[i].min_bandwidth =
2939 VXGE_HW_VPATH_BANDWIDTH_DEFAULT;
2940
2941 device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT;
2942
2943 device_config->vp_config[i].ring.ring_blocks =
2944 VXGE_HW_DEF_RING_BLOCKS;
2945
2946 device_config->vp_config[i].ring.buffer_mode =
2947 VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT;
2948
2949 device_config->vp_config[i].ring.scatter_mode =
2950 VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT;
2951
2952 device_config->vp_config[i].ring.rxds_limit =
2953 VXGE_HW_DEF_RING_RXDS_LIMIT;
2954
2955 device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE;
2956
2957 device_config->vp_config[i].fifo.fifo_blocks =
2958 VXGE_HW_MIN_FIFO_BLOCKS;
2959
2960 device_config->vp_config[i].fifo.max_frags =
2961 VXGE_HW_MAX_FIFO_FRAGS;
2962
2963 device_config->vp_config[i].fifo.memblock_size =
2964 VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE;
2965
2966 device_config->vp_config[i].fifo.alignment_size =
2967 VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE;
2968
2969 device_config->vp_config[i].fifo.intr =
2970 VXGE_HW_FIFO_QUEUE_INTR_DEFAULT;
2971
2972 device_config->vp_config[i].fifo.no_snoop_bits =
2973 VXGE_HW_FIFO_NO_SNOOP_DEFAULT;
2974 device_config->vp_config[i].tti.intr_enable =
2975 VXGE_HW_TIM_INTR_DEFAULT;
2976
2977 device_config->vp_config[i].tti.btimer_val =
2978 VXGE_HW_USE_FLASH_DEFAULT;
2979
2980 device_config->vp_config[i].tti.timer_ac_en =
2981 VXGE_HW_USE_FLASH_DEFAULT;
2982
2983 device_config->vp_config[i].tti.timer_ci_en =
2984 VXGE_HW_USE_FLASH_DEFAULT;
2985
2986 device_config->vp_config[i].tti.timer_ri_en =
2987 VXGE_HW_USE_FLASH_DEFAULT;
2988
2989 device_config->vp_config[i].tti.rtimer_val =
2990 VXGE_HW_USE_FLASH_DEFAULT;
2991
2992 device_config->vp_config[i].tti.util_sel =
2993 VXGE_HW_USE_FLASH_DEFAULT;
2994
2995 device_config->vp_config[i].tti.ltimer_val =
2996 VXGE_HW_USE_FLASH_DEFAULT;
2997
2998 device_config->vp_config[i].tti.urange_a =
2999 VXGE_HW_USE_FLASH_DEFAULT;
3000
3001 device_config->vp_config[i].tti.uec_a =
3002 VXGE_HW_USE_FLASH_DEFAULT;
3003
3004 device_config->vp_config[i].tti.urange_b =
3005 VXGE_HW_USE_FLASH_DEFAULT;
3006
3007 device_config->vp_config[i].tti.uec_b =
3008 VXGE_HW_USE_FLASH_DEFAULT;
3009
3010 device_config->vp_config[i].tti.urange_c =
3011 VXGE_HW_USE_FLASH_DEFAULT;
3012
3013 device_config->vp_config[i].tti.uec_c =
3014 VXGE_HW_USE_FLASH_DEFAULT;
3015
3016 device_config->vp_config[i].tti.uec_d =
3017 VXGE_HW_USE_FLASH_DEFAULT;
3018
3019 device_config->vp_config[i].rti.intr_enable =
3020 VXGE_HW_TIM_INTR_DEFAULT;
3021
3022 device_config->vp_config[i].rti.btimer_val =
3023 VXGE_HW_USE_FLASH_DEFAULT;
3024
3025 device_config->vp_config[i].rti.timer_ac_en =
3026 VXGE_HW_USE_FLASH_DEFAULT;
3027
3028 device_config->vp_config[i].rti.timer_ci_en =
3029 VXGE_HW_USE_FLASH_DEFAULT;
3030
3031 device_config->vp_config[i].rti.timer_ri_en =
3032 VXGE_HW_USE_FLASH_DEFAULT;
3033
3034 device_config->vp_config[i].rti.rtimer_val =
3035 VXGE_HW_USE_FLASH_DEFAULT;
3036
3037 device_config->vp_config[i].rti.util_sel =
3038 VXGE_HW_USE_FLASH_DEFAULT;
3039
3040 device_config->vp_config[i].rti.ltimer_val =
3041 VXGE_HW_USE_FLASH_DEFAULT;
3042
3043 device_config->vp_config[i].rti.urange_a =
3044 VXGE_HW_USE_FLASH_DEFAULT;
3045
3046 device_config->vp_config[i].rti.uec_a =
3047 VXGE_HW_USE_FLASH_DEFAULT;
3048
3049 device_config->vp_config[i].rti.urange_b =
3050 VXGE_HW_USE_FLASH_DEFAULT;
3051
3052 device_config->vp_config[i].rti.uec_b =
3053 VXGE_HW_USE_FLASH_DEFAULT;
3054
3055 device_config->vp_config[i].rti.urange_c =
3056 VXGE_HW_USE_FLASH_DEFAULT;
3057
3058 device_config->vp_config[i].rti.uec_c =
3059 VXGE_HW_USE_FLASH_DEFAULT;
3060
3061 device_config->vp_config[i].rti.uec_d =
3062 VXGE_HW_USE_FLASH_DEFAULT;
3063
3064 device_config->vp_config[i].mtu =
3065 VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU;
3066
3067 device_config->vp_config[i].rpa_strip_vlan_tag =
3068 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT;
3069 }
3070
3071 return VXGE_HW_OK;
3072}
3073
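/*
 * Illustrative sketch (editor's example, not part of the driver): a probe-time
 * caller typically fills a vxge_hw_device_config with the defaults above and
 * then overrides the few fields it cares about.  VXGE_HW_INTR_MODE_IRQLINE is
 * assumed to be one of the driver's interrupt-mode constants.
 */
static void __devinit __maybe_unused
vxge_example_prepare_config(struct vxge_hw_device_config *device_config)
{
	vxge_hw_device_config_default_get(device_config);

	/* example override: fall back to legacy INTA interrupts */
	device_config->intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
}
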
3074/*
3075 * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
3076 * Set the swapper bits appropriately for the vpath.
3077 */
3078static enum vxge_hw_status
3079__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
3080{
3081#ifndef __BIG_ENDIAN
3082 u64 val64;
3083
3084 val64 = readq(&vpath_reg->vpath_general_cfg1);
3085 wmb();
3086 val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
3087 writeq(val64, &vpath_reg->vpath_general_cfg1);
3088 wmb();
3089#endif
3090 return VXGE_HW_OK;
3091}
3092
3093/*
3094 * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
3095 * Set the swapper bits appropriately for the kdfc.
3096 */
3097static enum vxge_hw_status
3098__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
3099 struct vxge_hw_vpath_reg __iomem *vpath_reg)
3100{
3101 u64 val64;
3102
3103 val64 = readq(&legacy_reg->pifm_wr_swap_en);
3104
3105 if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
3106 val64 = readq(&vpath_reg->kdfcctl_cfg0);
3107 wmb();
3108
3109 val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 |
3110 VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1 |
3111 VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;
3112
3113 writeq(val64, &vpath_reg->kdfcctl_cfg0);
3114 wmb();
3115 }
3116
3117 return VXGE_HW_OK;
3118}
3119
3120/*
3121 * vxge_hw_mgmt_reg_read - Read Titan register.
3122 */
3123enum vxge_hw_status
3124vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
3125 enum vxge_hw_mgmt_reg_type type,
3126 u32 index, u32 offset, u64 *value)
3127{
3128 enum vxge_hw_status status = VXGE_HW_OK;
3129
3130 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
3131 status = VXGE_HW_ERR_INVALID_DEVICE;
3132 goto exit;
3133 }
3134
3135 switch (type) {
3136 case vxge_hw_mgmt_reg_type_legacy:
3137 if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
3138 status = VXGE_HW_ERR_INVALID_OFFSET;
3139 break;
3140 }
3141 *value = readq((void __iomem *)hldev->legacy_reg + offset);
3142 break;
3143 case vxge_hw_mgmt_reg_type_toc:
3144 if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
3145 status = VXGE_HW_ERR_INVALID_OFFSET;
3146 break;
3147 }
3148 *value = readq((void __iomem *)hldev->toc_reg + offset);
3149 break;
3150 case vxge_hw_mgmt_reg_type_common:
3151 if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
3152 status = VXGE_HW_ERR_INVALID_OFFSET;
3153 break;
3154 }
3155 *value = readq((void __iomem *)hldev->common_reg + offset);
3156 break;
3157 case vxge_hw_mgmt_reg_type_mrpcim:
3158 if (!(hldev->access_rights &
3159 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
3160 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
3161 break;
3162 }
3163 if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
3164 status = VXGE_HW_ERR_INVALID_OFFSET;
3165 break;
3166 }
3167 *value = readq((void __iomem *)hldev->mrpcim_reg + offset);
3168 break;
3169 case vxge_hw_mgmt_reg_type_srpcim:
3170 if (!(hldev->access_rights &
3171 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
3172 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
3173 break;
3174 }
3175 if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
3176 status = VXGE_HW_ERR_INVALID_INDEX;
3177 break;
3178 }
3179 if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
3180 status = VXGE_HW_ERR_INVALID_OFFSET;
3181 break;
3182 }
3183 *value = readq((void __iomem *)hldev->srpcim_reg[index] +
3184 offset);
3185 break;
3186 case vxge_hw_mgmt_reg_type_vpmgmt:
3187 if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
3188 (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
3189 status = VXGE_HW_ERR_INVALID_INDEX;
3190 break;
3191 }
3192 if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
3193 status = VXGE_HW_ERR_INVALID_OFFSET;
3194 break;
3195 }
3196 *value = readq((void __iomem *)hldev->vpmgmt_reg[index] +
3197 offset);
3198 break;
3199 case vxge_hw_mgmt_reg_type_vpath:
3200 if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
3201 (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
3202 status = VXGE_HW_ERR_INVALID_INDEX;
3203 break;
3204 }
3205 if (index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) {
3206 status = VXGE_HW_ERR_INVALID_INDEX;
3207 break;
3208 }
3209 if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
3210 status = VXGE_HW_ERR_INVALID_OFFSET;
3211 break;
3212 }
3213 *value = readq((void __iomem *)hldev->vpath_reg[index] +
3214 offset);
3215 break;
3216 default:
3217 status = VXGE_HW_ERR_INVALID_TYPE;
3218 break;
3219 }
3220
3221exit:
3222 return status;
3223}
3224
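/*
 * Illustrative sketch (editor's example, not part of the driver): read one
 * 64-bit word from the common register space through the managed accessor
 * above.  Offset 0 is chosen purely as an example; real callers pass offsets
 * taken from struct vxge_hw_common_reg.
 */
static enum vxge_hw_status __maybe_unused
vxge_example_read_common_reg0(struct __vxge_hw_device *hldev, u64 *value)
{
	return vxge_hw_mgmt_reg_read(hldev, vxge_hw_mgmt_reg_type_common,
				     0, 0, value);
}
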
3225/*
3226 * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
3227 */
3228enum vxge_hw_status
3229vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
3230{
3231 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
3232 enum vxge_hw_status status = VXGE_HW_OK;
3233 int i = 0, j = 0;
3234
3235 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3236 if (!((vpath_mask) & vxge_mBIT(i)))
3237 continue;
3238 vpmgmt_reg = hldev->vpmgmt_reg[i];
3239 for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
3240 if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
3241 & VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
3242 return VXGE_HW_FAIL;
3243 }
3244 }
3245 return status;
3246}
3247/*
3248 * vxge_hw_mgmt_reg_write - Write Titan register.
3249 */
3250enum vxge_hw_status
3251vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
3252 enum vxge_hw_mgmt_reg_type type,
3253 u32 index, u32 offset, u64 value)
3254{
3255 enum vxge_hw_status status = VXGE_HW_OK;
3256
3257 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
3258 status = VXGE_HW_ERR_INVALID_DEVICE;
3259 goto exit;
3260 }
3261
3262 switch (type) {
3263 case vxge_hw_mgmt_reg_type_legacy:
3264 if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
3265 status = VXGE_HW_ERR_INVALID_OFFSET;
3266 break;
3267 }
3268 writeq(value, (void __iomem *)hldev->legacy_reg + offset);
3269 break;
3270 case vxge_hw_mgmt_reg_type_toc:
3271 if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
3272 status = VXGE_HW_ERR_INVALID_OFFSET;
3273 break;
3274 }
3275 writeq(value, (void __iomem *)hldev->toc_reg + offset);
3276 break;
3277 case vxge_hw_mgmt_reg_type_common:
3278 if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
3279 status = VXGE_HW_ERR_INVALID_OFFSET;
3280 break;
3281 }
3282 writeq(value, (void __iomem *)hldev->common_reg + offset);
3283 break;
3284 case vxge_hw_mgmt_reg_type_mrpcim:
3285 if (!(hldev->access_rights &
3286 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
3287 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
3288 break;
3289 }
3290 if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
3291 status = VXGE_HW_ERR_INVALID_OFFSET;
3292 break;
3293 }
3294 writeq(value, (void __iomem *)hldev->mrpcim_reg + offset);
3295 break;
3296 case vxge_hw_mgmt_reg_type_srpcim:
3297 if (!(hldev->access_rights &
3298 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
3299 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
3300 break;
3301 }
3302 if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
3303 status = VXGE_HW_ERR_INVALID_INDEX;
3304 break;
3305 }
3306 if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
3307 status = VXGE_HW_ERR_INVALID_OFFSET;
3308 break;
3309 }
3310 writeq(value, (void __iomem *)hldev->srpcim_reg[index] +
3311 offset);
3312
3313 break;
3314 case vxge_hw_mgmt_reg_type_vpmgmt:
3315 if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
3316 (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
3317 status = VXGE_HW_ERR_INVALID_INDEX;
3318 break;
3319 }
3320 if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
3321 status = VXGE_HW_ERR_INVALID_OFFSET;
3322 break;
3323 }
3324 writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] +
3325 offset);
3326 break;
3327 case vxge_hw_mgmt_reg_type_vpath:
3328 if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES-1) ||
3329 (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
3330 status = VXGE_HW_ERR_INVALID_INDEX;
3331 break;
3332 }
3333 if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
3334 status = VXGE_HW_ERR_INVALID_OFFSET;
3335 break;
3336 }
3337 writeq(value, (void __iomem *)hldev->vpath_reg[index] +
3338 offset);
3339 break;
3340 default:
3341 status = VXGE_HW_ERR_INVALID_TYPE;
3342 break;
3343 }
3344exit:
3345 return status;
3346}
3347
3348/*
3349 * __vxge_hw_fifo_abort - Abort the outstanding TxDs
3350 * This function terminates the TxDs of the fifo
3351 */
3352static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
3353{
3354 void *txdlh;
3355
3356 for (;;) {
3357 vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
3358
3359 if (txdlh == NULL)
3360 break;
3361
3362 vxge_hw_channel_dtr_complete(&fifo->channel);
3363
3364 if (fifo->txdl_term) {
3365 fifo->txdl_term(txdlh,
3366 VXGE_HW_TXDL_STATE_POSTED,
3367 fifo->channel.userdata);
3368 }
3369
3370 vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
3371 }
3372
3373 return VXGE_HW_OK;
3374}
3375
3376/*
3377 * __vxge_hw_fifo_reset - Resets the fifo
3378 * This function resets the fifo during vpath reset operation
3379 */
3380static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
3381{
3382 enum vxge_hw_status status = VXGE_HW_OK;
3383
3384 __vxge_hw_fifo_abort(fifo);
3385 status = __vxge_hw_channel_reset(&fifo->channel);
3386
3387 return status;
3388}
3389
3390/*
3391 * __vxge_hw_fifo_delete - Removes the FIFO
3392 * This function frees up the memory pool and removes the FIFO
3393 */
3394static enum vxge_hw_status
3395__vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
3396{
3397 struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
3398
3399 __vxge_hw_fifo_abort(fifo);
3400
3401 if (fifo->mempool)
3402 __vxge_hw_mempool_destroy(fifo->mempool);
3403
3404 vp->vpath->fifoh = NULL;
3405
3406 __vxge_hw_channel_free(&fifo->channel);
3407
3408 return VXGE_HW_OK;
3409}
3410
3411/*
3412 * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD
3413 * list callback
3414 * This function is the callback passed to __vxge_hw_mempool_create to create
3415 * the memory pool for the TxD list
3416 */
3417static void
3418__vxge_hw_fifo_mempool_item_alloc(
3419 struct vxge_hw_mempool *mempoolh,
3420 u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
3421 u32 index, u32 is_last)
3422{
3423 u32 memblock_item_idx;
3424 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
3425 struct vxge_hw_fifo_txd *txdp =
3426 (struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
3427 struct __vxge_hw_fifo *fifo =
3428 (struct __vxge_hw_fifo *)mempoolh->userdata;
3429 void *memblock = mempoolh->memblocks_arr[memblock_index];
3430
3431 vxge_assert(txdp);
3432
3433 txdp->host_control = (u64) (size_t)
3434 __vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
3435 &memblock_item_idx);
3436
3437 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
3438
3439 vxge_assert(txdl_priv);
3440
3441 fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp;
3442
3443 /* pre-format HW's TxDL's private */
3444 txdl_priv->dma_offset = (char *)txdp - (char *)memblock;
3445 txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
3446 txdl_priv->dma_handle = dma_object->handle;
3447 txdl_priv->memblock = memblock;
3448 txdl_priv->first_txdp = txdp;
3449 txdl_priv->next_txdl_priv = NULL;
3450 txdl_priv->alloc_frags = 0;
3451}
3452
3453/*
3454 * __vxge_hw_fifo_create - Create a FIFO
3455 * This function creates a FIFO and initializes it.
3456 */
3457static enum vxge_hw_status
3458__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
3459 struct vxge_hw_fifo_attr *attr)
3460{
3461 enum vxge_hw_status status = VXGE_HW_OK;
3462 struct __vxge_hw_fifo *fifo;
3463 struct vxge_hw_fifo_config *config;
3464 u32 txdl_size, txdl_per_memblock;
3465 struct vxge_hw_mempool_cbs fifo_mp_callback;
3466 struct __vxge_hw_virtualpath *vpath;
3467
3468 if ((vp == NULL) || (attr == NULL)) {
3469 status = VXGE_HW_ERR_INVALID_HANDLE;
3470 goto exit;
3471 }
3472 vpath = vp->vpath;
3473 config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo;
3474
3475 txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd);
3476
3477 txdl_per_memblock = config->memblock_size / txdl_size;
3478
3479 fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp,
3480 VXGE_HW_CHANNEL_TYPE_FIFO,
3481 config->fifo_blocks * txdl_per_memblock,
3482 attr->per_txdl_space, attr->userdata);
3483
3484 if (fifo == NULL) {
3485 status = VXGE_HW_ERR_OUT_OF_MEMORY;
3486 goto exit;
3487 }
3488
3489 vpath->fifoh = fifo;
3490 fifo->nofl_db = vpath->nofl_db;
3491
3492 fifo->vp_id = vpath->vp_id;
3493 fifo->vp_reg = vpath->vp_reg;
3494 fifo->stats = &vpath->sw_stats->fifo_stats;
3495
3496 fifo->config = config;
3497
3498 /* apply "interrupts per txdl" attribute */
3499 fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
3500 fifo->tim_tti_cfg1_saved = vpath->tim_tti_cfg1_saved;
3501 fifo->tim_tti_cfg3_saved = vpath->tim_tti_cfg3_saved;
3502
3503 if (fifo->config->intr)
3504 fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
3505
3506 fifo->no_snoop_bits = config->no_snoop_bits;
3507
3508 /*
3509 * FIFO memory management strategy:
3510 *
3511 * TxDL split into three independent parts:
3512 * - set of TxD's
3513 * - TxD HW private part
3514 * - driver private part
3515 *
3516 * Adaptive memory allocation is used, i.e. memory is allocated on
3517 * demand with a size that fits into one memory block.
3518 * One memory block may contain more than one TxDL.
3519 *
3520 * During "reserve" operations more memory can be allocated on demand
3521 * for example due to FIFO full condition.
3522 *
3523 * The pool of memblocks never shrinks except in the __vxge_hw_fifo_delete
3524 * routine, which essentially stops the channel and frees the resources.
3525 */
3526
3527 /* TxDL common private size == TxDL private + driver private */
3528 fifo->priv_size =
3529 sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
3530 fifo->priv_size = ((fifo->priv_size + VXGE_CACHE_LINE_SIZE - 1) /
3531 VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
3532
3533 fifo->per_txdl_space = attr->per_txdl_space;
3534
3535 /* recompute txdl size to be cacheline aligned */
3536 fifo->txdl_size = txdl_size;
3537 fifo->txdl_per_memblock = txdl_per_memblock;
3538
3539 fifo->txdl_term = attr->txdl_term;
3540 fifo->callback = attr->callback;
3541
3542 if (fifo->txdl_per_memblock == 0) {
3543 __vxge_hw_fifo_delete(vp);
3544 status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
3545 goto exit;
3546 }
3547
3548 fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
3549
3550 fifo->mempool =
3551 __vxge_hw_mempool_create(vpath->hldev,
3552 fifo->config->memblock_size,
3553 fifo->txdl_size,
3554 fifo->priv_size,
3555 (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
3556 (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
3557 &fifo_mp_callback,
3558 fifo);
3559
3560 if (fifo->mempool == NULL) {
3561 __vxge_hw_fifo_delete(vp);
3562 status = VXGE_HW_ERR_OUT_OF_MEMORY;
3563 goto exit;
3564 }
3565
3566 status = __vxge_hw_channel_initialize(&fifo->channel);
3567 if (status != VXGE_HW_OK) {
3568 __vxge_hw_fifo_delete(vp);
3569 goto exit;
3570 }
3571
3572 vxge_assert(fifo->channel.reserve_ptr);
3573exit:
3574 return status;
3575}
3576
3577/*
3578 * __vxge_hw_vpath_pci_read - Read the content of given address
3579 * in pci config space.
3580 * Read from the vpath pci config space.
3581 */
3582static enum vxge_hw_status
3583__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
3584 u32 phy_func_0, u32 offset, u32 *val)
3585{
3586 u64 val64;
3587 enum vxge_hw_status status = VXGE_HW_OK;
3588 struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
3589
3590 val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);
3591
3592 if (phy_func_0)
3593 val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;
3594
3595 writeq(val64, &vp_reg->pci_config_access_cfg1);
3596 wmb();
3597 writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
3598 &vp_reg->pci_config_access_cfg2);
3599 wmb();
3600
3601 status = __vxge_hw_device_register_poll(
3602 &vp_reg->pci_config_access_cfg2,
3603 VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);
3604
3605 if (status != VXGE_HW_OK)
3606 goto exit;
3607
3608 val64 = readq(&vp_reg->pci_config_access_status);
3609
3610 if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
3611 status = VXGE_HW_FAIL;
3612 *val = 0;
3613 } else
3614 *val = (u32)vxge_bVALn(val64, 32, 32);
3615exit:
3616 return status;
3617}
3618
3619/**
3620 * vxge_hw_device_flick_link_led - Flick (blink) link LED.
3621 * @hldev: HW device.
3622 * @on_off: TRUE to turn the flickering on, FALSE to turn it off
3623 *
3624 * Flicker the link LED.
3625 */
3626enum vxge_hw_status
3627vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off)
3628{
3629 struct __vxge_hw_virtualpath *vpath;
3630 u64 data0, data1 = 0, steer_ctrl = 0;
3631 enum vxge_hw_status status;
3632
3633 if (hldev == NULL) {
3634 status = VXGE_HW_ERR_INVALID_DEVICE;
3635 goto exit;
3636 }
3637
3638 vpath = &hldev->virtual_paths[hldev->first_vp_id];
3639
3640 data0 = on_off;
3641 status = vxge_hw_vpath_fw_api(vpath,
3642 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL,
3643 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
3644 0, &data0, &data1, &steer_ctrl);
3645exit:
3646 return status;
3647}
3648
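/*
 * Illustrative sketch (editor's example, not part of the driver): an
 * ethtool-style "identify port" helper.  The on/off encoding (1/0) forwarded
 * to the firmware LED-control memo is assumed here.
 */
static enum vxge_hw_status __maybe_unused
vxge_example_identify_port(struct __vxge_hw_device *hldev, u64 on)
{
	return vxge_hw_device_flick_link_led(hldev, on ? 1 : 0);
}
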
3649/*
3650 * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
3651 */
3652enum vxge_hw_status
3653__vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
3654 u32 action, u32 rts_table, u32 offset,
3655 u64 *data0, u64 *data1)
3656{
3657 enum vxge_hw_status status;
3658 u64 steer_ctrl = 0;
3659
3660 if (vp == NULL) {
3661 status = VXGE_HW_ERR_INVALID_HANDLE;
3662 goto exit;
3663 }
3664
3665 if ((rts_table ==
3666 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
3667 (rts_table ==
3668 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
3669 (rts_table ==
3670 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
3671 (rts_table ==
3672 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
3673 steer_ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
3674 }
3675
3676 status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
3677 data0, data1, &steer_ctrl);
3678 if (status != VXGE_HW_OK)
3679 goto exit;
3680
3681 if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
3682 (rts_table !=
3683 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
3684 *data1 = 0;
3685exit:
3686 return status;
3687}
3688
3689/*
3690 * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
3691 */
3692enum vxge_hw_status
3693__vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action,
3694 u32 rts_table, u32 offset, u64 steer_data0,
3695 u64 steer_data1)
3696{
3697 u64 data0, data1 = 0, steer_ctrl = 0;
3698 enum vxge_hw_status status;
3699
3700 if (vp == NULL) {
3701 status = VXGE_HW_ERR_INVALID_HANDLE;
3702 goto exit;
3703 }
3704
3705 data0 = steer_data0;
3706
3707 if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
3708 (rts_table ==
3709 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
3710 data1 = steer_data1;
3711
3712 status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
3713 &data0, &data1, &steer_ctrl);
3714exit:
3715 return status;
3716}
3717
3718/*
3719 * vxge_hw_vpath_rts_rth_set - Set/configure RTS hashing.
3720 */
3721enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
3722 struct __vxge_hw_vpath_handle *vp,
3723 enum vxge_hw_rth_algoritms algorithm,
3724 struct vxge_hw_rth_hash_types *hash_type,
3725 u16 bucket_size)
3726{
3727 u64 data0, data1;
3728 enum vxge_hw_status status = VXGE_HW_OK;
3729
3730 if (vp == NULL) {
3731 status = VXGE_HW_ERR_INVALID_HANDLE;
3732 goto exit;
3733 }
3734
3735 status = __vxge_hw_vpath_rts_table_get(vp,
3736 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
3737 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3738 0, &data0, &data1);
3739 if (status != VXGE_HW_OK)
3740 goto exit;
3741
3742 data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
3743 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
3744
3745 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN |
3746 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) |
3747 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm);
3748
3749 if (hash_type->hash_type_tcpipv4_en)
3750 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN;
3751
3752 if (hash_type->hash_type_ipv4_en)
3753 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN;
3754
3755 if (hash_type->hash_type_tcpipv6_en)
3756 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN;
3757
3758 if (hash_type->hash_type_ipv6_en)
3759 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN;
3760
3761 if (hash_type->hash_type_tcpipv6ex_en)
3762 data0 |=
3763 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN;
3764
3765 if (hash_type->hash_type_ipv6ex_en)
3766 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN;
3767
3768 if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0))
3769 data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
3770 else
3771 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
3772
3773 status = __vxge_hw_vpath_rts_table_set(vp,
3774 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
3775 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3776 0, data0, 0);
3777exit:
3778 return status;
3779}
3780
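/*
 * Illustrative sketch (editor's example, not part of the driver): enable
 * TCP/IPv4 and IPv4 receive-side hashing on one vpath.  RTH_ALG_JENKINS is
 * assumed to be one of the enum vxge_hw_rth_algoritms values, and the bucket
 * size of 8 is an arbitrary example; the real driver takes both from its
 * configuration.
 */
static enum vxge_hw_status __maybe_unused
vxge_example_enable_rth(struct __vxge_hw_vpath_handle *vp)
{
	struct vxge_hw_rth_hash_types hash_types = {
		.hash_type_tcpipv4_en	= 1,
		.hash_type_ipv4_en	= 1,
	};

	return vxge_hw_vpath_rts_rth_set(vp, RTH_ALG_JENKINS, &hash_types, 8);
}
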
3781static void
3782vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
3783 u16 flag, u8 *itable)
3784{
3785 switch (flag) {
3786 case 1:
3787 *data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)|
3788 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
3789 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
3790 itable[j]);
3791 case 2:
3792 *data0 |=
3793 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
3794 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
3795 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
3796 itable[j]);
3797 case 3:
3798 *data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
3799 VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
3800 VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
3801 itable[j]);
3802 case 4:
3803 *data1 |=
3804 VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
3805 VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN |
3806 VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(
3807 itable[j]);
3808 default:
3809 return;
3810 }
3811}
3812/*
3813 * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT).
3814 */
3815enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
3816 struct __vxge_hw_vpath_handle **vpath_handles,
3817 u32 vpath_count,
3818 u8 *mtable,
3819 u8 *itable,
3820 u32 itable_size)
3821{
3822 u32 i, j, action, rts_table;
3823 u64 data0;
3824 u64 data1;
3825 u32 max_entries;
3826 enum vxge_hw_status status = VXGE_HW_OK;
3827 struct __vxge_hw_vpath_handle *vp = vpath_handles[0];
3828
3829 if (vp == NULL) {
3830 status = VXGE_HW_ERR_INVALID_HANDLE;
3831 goto exit;
3832 }
3833
3834 max_entries = (((u32)1) << itable_size);
3835
3836 if (vp->vpath->hldev->config.rth_it_type
3837 == VXGE_HW_RTH_IT_TYPE_SOLO_IT) {
3838 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
3839 rts_table =
3840 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT;
3841
3842 for (j = 0; j < max_entries; j++) {
3843
3844 data1 = 0;
3845
3846 data0 =
3847 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
3848 itable[j]);
3849
3850 status = __vxge_hw_vpath_rts_table_set(vpath_handles[0],
3851 action, rts_table, j, data0, data1);
3852
3853 if (status != VXGE_HW_OK)
3854 goto exit;
3855 }
3856
3857 for (j = 0; j < max_entries; j++) {
3858
3859 data1 = 0;
3860
3861 data0 =
3862 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN |
3863 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
3864 itable[j]);
3865
3866 status = __vxge_hw_vpath_rts_table_set(
3867 vpath_handles[mtable[itable[j]]], action,
3868 rts_table, j, data0, data1);
3869
3870 if (status != VXGE_HW_OK)
3871 goto exit;
3872 }
3873 } else {
3874 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
3875 rts_table =
3876 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT;
3877 for (i = 0; i < vpath_count; i++) {
3878
3879 for (j = 0; j < max_entries;) {
3880
3881 data0 = 0;
3882 data1 = 0;
3883
3884 while (j < max_entries) {
3885 if (mtable[itable[j]] != i) {
3886 j++;
3887 continue;
3888 }
3889 vxge_hw_rts_rth_data0_data1_get(j,
3890 &data0, &data1, 1, itable);
3891 j++;
3892 break;
3893 }
3894
3895 while (j < max_entries) {
3896 if (mtable[itable[j]] != i) {
3897 j++;
3898 continue;
3899 }
3900 vxge_hw_rts_rth_data0_data1_get(j,
3901 &data0, &data1, 2, itable);
3902 j++;
3903 break;
3904 }
3905
3906 while (j < max_entries) {
3907 if (mtable[itable[j]] != i) {
3908 j++;
3909 continue;
3910 }
3911 vxge_hw_rts_rth_data0_data1_get(j,
3912 &data0, &data1, 3, itable);
3913 j++;
3914 break;
3915 }
3916
3917 while (j < max_entries) {
3918 if (mtable[itable[j]] != i) {
3919 j++;
3920 continue;
3921 }
3922 vxge_hw_rts_rth_data0_data1_get(j,
3923 &data0, &data1, 4, itable);
3924 j++;
3925 break;
3926 }
3927
3928 if (data0 != 0) {
3929 status = __vxge_hw_vpath_rts_table_set(
3930 vpath_handles[i],
3931 action, rts_table,
3932 0, data0, data1);
3933
3934 if (status != VXGE_HW_OK)
3935 goto exit;
3936 }
3937 }
3938 }
3939 }
3940exit:
3941 return status;
3942}
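A minimal sketch of how a driver might fill and program the indirection table across its open vpaths, assuming a handle array vp_handles[] and a log2 bucket size bkt_sz no larger than 8; mtable[] maps each bucket to a vpath index and itable[] simply lists the bucket numbers:

	static enum vxge_hw_status
	example_rth_itable_program(struct __vxge_hw_vpath_handle **vp_handles,
				   u32 vpath_count, u32 bkt_sz)
	{
		u8 itable[256] = {0};	/* indirection table */
		u8 mtable[256] = {0};	/* bucket-to-vpath mapping */
		u32 index;

		for (index = 0; index < (1U << bkt_sz); index++) {
			itable[index] = index;
			mtable[index] = index % vpath_count;
		}

		return vxge_hw_vpath_rts_rth_itable_set(vp_handles, vpath_count,
							mtable, itable, bkt_sz);
	}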
3943
3944/**
3945 * vxge_hw_vpath_check_leak - Check for memory leak
3946 * @ring: Handle to the ring object used for receive
3947 *
3948 * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is greater than or equal to
3949 * PRC_CFG6_VPn.RXD_SPAT, then a leak has occurred.
3950 * Returns: VXGE_HW_FAIL if a leak has occurred.
3951 *
3952 */
3953enum vxge_hw_status
3954vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring)
3955{
3956 enum vxge_hw_status status = VXGE_HW_OK;
3957 u64 rxd_new_count, rxd_spat;
3958
3959 if (ring == NULL)
3960 return status;
3961
3962 rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell);
3963 rxd_spat = readq(&ring->vp_reg->prc_cfg6);
3964 rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat);
3965
3966 if (rxd_new_count >= rxd_spat)
3967 status = VXGE_HW_FAIL;
3968
3969 return status;
3970}
3971
3972/*
3973 * __vxge_hw_vpath_mgmt_read
3974 * This routine reads the vpath_mgmt registers
3975 */
3976static enum vxge_hw_status
3977__vxge_hw_vpath_mgmt_read(
3978 struct __vxge_hw_device *hldev,
3979 struct __vxge_hw_virtualpath *vpath)
3980{
3981 u32 i, mtu = 0, max_pyld = 0;
3982 u64 val64;
3983 enum vxge_hw_status status = VXGE_HW_OK;
3984
3985 for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
3986
3987 val64 = readq(&vpath->vpmgmt_reg->
3988 rxmac_cfg0_port_vpmgmt_clone[i]);
3989 max_pyld =
3990 (u32)
3991 VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN
3992 (val64);
3993 if (mtu < max_pyld)
3994 mtu = max_pyld;
3995 }
3996
3997 vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;
3998
3999 val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);
4000
4001 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
4002 if (val64 & vxge_mBIT(i))
4003 vpath->vsport_number = i;
4004 }
4005
4006 val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);
4007
4008 if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
4009 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
4010 else
4011 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
4012
4013 return status;
4014}
4015
4016/*
4017 * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
4018 * This routine checks the vpath_rst_in_prog register to see if
4019 * adapter completed the reset process for the vpath
4020 */
4021static enum vxge_hw_status
4022__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
4023{
4024 enum vxge_hw_status status;
4025
4026 status = __vxge_hw_device_register_poll(
4027 &vpath->hldev->common_reg->vpath_rst_in_prog,
4028 VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
4029 1 << (16 - vpath->vp_id)),
4030 vpath->hldev->config.device_poll_millis);
4031
4032 return status;
4033}
4034
4035/*
4036 * __vxge_hw_vpath_reset
4037 * This routine resets the vpath on the device
4038 */
4039static enum vxge_hw_status
4040__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
4041{
4042 u64 val64;
4043 enum vxge_hw_status status = VXGE_HW_OK;
4044
4045 val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));
4046
4047 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
4048 &hldev->common_reg->cmn_rsthdlr_cfg0);
4049
4050 return status;
4051}
4052
4053/*
4054 * __vxge_hw_vpath_sw_reset
4055 * This routine resets the vpath structures
4056 */
4057static enum vxge_hw_status
4058__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
4059{
4060 enum vxge_hw_status status = VXGE_HW_OK;
4061 struct __vxge_hw_virtualpath *vpath;
4062
4063 vpath = (struct __vxge_hw_virtualpath *)&hldev->virtual_paths[vp_id];
4064
4065 if (vpath->ringh) {
4066 status = __vxge_hw_ring_reset(vpath->ringh);
4067 if (status != VXGE_HW_OK)
4068 goto exit;
4069 }
4070
4071 if (vpath->fifoh)
4072 status = __vxge_hw_fifo_reset(vpath->fifoh);
4073exit:
4074 return status;
4075}
4076
4077/*
4078 * __vxge_hw_vpath_prc_configure
4079 * This routine configures the prc registers of virtual path using the config
4080 * passed
4081 */
4082static void
4083__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4084{
4085 u64 val64;
4086 struct __vxge_hw_virtualpath *vpath;
4087 struct vxge_hw_vp_config *vp_config;
4088 struct vxge_hw_vpath_reg __iomem *vp_reg;
4089
4090 vpath = &hldev->virtual_paths[vp_id];
4091 vp_reg = vpath->vp_reg;
4092 vp_config = vpath->vp_config;
4093
4094 if (vp_config->ring.enable == VXGE_HW_RING_DISABLE)
4095 return;
4096
4097 val64 = readq(&vp_reg->prc_cfg1);
4098 val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
4099 writeq(val64, &vp_reg->prc_cfg1);
4100
4101 val64 = readq(&vpath->vp_reg->prc_cfg6);
4102 val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
4103 writeq(val64, &vpath->vp_reg->prc_cfg6);
4104
4105 val64 = readq(&vp_reg->prc_cfg7);
4106
4107 if (vpath->vp_config->ring.scatter_mode !=
4108 VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) {
4109
4110 val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3);
4111
4112 switch (vpath->vp_config->ring.scatter_mode) {
4113 case VXGE_HW_RING_SCATTER_MODE_A:
4114 val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
4115 VXGE_HW_PRC_CFG7_SCATTER_MODE_A);
4116 break;
4117 case VXGE_HW_RING_SCATTER_MODE_B:
4118 val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
4119 VXGE_HW_PRC_CFG7_SCATTER_MODE_B);
4120 break;
4121 case VXGE_HW_RING_SCATTER_MODE_C:
4122 val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
4123 VXGE_HW_PRC_CFG7_SCATTER_MODE_C);
4124 break;
4125 }
4126 }
4127
4128 writeq(val64, &vp_reg->prc_cfg7);
4129
4130 writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
4131 __vxge_hw_ring_first_block_address_get(
4132 vpath->ringh) >> 3), &vp_reg->prc_cfg5);
4133
4134 val64 = readq(&vp_reg->prc_cfg4);
4135 val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
4136 val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);
4137
4138 val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
4139 VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);
4140
4141 if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE)
4142 val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;
4143 else
4144 val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;
4145
4146 writeq(val64, &vp_reg->prc_cfg4);
4147}
4148
4149/*
4150 * __vxge_hw_vpath_kdfc_configure
4151 * This routine configures the kdfc registers of virtual path using the
4152 * config passed
4153 */
4154static enum vxge_hw_status
4155__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4156{
4157 u64 val64;
4158 u64 vpath_stride;
4159 enum vxge_hw_status status = VXGE_HW_OK;
4160 struct __vxge_hw_virtualpath *vpath;
4161 struct vxge_hw_vpath_reg __iomem *vp_reg;
4162
4163 vpath = &hldev->virtual_paths[vp_id];
4164 vp_reg = vpath->vp_reg;
4165 status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);
4166
4167 if (status != VXGE_HW_OK)
4168 goto exit;
4169
4170 val64 = readq(&vp_reg->kdfc_drbl_triplet_total);
4171
4172 vpath->max_kdfc_db =
4173 (u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
4174 val64+1)/2;
4175
4176 if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
4177
4178 vpath->max_nofl_db = vpath->max_kdfc_db;
4179
4180 if (vpath->max_nofl_db <
4181 ((vpath->vp_config->fifo.memblock_size /
4182 (vpath->vp_config->fifo.max_frags *
4183 sizeof(struct vxge_hw_fifo_txd))) *
4184 vpath->vp_config->fifo.fifo_blocks)) {
4185
4186 return VXGE_HW_BADCFG_FIFO_BLOCKS;
4187 }
4188 val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
4189 (vpath->max_nofl_db*2)-1);
4190 }
4191
4192 writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);
4193
4194 writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
4195 &vp_reg->kdfc_fifo_trpl_ctrl);
4196
4197 val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);
4198
4199 val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
4200 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));
4201
4202 val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
4203 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
4204#ifndef __BIG_ENDIAN
4205 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
4206#endif
4207 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);
4208
4209 writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
4210 writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);
4211 wmb();
4212 vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);
4213
4214 vpath->nofl_db =
4215 (struct __vxge_hw_non_offload_db_wrapper __iomem *)
4216 (hldev->kdfc + (vp_id *
4217 VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
4218 vpath_stride)));
4219exit:
4220 return status;
4221}
4222
4223/*
4224 * __vxge_hw_vpath_mac_configure
4225 * This routine configures the mac of virtual path using the config passed
4226 */
4227static enum vxge_hw_status
4228__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4229{
4230 u64 val64;
4231 enum vxge_hw_status status = VXGE_HW_OK;
4232 struct __vxge_hw_virtualpath *vpath;
4233 struct vxge_hw_vp_config *vp_config;
4234 struct vxge_hw_vpath_reg __iomem *vp_reg;
4235
4236 vpath = &hldev->virtual_paths[vp_id];
4237 vp_reg = vpath->vp_reg;
4238 vp_config = vpath->vp_config;
4239
4240 writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
4241 vpath->vsport_number), &vp_reg->xmac_vsport_choice);
4242
4243 if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
4244
4245 val64 = readq(&vp_reg->xmac_rpa_vcfg);
4246
4247 if (vp_config->rpa_strip_vlan_tag !=
4248 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) {
4249 if (vp_config->rpa_strip_vlan_tag)
4250 val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
4251 else
4252 val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
4253 }
4254
4255 writeq(val64, &vp_reg->xmac_rpa_vcfg);
4256 val64 = readq(&vp_reg->rxmac_vcfg0);
4257
4258 if (vp_config->mtu !=
4259 VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) {
4260 val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
4261 if ((vp_config->mtu +
4262 VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu)
4263 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
4264 vp_config->mtu +
4265 VXGE_HW_MAC_HEADER_MAX_SIZE);
4266 else
4267 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
4268 vpath->max_mtu);
4269 }
4270
4271 writeq(val64, &vp_reg->rxmac_vcfg0);
4272
4273 val64 = readq(&vp_reg->rxmac_vcfg1);
4274
4275 val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
4276 VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);
4277
4278 if (hldev->config.rth_it_type ==
4279 VXGE_HW_RTH_IT_TYPE_MULTI_IT) {
4280 val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(
4281 0x2) |
4282 VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE;
4283 }
4284
4285 writeq(val64, &vp_reg->rxmac_vcfg1);
4286 }
4287 return status;
4288}
4289
4290/*
4291 * __vxge_hw_vpath_tim_configure
4292 * This routine configures the tim registers of virtual path using the config
4293 * passed
4294 */
4295static enum vxge_hw_status
4296__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4297{
4298 u64 val64;
4299 enum vxge_hw_status status = VXGE_HW_OK;
4300 struct __vxge_hw_virtualpath *vpath;
4301 struct vxge_hw_vpath_reg __iomem *vp_reg;
4302 struct vxge_hw_vp_config *config;
4303
4304 vpath = &hldev->virtual_paths[vp_id];
4305 vp_reg = vpath->vp_reg;
4306 config = vpath->vp_config;
4307
4308 writeq(0, &vp_reg->tim_dest_addr);
4309 writeq(0, &vp_reg->tim_vpath_map);
4310 writeq(0, &vp_reg->tim_bitmap);
4311 writeq(0, &vp_reg->tim_remap);
4312
4313 if (config->ring.enable == VXGE_HW_RING_ENABLE)
4314 writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
4315 (vp_id * VXGE_HW_MAX_INTR_PER_VP) +
4316 VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);
4317
4318 val64 = readq(&vp_reg->tim_pci_cfg);
4319 val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
4320 writeq(val64, &vp_reg->tim_pci_cfg);
4321
4322 if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
4323
4324 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4325
4326 if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4327 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
4328 0x3ffffff);
4329 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
4330 config->tti.btimer_val);
4331 }
4332
4333 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
4334
4335 if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
4336 if (config->tti.timer_ac_en)
4337 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
4338 else
4339 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
4340 }
4341
4342 if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
4343 if (config->tti.timer_ci_en)
4344 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4345 else
4346 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4347 }
4348
4349 if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
4350 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
4351 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
4352 config->tti.urange_a);
4353 }
4354
4355 if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
4356 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
4357 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
4358 config->tti.urange_b);
4359 }
4360
4361 if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
4362 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
4363 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
4364 config->tti.urange_c);
4365 }
4366
4367 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4368 vpath->tim_tti_cfg1_saved = val64;
4369
4370 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
4371
4372 if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
4373 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
4374 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
4375 config->tti.uec_a);
4376 }
4377
4378 if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
4379 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
4380 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
4381 config->tti.uec_b);
4382 }
4383
4384 if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
4385 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
4386 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
4387 config->tti.uec_c);
4388 }
4389
4390 if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
4391 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
4392 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
4393 config->tti.uec_d);
4394 }
4395
4396 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
4397 val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
4398
4399 if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
4400 if (config->tti.timer_ri_en)
4401 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
4402 else
4403 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
4404 }
4405
4406 if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4407 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
4408 0x3ffffff);
4409 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
4410 config->tti.rtimer_val);
4411 }
4412
4413 if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
4414 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
4415 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
4416 }
4417
4418 if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4419 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
4420 0x3ffffff);
4421 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
4422 config->tti.ltimer_val);
4423 }
4424
4425 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
4426 vpath->tim_tti_cfg3_saved = val64;
4427 }
4428
4429 if (config->ring.enable == VXGE_HW_RING_ENABLE) {
4430
4431 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
4432
4433 if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4434 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
4435 0x3ffffff);
4436 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
4437 config->rti.btimer_val);
4438 }
4439
4440 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
4441
4442 if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
4443 if (config->rti.timer_ac_en)
4444 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
4445 else
4446 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
4447 }
4448
4449 if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
4450 if (config->rti.timer_ci_en)
4451 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4452 else
4453 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4454 }
4455
4456 if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
4457 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
4458 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
4459 config->rti.urange_a);
4460 }
4461
4462 if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
4463 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
4464 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
4465 config->rti.urange_b);
4466 }
4467
4468 if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
4469 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
4470 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
4471 config->rti.urange_c);
4472 }
4473
4474 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
4475 vpath->tim_rti_cfg1_saved = val64;
4476
4477 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
4478
4479 if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
4480 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
4481 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
4482 config->rti.uec_a);
4483 }
4484
4485 if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
4486 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
4487 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
4488 config->rti.uec_b);
4489 }
4490
4491 if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
4492 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
4493 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
4494 config->rti.uec_c);
4495 }
4496
4497 if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
4498 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
4499 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
4500 config->rti.uec_d);
4501 }
4502
4503 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
4504 val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
4505
4506 if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
4507 if (config->rti.timer_ri_en)
4508 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
4509 else
4510 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
4511 }
4512
4513 if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4514 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
4515 0x3ffffff);
4516 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
4517 config->rti.rtimer_val);
4518 }
4519
4520 if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
4521 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
4522 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
4523 }
4524
4525 if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4526 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
4527 0x3ffffff);
4528 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
4529 config->rti.ltimer_val);
4530 }
4531
4532 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
4533 vpath->tim_rti_cfg3_saved = val64;
4534 }
4535
4536 val64 = 0;
4537 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
4538 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
4539 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
4540 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4541 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4542 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4543
4544 val64 = VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(150);
4545 val64 |= VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(0);
4546 val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3);
4547 writeq(val64, &vp_reg->tim_wrkld_clc);
4548
4549 return status;
4550}
4551
4552/*
4553 * __vxge_hw_vpath_initialize
4554 * This routine is the final phase of init which initializes the
4555 * registers of the vpath using the configuration passed.
4556 */
4557static enum vxge_hw_status
4558__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
4559{
4560 u64 val64;
4561 u32 val32;
4562 enum vxge_hw_status status = VXGE_HW_OK;
4563 struct __vxge_hw_virtualpath *vpath;
4564 struct vxge_hw_vpath_reg __iomem *vp_reg;
4565
4566 vpath = &hldev->virtual_paths[vp_id];
4567
4568 if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
4569 status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
4570 goto exit;
4571 }
4572 vp_reg = vpath->vp_reg;
4573
4574 status = __vxge_hw_vpath_swapper_set(vpath->vp_reg);
4575 if (status != VXGE_HW_OK)
4576 goto exit;
4577
4578 status = __vxge_hw_vpath_mac_configure(hldev, vp_id);
4579 if (status != VXGE_HW_OK)
4580 goto exit;
4581
4582 status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
4583 if (status != VXGE_HW_OK)
4584 goto exit;
4585
4586 status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
4587 if (status != VXGE_HW_OK)
4588 goto exit;
4589
4590 val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);
4591
4592 /* Get MRRS value from device control */
4593 status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
4594 if (status == VXGE_HW_OK) {
4595 val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
4596 val64 &=
4597 ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
4598 val64 |=
4599 VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);
4600
4601 val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
4602 }
4603
4604 val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
4605 val64 |=
4606 VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
4607 VXGE_HW_MAX_PAYLOAD_SIZE_512);
4608
4609 val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
4610 writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);
4611
4612exit:
4613 return status;
4614}
4615
4616/*
4617 * __vxge_hw_vp_terminate - Terminate Virtual Path structure
4618 * This routine closes all channels it opened and frees up the memory
4619 */
4620static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
4621{
4622 struct __vxge_hw_virtualpath *vpath;
4623
4624 vpath = &hldev->virtual_paths[vp_id];
4625
4626 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
4627 goto exit;
4628
4629 VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
4630 vpath->hldev->tim_int_mask1, vpath->vp_id);
4631 hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
4632
4633 /* If the whole struct __vxge_hw_virtualpath is zeroed, nothing will
4634 * work after the interface is brought down.
4635 */
4636 spin_lock(&vpath->lock);
4637 vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
4638 spin_unlock(&vpath->lock);
4639
4640 vpath->vpmgmt_reg = NULL;
4641 vpath->nofl_db = NULL;
4642 vpath->max_mtu = 0;
4643 vpath->vsport_number = 0;
4644 vpath->max_kdfc_db = 0;
4645 vpath->max_nofl_db = 0;
4646 vpath->ringh = NULL;
4647 vpath->fifoh = NULL;
4648 memset(&vpath->vpath_handles, 0, sizeof(struct list_head));
4649 vpath->stats_block = 0;
4650 vpath->hw_stats = NULL;
4651 vpath->hw_stats_sav = NULL;
4652 vpath->sw_stats = NULL;
4653
4654exit:
4655 return;
4656}
4657
4658/*
4659 * __vxge_hw_vp_initialize - Initialize Virtual Path structure
4660 * This routine is the initial phase of init which resets the vpath and
4661 * initializes the software support structures.
4662 */
4663static enum vxge_hw_status
4664__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
4665 struct vxge_hw_vp_config *config)
4666{
4667 struct __vxge_hw_virtualpath *vpath;
4668 enum vxge_hw_status status = VXGE_HW_OK;
4669
4670 if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
4671 status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
4672 goto exit;
4673 }
4674
4675 vpath = &hldev->virtual_paths[vp_id];
4676
4677 spin_lock_init(&vpath->lock);
4678 vpath->vp_id = vp_id;
4679 vpath->vp_open = VXGE_HW_VP_OPEN;
4680 vpath->hldev = hldev;
4681 vpath->vp_config = config;
4682 vpath->vp_reg = hldev->vpath_reg[vp_id];
4683 vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];
4684
4685 __vxge_hw_vpath_reset(hldev, vp_id);
4686
4687 status = __vxge_hw_vpath_reset_check(vpath);
4688 if (status != VXGE_HW_OK) {
4689 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4690 goto exit;
4691 }
4692
4693 status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
4694 if (status != VXGE_HW_OK) {
4695 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4696 goto exit;
4697 }
4698
4699 INIT_LIST_HEAD(&vpath->vpath_handles);
4700
4701 vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];
4702
4703 VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
4704 hldev->tim_int_mask1, vp_id);
4705
4706 status = __vxge_hw_vpath_initialize(hldev, vp_id);
4707 if (status != VXGE_HW_OK)
4708 __vxge_hw_vp_terminate(hldev, vp_id);
4709exit:
4710 return status;
4711}
4712
4713/*
4714 * vxge_hw_vpath_mtu_set - Set MTU.
4715 * Set a new MTU value. For example, to use jumbo frames:
4716 * vxge_hw_vpath_mtu_set(my_vpath_handle, 9600);
4717 */
4718enum vxge_hw_status
4719vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu)
4720{
4721 u64 val64;
4722 enum vxge_hw_status status = VXGE_HW_OK;
4723 struct __vxge_hw_virtualpath *vpath;
4724
4725 if (vp == NULL) {
4726 status = VXGE_HW_ERR_INVALID_HANDLE;
4727 goto exit;
4728 }
4729 vpath = vp->vpath;
4730
4731 new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;
4732
4733 if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu))
4734 status = VXGE_HW_ERR_INVALID_MTU_SIZE;
4735
4736 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
4737
4738 val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
4739 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);
4740
4741 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
4742
4743 vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;
4744
4745exit:
4746 return status;
4747}
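A minimal sketch of switching an open vpath to jumbo frames, assuming a hypothetical handle vp; the routine above adds the MAC header size internally before range-checking against the vpath maximum:

	static int example_set_jumbo_mtu(struct __vxge_hw_vpath_handle *vp)
	{
		enum vxge_hw_status status;

		status = vxge_hw_vpath_mtu_set(vp, 9000);

		return (status == VXGE_HW_OK) ? 0 : -EINVAL;
	}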
4748
4749/*
4750 * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
4751 * Enable the DMA vpath statistics. This function must be called to re-enable
4752 * the adapter to update stats into the host memory
4753 */
4754static enum vxge_hw_status
4755vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
4756{
4757 enum vxge_hw_status status = VXGE_HW_OK;
4758 struct __vxge_hw_virtualpath *vpath;
4759
4760 vpath = vp->vpath;
4761
4762 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4763 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4764 goto exit;
4765 }
4766
4767 memcpy(vpath->hw_stats_sav, vpath->hw_stats,
4768 sizeof(struct vxge_hw_vpath_stats_hw_info));
4769
4770 status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
4771exit:
4772 return status;
4773}
4774
4775/*
4776 * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
4777 * This function allocates a block from block pool or from the system
4778 */
4779static struct __vxge_hw_blockpool_entry *
4780__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
4781{
4782 struct __vxge_hw_blockpool_entry *entry = NULL;
4783 struct __vxge_hw_blockpool *blockpool;
4784
4785 blockpool = &devh->block_pool;
4786
4787 if (size == blockpool->block_size) {
4788
4789 if (!list_empty(&blockpool->free_block_list))
4790 entry = (struct __vxge_hw_blockpool_entry *)
4791 list_first_entry(&blockpool->free_block_list,
4792 struct __vxge_hw_blockpool_entry,
4793 item);
4794
4795 if (entry != NULL) {
4796 list_del(&entry->item);
4797 blockpool->pool_size--;
4798 }
4799 }
4800
4801 if (entry != NULL)
4802 __vxge_hw_blockpool_blocks_add(blockpool);
4803
4804 return entry;
4805}
4806
4807/*
4808 * vxge_hw_vpath_open - Open a virtual path on a given adapter
4809 * This function is used to open access to a virtual path of an
4810 * adapter for offload and GRO operations. This function returns
4811 * synchronously.
4812 */
4813enum vxge_hw_status
4814vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
4815 struct vxge_hw_vpath_attr *attr,
4816 struct __vxge_hw_vpath_handle **vpath_handle)
4817{
4818 struct __vxge_hw_virtualpath *vpath;
4819 struct __vxge_hw_vpath_handle *vp;
4820 enum vxge_hw_status status;
4821
4822 vpath = &hldev->virtual_paths[attr->vp_id];
4823
4824 if (vpath->vp_open == VXGE_HW_VP_OPEN) {
4825 status = VXGE_HW_ERR_INVALID_STATE;
4826 goto vpath_open_exit1;
4827 }
4828
4829 status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
4830 &hldev->config.vp_config[attr->vp_id]);
4831 if (status != VXGE_HW_OK)
4832 goto vpath_open_exit1;
4833
4834 vp = vzalloc(sizeof(struct __vxge_hw_vpath_handle));
4835 if (vp == NULL) {
4836 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4837 goto vpath_open_exit2;
4838 }
4839
4840 vp->vpath = vpath;
4841
4842 if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
4843 status = __vxge_hw_fifo_create(vp, &attr->fifo_attr);
4844 if (status != VXGE_HW_OK)
4845 goto vpath_open_exit6;
4846 }
4847
4848 if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
4849 status = __vxge_hw_ring_create(vp, &attr->ring_attr);
4850 if (status != VXGE_HW_OK)
4851 goto vpath_open_exit7;
4852
4853 __vxge_hw_vpath_prc_configure(hldev, attr->vp_id);
4854 }
4855
4856 vpath->fifoh->tx_intr_num =
4857 (attr->vp_id * VXGE_HW_MAX_INTR_PER_VP) +
4858 VXGE_HW_VPATH_INTR_TX;
4859
4860 vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
4861 VXGE_HW_BLOCK_SIZE);
4862 if (vpath->stats_block == NULL) {
4863 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4864 goto vpath_open_exit8;
4865 }
4866
4867 vpath->hw_stats = vpath->stats_block->memblock;
4868 memset(vpath->hw_stats, 0,
4869 sizeof(struct vxge_hw_vpath_stats_hw_info));
4870
4871 hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
4872 vpath->hw_stats;
4873
4874 vpath->hw_stats_sav =
4875 &hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];
4876 memset(vpath->hw_stats_sav, 0,
4877 sizeof(struct vxge_hw_vpath_stats_hw_info));
4878
4879 writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg);
4880
4881 status = vxge_hw_vpath_stats_enable(vp);
4882 if (status != VXGE_HW_OK)
4883 goto vpath_open_exit8;
4884
4885 list_add(&vp->item, &vpath->vpath_handles);
4886
4887 hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);
4888
4889 *vpath_handle = vp;
4890
4891 attr->fifo_attr.userdata = vpath->fifoh;
4892 attr->ring_attr.userdata = vpath->ringh;
4893
4894 return VXGE_HW_OK;
4895
4896vpath_open_exit8:
4897 if (vpath->ringh != NULL)
4898 __vxge_hw_ring_delete(vp);
4899vpath_open_exit7:
4900 if (vpath->fifoh != NULL)
4901 __vxge_hw_fifo_delete(vp);
4902vpath_open_exit6:
4903 vfree(vp);
4904vpath_open_exit2:
4905 __vxge_hw_vp_terminate(hldev, attr->vp_id);
4906vpath_open_exit1:
4907
4908 return status;
4909}
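A minimal open/enable/close sketch, assuming the device handle hldev is already initialized and that the fifo and ring attributes (callbacks, descriptor sizes) have been filled into attr elsewhere; vpath id 0 is only an example:

	static enum vxge_hw_status
	example_vpath_bringup(struct __vxge_hw_device *hldev,
			      struct vxge_hw_vpath_attr *attr)
	{
		struct __vxge_hw_vpath_handle *vp = NULL;
		enum vxge_hw_status status;

		attr->vp_id = 0;
		status = vxge_hw_vpath_open(hldev, attr, &vp);
		if (status != VXGE_HW_OK)
			return status;

		/* Clear the vpath reset so the path starts forwarding frames. */
		vxge_hw_vpath_enable(vp);

		/* ... datapath runs here ... */

		return vxge_hw_vpath_close(vp);
	}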
4910
4911/**
4912 * vxge_hw_vpath_rx_doorbell_init - Initialize the receive doorbell for a
4913 * vpath
4914 * @vp: Handle obtained from a previous vpath open
4915 *
4916 * This function posts the initial RxD count to the receive doorbell and
4917 * adjusts the ring's RxD limit for the virtual path opened earlier.
4918 */
4919void vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
4920{
4921 struct __vxge_hw_virtualpath *vpath = vp->vpath;
4922 struct __vxge_hw_ring *ring = vpath->ringh;
4923 struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev);
4924 u64 new_count, val64, val164;
4925
4926 if (vdev->titan1) {
4927 new_count = readq(&vpath->vp_reg->rxdmem_size);
4928 new_count &= 0x1fff;
4929 } else
4930 new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8;
4931
4932 val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count);
4933
4934 writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
4935 &vpath->vp_reg->prc_rxd_doorbell);
4936 readl(&vpath->vp_reg->prc_rxd_doorbell);
4937
4938 val164 /= 2;
4939 val64 = readq(&vpath->vp_reg->prc_cfg6);
4940 val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64);
4941 val64 &= 0x1ff;
4942
4943 /*
4944 * Each RxD is of 4 qwords
4945 */
4946 new_count -= (val64 + 1);
4947 val64 = min(val164, new_count) / 4;
4948
4949 ring->rxds_limit = min(ring->rxds_limit, val64);
4950 if (ring->rxds_limit < 4)
4951 ring->rxds_limit = 4;
4952}
4953
4954/*
4955 * __vxge_hw_blockpool_block_free - Frees a block from block pool
4956 * @devh: Hal device
4957 * @entry: Entry of block to be freed
4958 *
4959 * This function frees a block from block pool
4960 */
4961static void
4962__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
4963 struct __vxge_hw_blockpool_entry *entry)
4964{
4965 struct __vxge_hw_blockpool *blockpool;
4966
4967 blockpool = &devh->block_pool;
4968
4969 if (entry->length == blockpool->block_size) {
4970 list_add(&entry->item, &blockpool->free_block_list);
4971 blockpool->pool_size++;
4972 }
4973
4974 __vxge_hw_blockpool_blocks_remove(blockpool);
4975}
4976
4977/*
4978 * vxge_hw_vpath_close - Close the handle obtained from a previous vpath open
4979 * This function is used to close access to virtual path opened
4980 * earlier.
4981 */
4982enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
4983{
4984 struct __vxge_hw_virtualpath *vpath = NULL;
4985 struct __vxge_hw_device *devh = NULL;
4986 u32 vp_id = vp->vpath->vp_id;
4987 u32 is_empty = TRUE;
4988 enum vxge_hw_status status = VXGE_HW_OK;
4989
4990 vpath = vp->vpath;
4991 devh = vpath->hldev;
4992
4993 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4994 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4995 goto vpath_close_exit;
4996 }
4997
4998 list_del(&vp->item);
4999
5000 if (!list_empty(&vpath->vpath_handles)) {
5001 list_add(&vp->item, &vpath->vpath_handles);
5002 is_empty = FALSE;
5003 }
5004
5005 if (!is_empty) {
5006 status = VXGE_HW_FAIL;
5007 goto vpath_close_exit;
5008 }
5009
5010 devh->vpaths_deployed &= ~vxge_mBIT(vp_id);
5011
5012 if (vpath->ringh != NULL)
5013 __vxge_hw_ring_delete(vp);
5014
5015 if (vpath->fifoh != NULL)
5016 __vxge_hw_fifo_delete(vp);
5017
5018 if (vpath->stats_block != NULL)
5019 __vxge_hw_blockpool_block_free(devh, vpath->stats_block);
5020
5021 vfree(vp);
5022
5023 __vxge_hw_vp_terminate(devh, vp_id);
5024
5025vpath_close_exit:
5026 return status;
5027}
5028
5029/*
5030 * vxge_hw_vpath_reset - Resets vpath
5031 * This function is used to request a reset of vpath
5032 */
5033enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp)
5034{
5035 enum vxge_hw_status status;
5036 u32 vp_id;
5037 struct __vxge_hw_virtualpath *vpath = vp->vpath;
5038
5039 vp_id = vpath->vp_id;
5040
5041 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
5042 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
5043 goto exit;
5044 }
5045
5046 status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
5047 if (status == VXGE_HW_OK)
5048 vpath->sw_stats->soft_reset_cnt++;
5049exit:
5050 return status;
5051}
5052
5053/*
5054 * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
5055 * This function polls for the vpath reset completion and re-initializes
5056 * the vpath.
5057 */
5058enum vxge_hw_status
5059vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp)
5060{
5061 struct __vxge_hw_virtualpath *vpath = NULL;
5062 enum vxge_hw_status status;
5063 struct __vxge_hw_device *hldev;
5064 u32 vp_id;
5065
5066 vp_id = vp->vpath->vp_id;
5067 vpath = vp->vpath;
5068 hldev = vpath->hldev;
5069
5070 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
5071 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
5072 goto exit;
5073 }
5074
5075 status = __vxge_hw_vpath_reset_check(vpath);
5076 if (status != VXGE_HW_OK)
5077 goto exit;
5078
5079 status = __vxge_hw_vpath_sw_reset(hldev, vp_id);
5080 if (status != VXGE_HW_OK)
5081 goto exit;
5082
5083 status = __vxge_hw_vpath_initialize(hldev, vp_id);
5084 if (status != VXGE_HW_OK)
5085 goto exit;
5086
5087 if (vpath->ringh != NULL)
5088 __vxge_hw_vpath_prc_configure(hldev, vp_id);
5089
5090 memset(vpath->hw_stats, 0,
5091 sizeof(struct vxge_hw_vpath_stats_hw_info));
5092
5093 memset(vpath->hw_stats_sav, 0,
5094 sizeof(struct vxge_hw_vpath_stats_hw_info));
5095
5096 writeq(vpath->stats_block->dma_addr,
5097 &vpath->vp_reg->stats_cfg);
5098
5099 status = vxge_hw_vpath_stats_enable(vp);
5100
5101exit:
5102 return status;
5103}
5104
5105/*
5106 * vxge_hw_vpath_enable - Enable vpath.
5107 * This routine clears the vpath reset thereby enabling a vpath
5108 * to start forwarding frames and generating interrupts.
5109 */
5110void
5111vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
5112{
5113 struct __vxge_hw_device *hldev;
5114 u64 val64;
5115
5116 hldev = vp->vpath->hldev;
5117
5118 val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
5119 1 << (16 - vp->vpath->vp_id));
5120
5121 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
5122 &hldev->common_reg->cmn_rsthdlr_cfg1);
5123}
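A minimal per-vpath reset sketch, assuming an open handle vp: request the reset, poll for completion and re-initialize, then clear the reset so the vpath resumes forwarding, following the order of the three routines above:

	static enum vxge_hw_status
	example_vpath_reset_recover(struct __vxge_hw_vpath_handle *vp)
	{
		enum vxge_hw_status status;

		status = vxge_hw_vpath_reset(vp);
		if (status != VXGE_HW_OK)
			return status;

		status = vxge_hw_vpath_recover_from_reset(vp);
		if (status != VXGE_HW_OK)
			return status;

		vxge_hw_vpath_enable(vp);
		return VXGE_HW_OK;
	}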
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
new file mode 100644
index 000000000000..dd362584f5ca
--- /dev/null
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
@@ -0,0 +1,2111 @@
1/******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
9 *
10 * vxge-config.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/
14#ifndef VXGE_CONFIG_H
15#define VXGE_CONFIG_H
16#include <linux/hardirq.h>
17#include <linux/list.h>
18#include <linux/slab.h>
19#include <asm/io.h>
20
21#ifndef VXGE_CACHE_LINE_SIZE
22#define VXGE_CACHE_LINE_SIZE 128
23#endif
24
25#ifndef VXGE_ALIGN
26#define VXGE_ALIGN(adrs, size) \
27 (((size) - (((u64)adrs) & ((size)-1))) & ((size)-1))
28#endif
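VXGE_ALIGN() computes the number of padding bytes needed to bring adrs up to the next size-byte boundary (zero when already aligned); a short worked example:

	/* VXGE_ALIGN(0x1003, 8) = ((8 - (0x1003 & 7)) & 7) = ((8 - 3) & 7) = 5 */
	/* VXGE_ALIGN(0x1000, 8) = ((8 - (0x1000 & 7)) & 7) = ( 8      & 7) = 0 */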
29
30#define VXGE_HW_MIN_MTU 68
31#define VXGE_HW_MAX_MTU 9600
32#define VXGE_HW_DEFAULT_MTU 1500
33
34#define VXGE_HW_MAX_ROM_IMAGES 8
35
36struct eprom_image {
37 u8 is_valid:1;
38 u8 index;
39 u8 type;
40 u16 version;
41};
42
43#ifdef VXGE_DEBUG_ASSERT
44/**
45 * vxge_assert
46 * @test: C-condition to check
47 *
48 *
49 * This macro implements a traditional assert. By default, assertions
50 * are enabled; they can be disabled by undefining the VXGE_DEBUG_ASSERT
51 * macro at compile time, in which case the check is compiled out
52 * entirely.
53 */
54#define vxge_assert(test) BUG_ON(!(test))
55#else
56#define vxge_assert(test)
57#endif /* end of VXGE_DEBUG_ASSERT */
58
59/**
60 * enum vxge_debug_level
61 * @VXGE_NONE: debug disabled
62 * @VXGE_ERR: all errors are logged out
63 * @VXGE_TRACE: all errors plus all kinds of verbose tracing print outs
64 * are logged out. Very noisy.
65 *
66 * This enumeration is used to switch between different debug levels
67 * at runtime if the DEBUG macro is defined during compilation. If the
68 * DEBUG macro is not defined, the debug code is compiled out.
69 *
70 */
71enum vxge_debug_level {
72 VXGE_NONE = 0,
73 VXGE_TRACE = 1,
74 VXGE_ERR = 2
75};
76
77#define NULL_VPID 0xFFFFFFFF
78#ifdef CONFIG_VXGE_DEBUG_TRACE_ALL
79#define VXGE_DEBUG_MODULE_MASK 0xffffffff
80#define VXGE_DEBUG_TRACE_MASK 0xffffffff
81#define VXGE_DEBUG_ERR_MASK 0xffffffff
82#define VXGE_DEBUG_MASK 0x000001ff
83#else
84#define VXGE_DEBUG_MODULE_MASK 0x20000000
85#define VXGE_DEBUG_TRACE_MASK 0x20000000
86#define VXGE_DEBUG_ERR_MASK 0x20000000
87#define VXGE_DEBUG_MASK 0x00000001
88#endif
89
90/*
91 * @VXGE_COMPONENT_LL: do debug for vxge link layer module
92 * @VXGE_COMPONENT_ALL: activate debug for all modules with no exceptions
93 *
94 * These masks are used to distinguish modules or libraries during
95 * compilation and at runtime. The Makefile must declare the
96 * VXGE_DEBUG_MODULE_MASK macro and set it to a proper value.
97 */
98#define VXGE_COMPONENT_LL 0x20000000
99#define VXGE_COMPONENT_ALL 0xffffffff
100
101#define VXGE_HW_BASE_INF 100
102#define VXGE_HW_BASE_ERR 200
103#define VXGE_HW_BASE_BADCFG 300
104
105enum vxge_hw_status {
106 VXGE_HW_OK = 0,
107 VXGE_HW_FAIL = 1,
108 VXGE_HW_PENDING = 2,
109 VXGE_HW_COMPLETIONS_REMAIN = 3,
110
111 VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS = VXGE_HW_BASE_INF + 1,
112 VXGE_HW_INF_OUT_OF_DESCRIPTORS = VXGE_HW_BASE_INF + 2,
113
114 VXGE_HW_ERR_INVALID_HANDLE = VXGE_HW_BASE_ERR + 1,
115 VXGE_HW_ERR_OUT_OF_MEMORY = VXGE_HW_BASE_ERR + 2,
116 VXGE_HW_ERR_VPATH_NOT_AVAILABLE = VXGE_HW_BASE_ERR + 3,
117 VXGE_HW_ERR_VPATH_NOT_OPEN = VXGE_HW_BASE_ERR + 4,
118 VXGE_HW_ERR_WRONG_IRQ = VXGE_HW_BASE_ERR + 5,
119 VXGE_HW_ERR_SWAPPER_CTRL = VXGE_HW_BASE_ERR + 6,
120 VXGE_HW_ERR_INVALID_MTU_SIZE = VXGE_HW_BASE_ERR + 7,
121 VXGE_HW_ERR_INVALID_INDEX = VXGE_HW_BASE_ERR + 8,
122 VXGE_HW_ERR_INVALID_TYPE = VXGE_HW_BASE_ERR + 9,
123 VXGE_HW_ERR_INVALID_OFFSET = VXGE_HW_BASE_ERR + 10,
124 VXGE_HW_ERR_INVALID_DEVICE = VXGE_HW_BASE_ERR + 11,
125 VXGE_HW_ERR_VERSION_CONFLICT = VXGE_HW_BASE_ERR + 12,
126 VXGE_HW_ERR_INVALID_PCI_INFO = VXGE_HW_BASE_ERR + 13,
127 VXGE_HW_ERR_INVALID_TCODE = VXGE_HW_BASE_ERR + 14,
128 VXGE_HW_ERR_INVALID_BLOCK_SIZE = VXGE_HW_BASE_ERR + 15,
129 VXGE_HW_ERR_INVALID_STATE = VXGE_HW_BASE_ERR + 16,
130 VXGE_HW_ERR_PRIVILAGED_OPEARATION = VXGE_HW_BASE_ERR + 17,
131 VXGE_HW_ERR_INVALID_PORT = VXGE_HW_BASE_ERR + 18,
132 VXGE_HW_ERR_FIFO = VXGE_HW_BASE_ERR + 19,
133 VXGE_HW_ERR_VPATH = VXGE_HW_BASE_ERR + 20,
134 VXGE_HW_ERR_CRITICAL = VXGE_HW_BASE_ERR + 21,
135 VXGE_HW_ERR_SLOT_FREEZE = VXGE_HW_BASE_ERR + 22,
136
137 VXGE_HW_BADCFG_RING_INDICATE_MAX_PKTS = VXGE_HW_BASE_BADCFG + 1,
138 VXGE_HW_BADCFG_FIFO_BLOCKS = VXGE_HW_BASE_BADCFG + 2,
139 VXGE_HW_BADCFG_VPATH_MTU = VXGE_HW_BASE_BADCFG + 3,
140 VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG = VXGE_HW_BASE_BADCFG + 4,
141 VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH = VXGE_HW_BASE_BADCFG + 5,
142 VXGE_HW_BADCFG_INTR_MODE = VXGE_HW_BASE_BADCFG + 6,
143 VXGE_HW_BADCFG_RTS_MAC_EN = VXGE_HW_BASE_BADCFG + 7,
144
145 VXGE_HW_EOF_TRACE_BUF = -1
146};
147
148/**
149 * enum vxge_hw_device_link_state - Link state enumeration.
150 * @VXGE_HW_LINK_NONE: Invalid link state.
151 * @VXGE_HW_LINK_DOWN: Link is down.
152 * @VXGE_HW_LINK_UP: Link is up.
153 *
154 */
155enum vxge_hw_device_link_state {
156 VXGE_HW_LINK_NONE,
157 VXGE_HW_LINK_DOWN,
158 VXGE_HW_LINK_UP
159};
160
161/**
162 * enum vxge_hw_fw_upgrade_code - FW upgrade return codes.
163 * @VXGE_HW_FW_UPGRADE_OK: All OK, send next 16 bytes
164 * @VXGE_HW_FW_UPGRADE_DONE: upload completed
165 * @VXGE_HW_FW_UPGRADE_ERR: upload error
166 * @VXGE_FW_UPGRADE_BYTES2SKIP: skip bytes in the stream
167 *
168 */
169enum vxge_hw_fw_upgrade_code {
170 VXGE_HW_FW_UPGRADE_OK = 0,
171 VXGE_HW_FW_UPGRADE_DONE = 1,
172 VXGE_HW_FW_UPGRADE_ERR = 2,
173 VXGE_FW_UPGRADE_BYTES2SKIP = 3
174};
175
176/**
177 * enum vxge_hw_fw_upgrade_err_code - FW upgrade error codes.
178 * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1: corrupt data
179 * @VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW: buffer overflow
180 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3: invalid .ncf file
181 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4: invalid .ncf file
182 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5: invalid .ncf file
183 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6: invalid .ncf file
184 * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7: corrupt data
185 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8: invalid .ncf file
186 * @VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN: generic error unknown type
187 * @VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH: failed to flash the image; image check failed
188 */
189enum vxge_hw_fw_upgrade_err_code {
190 VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1 = 1,
191 VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW = 2,
192 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3 = 3,
193 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4 = 4,
194 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5 = 5,
195 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6 = 6,
196 VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7 = 7,
197 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8 = 8,
198 VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN = 9,
199 VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH = 10
200};
201
202/**
203 * struct vxge_hw_device_date - Date Format
204 * @day: Day
205 * @month: Month
206 * @year: Year
207 * @date: Date in string format
208 *
209 * Structure for returning date
210 */
211
212#define VXGE_HW_FW_STRLEN 32
213struct vxge_hw_device_date {
214 u32 day;
215 u32 month;
216 u32 year;
217 char date[VXGE_HW_FW_STRLEN];
218};
219
220struct vxge_hw_device_version {
221 u32 major;
222 u32 minor;
223 u32 build;
224 char version[VXGE_HW_FW_STRLEN];
225};
226
227/**
228 * struct vxge_hw_fifo_config - Configuration of fifo.
229 * @enable: Is this fifo to be commissioned
230 * @fifo_blocks: Numbers of TxDL (that is, lists of Tx descriptors)
231 * blocks per queue.
232 * @max_frags: Max number of Tx buffers per TxDL (that is, per single
233 * transmit operation).
234 * No more than 256 transmit buffers can be specified.
235 * @memblock_size: Fifo descriptors are allocated in blocks of @memblock_size
236 * bytes. Setting @memblock_size to page size ensures
237 * by-page allocation of descriptors. 128K bytes is the
238 * maximum supported block size.
239 * @alignment_size: per Tx fragment DMA-able memory used to align transmit data
240 * (e.g., to align on a cache line).
241 * @intr: Boolean. Use 1 to generate interrupt for each completed TxDL.
242 * Use 0 otherwise.
243 * @no_snoop_bits: If non-zero, specifies no-snoop PCI operation,
244 * which generally improves latency of the host bridge operation
245 * (see PCI specification). For valid values please refer
246 * to struct vxge_hw_fifo_config{} in the driver sources.
247 * Configuration of all Titan fifos.
248 * Note: Valid (min, max) range for each attribute is specified in the body of
249 * the struct vxge_hw_fifo_config{} structure.
250 */
251struct vxge_hw_fifo_config {
252 u32 enable;
253#define VXGE_HW_FIFO_ENABLE 1
254#define VXGE_HW_FIFO_DISABLE 0
255
256 u32 fifo_blocks;
257#define VXGE_HW_MIN_FIFO_BLOCKS 2
258#define VXGE_HW_MAX_FIFO_BLOCKS 128
259
260 u32 max_frags;
261#define VXGE_HW_MIN_FIFO_FRAGS 1
262#define VXGE_HW_MAX_FIFO_FRAGS 256
263
264 u32 memblock_size;
265#define VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE VXGE_HW_BLOCK_SIZE
266#define VXGE_HW_MAX_FIFO_MEMBLOCK_SIZE 131072
267#define VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE 8096
268
269 u32 alignment_size;
270#define VXGE_HW_MIN_FIFO_ALIGNMENT_SIZE 0
271#define VXGE_HW_MAX_FIFO_ALIGNMENT_SIZE 65536
272#define VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE VXGE_CACHE_LINE_SIZE
273
274 u32 intr;
275#define VXGE_HW_FIFO_QUEUE_INTR_ENABLE 1
276#define VXGE_HW_FIFO_QUEUE_INTR_DISABLE 0
277#define VXGE_HW_FIFO_QUEUE_INTR_DEFAULT 0
278
279 u32 no_snoop_bits;
280#define VXGE_HW_FIFO_NO_SNOOP_DISABLED 0
281#define VXGE_HW_FIFO_NO_SNOOP_TXD 1
282#define VXGE_HW_FIFO_NO_SNOOP_FRM 2
283#define VXGE_HW_FIFO_NO_SNOOP_ALL 3
284#define VXGE_HW_FIFO_NO_SNOOP_DEFAULT 0
285
286};
287/**
288 * struct vxge_hw_ring_config - Ring configurations.
289 * @enable: Is this ring to be commissioned
290 * @ring_blocks: Numbers of RxD blocks in the ring
291 * @buffer_mode: Receive buffer mode (1, 2, 3, or 5); for details please refer
292 * to Titan User Guide.
293 * @scatter_mode: Titan supports two receive scatter modes: A and B.
294 * For details please refer to Titan User Guide.
295 * @rx_timer_val: The number of 32ns periods that would be counted between two
296 * timer interrupts.
297 * @greedy_return: If set, forces the device to return absolutely all RxD
298 * that are consumed and still on board when a timer interrupt
299 * triggers. If clear, then if the device has already returned
300 * RxD before the current timer interrupt triggered and after the
301 * previous timer interrupt triggered, the device is not
302 * forced to return the rest of the consumed RxD that it has
303 * on board, which account for a byte count less than the one
304 * programmed into the PRC_CFG6.RXD_CRXDT field.
305 * @rx_timer_ci: TBD
306 * @backoff_interval_us: Time (in microseconds), after which Titan
307 * tries to download RxDs posted by the host.
308 * Note that the "backoff" does not happen if host posts receive
309 * descriptors in the timely fashion.
310 * Ring configuration.
311 */
312struct vxge_hw_ring_config {
313 u32 enable;
314#define VXGE_HW_RING_ENABLE 1
315#define VXGE_HW_RING_DISABLE 0
316#define VXGE_HW_RING_DEFAULT 1
317
318 u32 ring_blocks;
319#define VXGE_HW_MIN_RING_BLOCKS 1
320#define VXGE_HW_MAX_RING_BLOCKS 128
321#define VXGE_HW_DEF_RING_BLOCKS 2
322
323 u32 buffer_mode;
324#define VXGE_HW_RING_RXD_BUFFER_MODE_1 1
325#define VXGE_HW_RING_RXD_BUFFER_MODE_3 3
326#define VXGE_HW_RING_RXD_BUFFER_MODE_5 5
327#define VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT 1
328
329 u32 scatter_mode;
330#define VXGE_HW_RING_SCATTER_MODE_A 0
331#define VXGE_HW_RING_SCATTER_MODE_B 1
332#define VXGE_HW_RING_SCATTER_MODE_C 2
333#define VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT 0xffffffff
334
335 u64 rxds_limit;
336#define VXGE_HW_DEF_RING_RXDS_LIMIT 44
337};
338
339/**
340 * struct vxge_hw_vp_config - Configuration of virtual path
341 * @vp_id: Virtual Path Id
342 * @min_bandwidth: Minimum Guaranteed bandwidth
343 * @ring: See struct vxge_hw_ring_config{}.
344 * @fifo: See struct vxge_hw_fifo_config{}.
345 * @tti: Configuration of interrupt associated with Transmit.
346 * see struct vxge_hw_tim_intr_config();
347 * @rti: Configuration of interrupt associated with Receive.
348 * see struct vxge_hw_tim_intr_config();
349 * @mtu: mtu size used on this port.
350 * @rpa_strip_vlan_tag: Strip VLAN Tag enable/disable. Instructs the device to
351 * remove the VLAN tag from all received tagged frames that are not
352 * replicated at the internal L2 switch.
353 * 0 - Do not strip the VLAN tag.
354 * 1 - Strip the VLAN tag. Regardless of this setting, VLAN tags are
355 * always placed into the RxDMA descriptor.
356 *
357 * This structure is used by the driver to pass the configuration parameters to
358 * configure Virtual Path.
359 */
360struct vxge_hw_vp_config {
361 u32 vp_id;
362
363#define VXGE_HW_VPATH_PRIORITY_MIN 0
364#define VXGE_HW_VPATH_PRIORITY_MAX 16
365#define VXGE_HW_VPATH_PRIORITY_DEFAULT 0
366
367 u32 min_bandwidth;
368#define VXGE_HW_VPATH_BANDWIDTH_MIN 0
369#define VXGE_HW_VPATH_BANDWIDTH_MAX 100
370#define VXGE_HW_VPATH_BANDWIDTH_DEFAULT 0
371
372 struct vxge_hw_ring_config ring;
373 struct vxge_hw_fifo_config fifo;
374 struct vxge_hw_tim_intr_config tti;
375 struct vxge_hw_tim_intr_config rti;
376
377 u32 mtu;
378#define VXGE_HW_VPATH_MIN_INITIAL_MTU VXGE_HW_MIN_MTU
379#define VXGE_HW_VPATH_MAX_INITIAL_MTU VXGE_HW_MAX_MTU
380#define VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU 0xffffffff
381
382 u32 rpa_strip_vlan_tag;
383#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE 1
384#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE 0
385#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT 0xffffffff
386
387};
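A minimal sketch of filling one per-vpath configuration with the defaults defined above; the tti/rti interrupt-moderation members are omitted for brevity and the caller is assumed to have zeroed the structure first:

	static void example_fill_vp_config(struct vxge_hw_vp_config *cfg, u32 vp_id)
	{
		cfg->vp_id = vp_id;
		cfg->min_bandwidth = VXGE_HW_VPATH_BANDWIDTH_DEFAULT;
		cfg->mtu = VXGE_HW_DEFAULT_MTU;
		cfg->rpa_strip_vlan_tag =
			VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT;

		cfg->ring.enable = VXGE_HW_RING_ENABLE;
		cfg->ring.ring_blocks = VXGE_HW_DEF_RING_BLOCKS;
		cfg->ring.buffer_mode = VXGE_HW_RING_RXD_BUFFER_MODE_1;
		cfg->ring.scatter_mode = VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT;
		cfg->ring.rxds_limit = VXGE_HW_DEF_RING_RXDS_LIMIT;

		cfg->fifo.enable = VXGE_HW_FIFO_ENABLE;
		cfg->fifo.fifo_blocks = VXGE_HW_MIN_FIFO_BLOCKS;
		cfg->fifo.max_frags = VXGE_HW_MAX_FIFO_FRAGS;
		cfg->fifo.memblock_size = VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE;
		cfg->fifo.alignment_size = VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE;
		cfg->fifo.intr = VXGE_HW_FIFO_QUEUE_INTR_DEFAULT;
		cfg->fifo.no_snoop_bits = VXGE_HW_FIFO_NO_SNOOP_DEFAULT;
	}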
388/**
389 * struct vxge_hw_device_config - Device configuration.
390 * @dma_blockpool_initial: Initial size of DMA Pool
391 * @dma_blockpool_max: Maximum blocks in DMA pool
392 * @intr_mode: Line, or MSI-X interrupt.
393 *
394 * @rth_en: Enable Receive Traffic Hashing(RTH) using IT(Indirection Table).
395 * @rth_it_type: RTH IT table programming type
396 * @rts_mac_en: Enable Receive Traffic Steering using MAC destination address
397 * @vp_config: Configuration for virtual paths
398 * @device_poll_millis: Specify the interval (in milliseconds)
399 * to wait for register reads
400 *
401 * Titan configuration.
402 * Contains per-device configuration parameters, including:
403 * - stats sampling interval, etc.
404 *
405 * In addition, struct vxge_hw_device_config{} includes "subordinate"
406 * configurations, including:
407 * - fifos and rings;
408 * - MAC (done at firmware level).
409 *
410 * See Titan User Guide for more details.
411 * Note: Valid (min, max) range for each attribute is specified in the body of
412 * the struct vxge_hw_device_config{} structure. Please refer to the
413 * corresponding include file.
414 * See also: struct vxge_hw_tim_intr_config{}.
415 */
416struct vxge_hw_device_config {
417 u32 device_poll_millis;
418#define VXGE_HW_MIN_DEVICE_POLL_MILLIS 1
419#define VXGE_HW_MAX_DEVICE_POLL_MILLIS 100000
420#define VXGE_HW_DEF_DEVICE_POLL_MILLIS 1000
421
422 u32 dma_blockpool_initial;
423 u32 dma_blockpool_max;
424#define VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE 0
425#define VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE 0
426#define VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE 4
427#define VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE 4096
428
429#define VXGE_HW_MAX_PAYLOAD_SIZE_512 2
430
431 u32 intr_mode:2,
432#define VXGE_HW_INTR_MODE_IRQLINE 0
433#define VXGE_HW_INTR_MODE_MSIX 1
434#define VXGE_HW_INTR_MODE_MSIX_ONE_SHOT 2
435
436#define VXGE_HW_INTR_MODE_DEF 0
437
438 rth_en:1,
439#define VXGE_HW_RTH_DISABLE 0
440#define VXGE_HW_RTH_ENABLE 1
441#define VXGE_HW_RTH_DEFAULT 0
442
443 rth_it_type:1,
444#define VXGE_HW_RTH_IT_TYPE_SOLO_IT 0
445#define VXGE_HW_RTH_IT_TYPE_MULTI_IT 1
446#define VXGE_HW_RTH_IT_TYPE_DEFAULT 0
447
448 rts_mac_en:1,
449#define VXGE_HW_RTS_MAC_DISABLE 0
450#define VXGE_HW_RTS_MAC_ENABLE 1
451#define VXGE_HW_RTS_MAC_DEFAULT 0
452
453 hwts_en:1;
454#define VXGE_HW_HWTS_DISABLE 0
455#define VXGE_HW_HWTS_ENABLE 1
456#define VXGE_HW_HWTS_DEFAULT 1
457
458 struct vxge_hw_vp_config vp_config[VXGE_HW_MAX_VIRTUAL_PATHS];
459};
460
461/**
462 * function vxge_uld_link_up_f - Link-Up callback provided by driver.
463 * @devh: HW device handle.
464 * Link-up notification callback provided by the driver.
465 * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
466 *
467 * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_down_f{},
468 * vxge_hw_driver_initialize().
469 */
470
471/**
472 * function vxge_uld_link_down_f - Link-Down callback provided by
473 * driver.
474 * @devh: HW device handle.
475 *
476 * Link-Down notification callback provided by the driver.
477 * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
478 *
479 * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_up_f{},
480 * vxge_hw_driver_initialize().
481 */
482
483/**
484 * function vxge_uld_crit_err_f - Critical Error notification callback.
485 * @devh: HW device handle.
486 * (typically - at HW device initialization time).
487 * @type: Enumerated hw error, e.g.: double ECC.
488 * @serr_data: Titan status.
489 * @ext_data: Extended data. The contents depend on the @type.
490 *
491 * Critical-error notification callback provided by the driver.
492 * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
493 *
494 * See also: struct vxge_hw_uld_cbs{}, enum vxge_hw_event{},
495 * vxge_hw_driver_initialize().
496 */
497
498/**
499 * struct vxge_hw_uld_cbs - driver "slow-path" callbacks.
500 * @link_up: See vxge_uld_link_up_f{}.
501 * @link_down: See vxge_uld_link_down_f{}.
502 * @crit_err: See vxge_uld_crit_err_f{}.
503 *
504 * Driver slow-path (per-driver) callbacks.
505 * Implemented by driver and provided to HW via
506 * vxge_hw_driver_initialize().
507 * Note that these callbacks are not mandatory: HW will not invoke
508 * a callback if NULL is specified.
509 *
510 * See also: vxge_hw_driver_initialize().
511 */
512struct vxge_hw_uld_cbs {
513 void (*link_up)(struct __vxge_hw_device *devh);
514 void (*link_down)(struct __vxge_hw_device *devh);
515 void (*crit_err)(struct __vxge_hw_device *devh,
516 enum vxge_hw_event type, u64 ext_data);
517};
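
A sketch of wiring these slow-path callbacks into struct vxge_hw_device_attr (defined below) before calling vxge_hw_device_initialize(); the callback bodies are placeholders and the example_ names are hypothetical.

static void example_link_up(struct __vxge_hw_device *devh)
{
	/* e.g. wake the TX queues */
}

static void example_link_down(struct __vxge_hw_device *devh)
{
	/* e.g. stop the TX queues */
}

static void example_crit_err(struct __vxge_hw_device *devh,
			     enum vxge_hw_event type, u64 ext_data)
{
	/* e.g. schedule an adapter reset */
}

static void example_fill_uld_cbs(struct vxge_hw_device_attr *attr)
{
	attr->uld_callbacks.link_up   = example_link_up;
	attr->uld_callbacks.link_down = example_link_down;
	attr->uld_callbacks.crit_err  = example_crit_err;
}
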
518
519/*
520 * struct __vxge_hw_blockpool_entry - Block private data structure
521 * @item: List header used to link.
522 * @length: Length of the block
523 * @memblock: Virtual address block
524 * @dma_addr: DMA Address of the block.
525 * @dma_handle: DMA handle of the block.
526 * @acc_handle: DMA acc handle
527 *
528 * Block is allocated with a header to put the blocks into list.
529 *
530 */
531struct __vxge_hw_blockpool_entry {
532 struct list_head item;
533 u32 length;
534 void *memblock;
535 dma_addr_t dma_addr;
536 struct pci_dev *dma_handle;
537 struct pci_dev *acc_handle;
538};
539
540/*
541 * struct __vxge_hw_blockpool - Block Pool
542 * @hldev: HW device
543 * @block_size: size of each block.
544 * @pool_size: Number of blocks in the pool
545 * @pool_max: Maximum number of blocks above which to free additional blocks
546 * @req_out: Number of block requests outstanding with the OS
547 * @free_block_list: List of free blocks
548 *
549 * Block pool contains the DMA blocks preallocated.
550 *
551 */
552struct __vxge_hw_blockpool {
553 struct __vxge_hw_device *hldev;
554 u32 block_size;
555 u32 pool_size;
556 u32 pool_max;
557 u32 req_out;
558 struct list_head free_block_list;
559 struct list_head free_entry_list;
560};
561
562/*
563 * enum __vxge_hw_channel_type - Enumerated channel types.
564 * @VXGE_HW_CHANNEL_TYPE_UNKNOWN: Unknown channel.
565 * @VXGE_HW_CHANNEL_TYPE_FIFO: fifo.
566 * @VXGE_HW_CHANNEL_TYPE_RING: ring.
567 * @VXGE_HW_CHANNEL_TYPE_MAX: Maximum number of HW-supported
568 * (and recognized) channel types. Currently: 2.
569 *
570 * Enumerated channel types. Currently there are only two link-layer
571 * channels - Titan fifo and Titan ring. In the future the list will grow.
572 */
573enum __vxge_hw_channel_type {
574 VXGE_HW_CHANNEL_TYPE_UNKNOWN = 0,
575 VXGE_HW_CHANNEL_TYPE_FIFO = 1,
576 VXGE_HW_CHANNEL_TYPE_RING = 2,
577 VXGE_HW_CHANNEL_TYPE_MAX = 3
578};
579
580/*
581 * struct __vxge_hw_channel
582 * @item: List item; used to maintain a list of open channels.
583 * @type: Channel type. See enum vxge_hw_channel_type{}.
584 * @devh: Device handle. HW device object that contains _this_ channel.
585 * @vph: Virtual path handle. Virtual Path Object that contains _this_ channel.
586 * @length: Channel length. Currently allocated number of descriptors.
587 * The channel length "grows" when more descriptors get allocated.
588 * See _hw_mempool_grow.
589 * @reserve_arr: Reserve array. Contains descriptors that can be reserved
590 * by driver for the subsequent send or receive operation.
591 * See vxge_hw_fifo_txdl_reserve(),
592 * vxge_hw_ring_rxd_reserve().
593 * @reserve_ptr: Current pointer in the reserve array
594 * @reserve_top: Reserve top gives the maximum number of dtrs available in
595 * reserve array.
596 * @work_arr: Work array. Contains descriptors posted to the channel.
597 * Note that at any point in time @work_arr contains 3 types of
598 * descriptors:
599 * 1) posted but not yet consumed by Titan device;
600 * 2) consumed but not yet completed;
601 * 3) completed but not yet freed
602 * (via vxge_hw_fifo_txdl_free() or vxge_hw_ring_rxd_free())
603 * @post_index: Post index. At any point in time points to the
604 * position in the channel that will contain the next to-be-posted
605 * descriptor.
606 * @compl_index: Completion index. At any point in time points to the
607 * position in the channel that will contain the next
608 * to-be-completed descriptor.
609 * @free_arr: Free array. Contains completed descriptors that were freed
610 * (i.e., handed over back to HW) by driver.
611 * See vxge_hw_fifo_txdl_free(), vxge_hw_ring_rxd_free().
612 * @free_ptr: current pointer in free array
613 * @per_dtr_space: Per-descriptor space (in bytes) that channel user can utilize
614 * to store per-operation control information.
615 * @stats: Pointer to common statistics
616 * @userdata: Per-channel opaque (void*) user-defined context, which may be
617 * driver object, ULP connection, etc.
618 * Once channel is open, @userdata is passed back to user via
619 * vxge_hw_channel_callback_f.
620 *
621 * HW channel object.
622 *
623 * See also: enum vxge_hw_channel_type{}, enum vxge_hw_channel_flag
624 */
625struct __vxge_hw_channel {
626 struct list_head item;
627 enum __vxge_hw_channel_type type;
628 struct __vxge_hw_device *devh;
629 struct __vxge_hw_vpath_handle *vph;
630 u32 length;
631 u32 vp_id;
632 void **reserve_arr;
633 u32 reserve_ptr;
634 u32 reserve_top;
635 void **work_arr;
636 u32 post_index ____cacheline_aligned;
637 u32 compl_index ____cacheline_aligned;
638 void **free_arr;
639 u32 free_ptr;
640 void **orig_arr;
641 u32 per_dtr_space;
642 void *userdata;
643 struct vxge_hw_common_reg __iomem *common_reg;
644 u32 first_vp_id;
645 struct vxge_hw_vpath_stats_sw_common_info *stats;
646
647} ____cacheline_aligned;
648
649/*
650 * struct __vxge_hw_virtualpath - Virtual Path
651 *
652 * @vp_id: Virtual path id
653 * @vp_open: This flag specifies if vxge_hw_vp_open is called from LL Driver
654 * @hldev: Hal device
655 * @vp_config: Virtual Path Config
656 * @vp_reg: VPATH Register map address in BAR0
657 * @vpmgmt_reg: VPATH_MGMT register map address
658 * @max_mtu: Max mtu that can be supported
659 * @vsport_number: vsport attached to this vpath
660 * @max_kdfc_db: Maximum kernel mode doorbells
661 * @max_nofl_db: Maximum non offload doorbells
662 * @tx_intr_num: Interrupt Number associated with the TX
663 *
664 * @ringh: Ring Queue
665 * @fifoh: FIFO Queue
666 * @vpath_handles: Virtual Path handles list
667 * @stats_block: Memory for DMAing stats
668 * @stats: Vpath statistics
669 *
670 * Virtual path structure to encapsulate the data related to a virtual path.
671 * Virtual paths are allocated by the HW upon getting configuration from the
672 * driver and inserted into the list of virtual paths.
673 */
674struct __vxge_hw_virtualpath {
675 u32 vp_id;
676
677 u32 vp_open;
678#define VXGE_HW_VP_NOT_OPEN 0
679#define VXGE_HW_VP_OPEN 1
680
681 struct __vxge_hw_device *hldev;
682 struct vxge_hw_vp_config *vp_config;
683 struct vxge_hw_vpath_reg __iomem *vp_reg;
684 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
685 struct __vxge_hw_non_offload_db_wrapper __iomem *nofl_db;
686
687 u32 max_mtu;
688 u32 vsport_number;
689 u32 max_kdfc_db;
690 u32 max_nofl_db;
691 u64 tim_tti_cfg1_saved;
692 u64 tim_tti_cfg3_saved;
693 u64 tim_rti_cfg1_saved;
694 u64 tim_rti_cfg3_saved;
695
696 struct __vxge_hw_ring *____cacheline_aligned ringh;
697 struct __vxge_hw_fifo *____cacheline_aligned fifoh;
698 struct list_head vpath_handles;
699 struct __vxge_hw_blockpool_entry *stats_block;
700 struct vxge_hw_vpath_stats_hw_info *hw_stats;
701 struct vxge_hw_vpath_stats_hw_info *hw_stats_sav;
702 struct vxge_hw_vpath_stats_sw_info *sw_stats;
703 spinlock_t lock;
704};
705
706/*
707 * struct __vxge_hw_vpath_handle - List item to store callback information
708 * @item: List head to keep the item in linked list
709 * @vpath: Virtual path to which this item belongs
710 *
711 * This structure is used to store the callback information.
712 */
713struct __vxge_hw_vpath_handle {
714 struct list_head item;
715 struct __vxge_hw_virtualpath *vpath;
716};
717
718/*
719 * struct __vxge_hw_device
720 *
721 * HW device object.
722 */
723/**
724 * struct __vxge_hw_device - Hal device object
725 * @magic: Magic Number
726 * @bar0: BAR0 virtual address.
727 * @pdev: Physical device handle
728 * @config: Configuration passed by the LL driver at initialization
729 * @link_state: Link state
730 *
731 * HW device object. Represents Titan adapter
732 */
733struct __vxge_hw_device {
734 u32 magic;
735#define VXGE_HW_DEVICE_MAGIC 0x12345678
736#define VXGE_HW_DEVICE_DEAD 0xDEADDEAD
737 void __iomem *bar0;
738 struct pci_dev *pdev;
739 struct net_device *ndev;
740 struct vxge_hw_device_config config;
741 enum vxge_hw_device_link_state link_state;
742
743 struct vxge_hw_uld_cbs uld_callbacks;
744
745 u32 host_type;
746 u32 func_id;
747 u32 access_rights;
748#define VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH 0x1
749#define VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM 0x2
750#define VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM 0x4
751 struct vxge_hw_legacy_reg __iomem *legacy_reg;
752 struct vxge_hw_toc_reg __iomem *toc_reg;
753 struct vxge_hw_common_reg __iomem *common_reg;
754 struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
755 struct vxge_hw_srpcim_reg __iomem *srpcim_reg \
756 [VXGE_HW_TITAN_SRPCIM_REG_SPACES];
757 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg \
758 [VXGE_HW_TITAN_VPMGMT_REG_SPACES];
759 struct vxge_hw_vpath_reg __iomem *vpath_reg \
760 [VXGE_HW_TITAN_VPATH_REG_SPACES];
761 u8 __iomem *kdfc;
762 u8 __iomem *usdc;
763 struct __vxge_hw_virtualpath virtual_paths \
764 [VXGE_HW_MAX_VIRTUAL_PATHS];
765 u64 vpath_assignments;
766 u64 vpaths_deployed;
767 u32 first_vp_id;
768 u64 tim_int_mask0[4];
769 u32 tim_int_mask1[4];
770
771 struct __vxge_hw_blockpool block_pool;
772 struct vxge_hw_device_stats stats;
773 u32 debug_module_mask;
774 u32 debug_level;
775 u32 level_err;
776 u32 level_trace;
777 u16 eprom_versions[VXGE_HW_MAX_ROM_IMAGES];
778};
779
780#define VXGE_HW_INFO_LEN 64
781/**
782 * struct vxge_hw_device_hw_info - Device information
783 * @host_type: Host Type
784 * @func_id: Function Id
785 * @vpath_mask: vpath bit mask
786 * @fw_version: Firmware version
787 * @fw_date: Firmware Date
788 * @flash_version: Flash version
789 * @flash_date: Flash date
790 * @mac_addrs: Mac addresses for each vpath
791 * @mac_addr_masks: Mac address masks for each vpath
792 *
793 * Returns the vpath mask that has the bits set for each vpath allocated
794 * for the driver and the first mac address for each vpath
795 */
796struct vxge_hw_device_hw_info {
797 u32 host_type;
798#define VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION 0
799#define VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION 1
800#define VXGE_HW_NO_MR_SR_VH0_FUNCTION0 2
801#define VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION 3
802#define VXGE_HW_MR_SR_VH0_INVALID_CONFIG 4
803#define VXGE_HW_SR_VH_FUNCTION0 5
804#define VXGE_HW_SR_VH_VIRTUAL_FUNCTION 6
805#define VXGE_HW_VH_NORMAL_FUNCTION 7
806 u64 function_mode;
807#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION 0
808#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION 1
809#define VXGE_HW_FUNCTION_MODE_SRIOV 2
810#define VXGE_HW_FUNCTION_MODE_MRIOV 3
811#define VXGE_HW_FUNCTION_MODE_MRIOV_8 4
812#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17 5
813#define VXGE_HW_FUNCTION_MODE_SRIOV_8 6
814#define VXGE_HW_FUNCTION_MODE_SRIOV_4 7
815#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2 8
816#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_4 9
817#define VXGE_HW_FUNCTION_MODE_MRIOV_4 10
818
819 u32 func_id;
820 u64 vpath_mask;
821 struct vxge_hw_device_version fw_version;
822 struct vxge_hw_device_date fw_date;
823 struct vxge_hw_device_version flash_version;
824 struct vxge_hw_device_date flash_date;
825 u8 serial_number[VXGE_HW_INFO_LEN];
826 u8 part_number[VXGE_HW_INFO_LEN];
827 u8 product_desc[VXGE_HW_INFO_LEN];
828 u8 mac_addrs[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
829 u8 mac_addr_masks[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
830};
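
An assumed probe-time use of this structure: read the adapter information through vxge_hw_device_hw_info_get() (declared later in this header) before the device is fully initialized. The log message is illustrative only.

static enum vxge_hw_status example_print_hw_info(void __iomem *bar0)
{
	struct vxge_hw_device_hw_info hw_info;
	enum vxge_hw_status status;

	status = vxge_hw_device_hw_info_get(bar0, &hw_info);
	if (status != VXGE_HW_OK)
		return status;

	/* vpath_mask has one bit set per vpath assigned to this function. */
	pr_info("vxge: func %u, vpath mask 0x%llx, serial %s\n",
		hw_info.func_id, (unsigned long long)hw_info.vpath_mask,
		hw_info.serial_number);
	return VXGE_HW_OK;
}
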
831
832/**
833 * struct vxge_hw_device_attr - Device memory spaces.
834 * @bar0: BAR0 virtual address.
835 * @pdev: PCI device object.
836 *
837 * Device memory spaces. Includes configuration, BAR0, and other per-device
838 * mapped memories. Also includes a pointer to the OS-specific PCI device object.
839 */
840struct vxge_hw_device_attr {
841 void __iomem *bar0;
842 struct pci_dev *pdev;
843 struct vxge_hw_uld_cbs uld_callbacks;
844};
845
846#define VXGE_HW_DEVICE_LINK_STATE_SET(hldev, ls) (hldev->link_state = ls)
847
848#define VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, i) { \
849 if (i < 16) { \
850 m0[0] |= vxge_vBIT(0x8, (i*4), 4); \
851 m0[1] |= vxge_vBIT(0x4, (i*4), 4); \
852 } \
853 else { \
854 m1[0] = 0x80000000; \
855 m1[1] = 0x40000000; \
856 } \
857}
858
859#define VXGE_HW_DEVICE_TIM_INT_MASK_RESET(m0, m1, i) { \
860 if (i < 16) { \
861 m0[0] &= ~vxge_vBIT(0x8, (i*4), 4); \
862 m0[1] &= ~vxge_vBIT(0x4, (i*4), 4); \
863 } \
864 else { \
865 m1[0] = 0; \
866 m1[1] = 0; \
867 } \
868}
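
A sketch of how the two helpers above might be used against the tim_int_mask0/tim_int_mask1 arrays of struct __vxge_hw_device (defined earlier in this file) to mask and later unmask the timer interrupt of one vpath; the function name is hypothetical.

static void example_toggle_vpath_tim_int(struct __vxge_hw_device *hldev, u32 vp_id)
{
	/* Set the per-vpath TIM interrupt mask bits ... */
	VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
					hldev->tim_int_mask1, vp_id);

	/* ... and clear them again to re-enable the interrupt. */
	VXGE_HW_DEVICE_TIM_INT_MASK_RESET(hldev->tim_int_mask0,
					  hldev->tim_int_mask1, vp_id);
}
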
869
870#define VXGE_HW_DEVICE_STATS_PIO_READ(loc, offset) { \
871 status = vxge_hw_mrpcim_stats_access(hldev, \
872 VXGE_HW_STATS_OP_READ, \
873 loc, \
874 offset, \
875 &val64); \
876 if (status != VXGE_HW_OK) \
877 return status; \
878}
879
880/*
881 * struct __vxge_hw_ring - Ring channel.
882 * @channel: Channel "base" of this ring, the common part of all HW
883 * channels.
884 * @mempool: Memory pool, the pool from which descriptors get allocated.
885 * (See vxge_hw_mm.h).
886 * @config: Ring configuration, part of device configuration
887 * (see struct vxge_hw_device_config{}).
888 * @ring_length: Length of the ring
889 * @buffer_mode: 1, 3, or 5. The value specifies a receive buffer mode,
890 * as per Titan User Guide.
891 * @rxd_size: RxD sizes for 1-, 3- or 5- buffer modes. As per Titan spec,
892 * 1-buffer mode descriptor is 32 byte long, etc.
893 * @rxd_priv_size: Per RxD size reserved (by HW) for driver to keep
894 * per-descriptor data (e.g., DMA handle for Solaris)
895 * @per_rxd_space: Per rxd space requested by driver
896 * @rxds_per_block: Number of descriptors per hardware-defined RxD
897 * block. Depends on the (1-, 3-, 5-) buffer mode.
898 * @rxdblock_priv_size: Reserved at the end of each RxD block. HW internal
899 * usage. Not to be confused with @rxd_priv_size.
900 * @cmpl_cnt: Completion counter. Is reset to zero upon entering the ISR.
901 * @callback: Channel completion callback. HW invokes the callback when there
902 * are new completions on that channel. In many implementations
903 * the @callback executes in the hw interrupt context.
904 * @rxd_init: Channel's descriptor-initialize callback.
905 * See vxge_hw_ring_rxd_init_f{}.
906 * If not NULL, HW invokes the callback when opening
907 * the ring.
908 * @rxd_term: Channel's descriptor-terminate callback. If not NULL,
909 * HW invokes the callback when closing the corresponding channel.
910 * See also vxge_hw_channel_rxd_term_f{}.
911 * @stats: Statistics for ring
912 * Ring channel.
913 *
914 * Note: The structure is cache line aligned to better utilize
915 * CPU cache performance.
916 */
917struct __vxge_hw_ring {
918 struct __vxge_hw_channel channel;
919 struct vxge_hw_mempool *mempool;
920 struct vxge_hw_vpath_reg __iomem *vp_reg;
921 struct vxge_hw_common_reg __iomem *common_reg;
922 u32 ring_length;
923 u32 buffer_mode;
924 u32 rxd_size;
925 u32 rxd_priv_size;
926 u32 per_rxd_space;
927 u32 rxds_per_block;
928 u32 rxdblock_priv_size;
929 u32 cmpl_cnt;
930 u32 vp_id;
931 u32 doorbell_cnt;
932 u32 total_db_cnt;
933 u64 rxds_limit;
934 u32 rtimer;
935 u64 tim_rti_cfg1_saved;
936 u64 tim_rti_cfg3_saved;
937
938 enum vxge_hw_status (*callback)(
939 struct __vxge_hw_ring *ringh,
940 void *rxdh,
941 u8 t_code,
942 void *userdata);
943
944 enum vxge_hw_status (*rxd_init)(
945 void *rxdh,
946 void *userdata);
947
948 void (*rxd_term)(
949 void *rxdh,
950 enum vxge_hw_rxd_state state,
951 void *userdata);
952
953 struct vxge_hw_vpath_stats_sw_ring_info *stats ____cacheline_aligned;
954 struct vxge_hw_ring_config *config;
955} ____cacheline_aligned;
956
957/**
958 * enum vxge_hw_txdl_state - Descriptor (TXDL) state.
959 * @VXGE_HW_TXDL_STATE_NONE: Invalid state.
960 * @VXGE_HW_TXDL_STATE_AVAIL: Descriptor is available for reservation.
961 * @VXGE_HW_TXDL_STATE_POSTED: Descriptor is posted for processing by the
962 * device.
963 * @VXGE_HW_TXDL_STATE_FREED: Descriptor is free and can be reused for
964 * filling-in and posting later.
965 *
966 * Titan/HW descriptor states.
967 *
968 */
969enum vxge_hw_txdl_state {
970 VXGE_HW_TXDL_STATE_NONE = 0,
971 VXGE_HW_TXDL_STATE_AVAIL = 1,
972 VXGE_HW_TXDL_STATE_POSTED = 2,
973 VXGE_HW_TXDL_STATE_FREED = 3
974};
975/*
976 * struct __vxge_hw_fifo - Fifo.
977 * @channel: Channel "base" of this fifo, the common part of all HW
978 * channels.
979 * @mempool: Memory pool, from which descriptors get allocated.
980 * @config: Fifo configuration, part of device configuration
981 * (see struct vxge_hw_device_config{}).
982 * @interrupt_type: Interrupt type to be used
983 * @no_snoop_bits: See struct vxge_hw_fifo_config{}.
984 * @txdl_per_memblock: Number of TxDLs (TxD lists) per memblock. For more
985 * information on TxDLs please refer to the Titan UG.
986 * @txdl_size: Configured TxDL size (i.e., number of TxDs in a list), plus
987 * per-TxDL HW private space (struct __vxge_hw_fifo_txdl_priv).
988 * @priv_size: Per-Tx descriptor space reserved for driver
989 * usage.
990 * @per_txdl_space: Per txdl private space for the driver
991 * @callback: Fifo completion callback. HW invokes the callback when there
992 * are new completions on that fifo. In many implementations
993 * the @callback executes in the hw interrupt context.
994 * @txdl_term: Fifo's descriptor-terminate callback. If not NULL,
995 * HW invokes the callback when closing the corresponding fifo.
996 * See also vxge_hw_fifo_txdl_term_f{}.
997 * @stats: Statistics of this fifo
998 *
999 * Fifo channel.
1000 * Note: The structure is cache line aligned.
1001 */
1002struct __vxge_hw_fifo {
1003 struct __vxge_hw_channel channel;
1004 struct vxge_hw_mempool *mempool;
1005 struct vxge_hw_fifo_config *config;
1006 struct vxge_hw_vpath_reg __iomem *vp_reg;
1007 struct __vxge_hw_non_offload_db_wrapper __iomem *nofl_db;
1008 u64 interrupt_type;
1009 u32 no_snoop_bits;
1010 u32 txdl_per_memblock;
1011 u32 txdl_size;
1012 u32 priv_size;
1013 u32 per_txdl_space;
1014 u32 vp_id;
1015 u32 tx_intr_num;
1016 u32 rtimer;
1017 u64 tim_tti_cfg1_saved;
1018 u64 tim_tti_cfg3_saved;
1019
1020 enum vxge_hw_status (*callback)(
1021 struct __vxge_hw_fifo *fifo_handle,
1022 void *txdlh,
1023 enum vxge_hw_fifo_tcode t_code,
1024 void *userdata,
1025 struct sk_buff ***skb_ptr,
1026 int nr_skb,
1027 int *more);
1028
1029 void (*txdl_term)(
1030 void *txdlh,
1031 enum vxge_hw_txdl_state state,
1032 void *userdata);
1033
1034 struct vxge_hw_vpath_stats_sw_fifo_info *stats ____cacheline_aligned;
1035} ____cacheline_aligned;
1036
1037/*
1038 * struct __vxge_hw_fifo_txdl_priv - Transmit descriptor HW-private data.
1039 * @dma_addr: DMA (mapped) address of _this_ descriptor.
1040 * @dma_handle: DMA handle used to map the descriptor onto device.
1041 * @dma_offset: Descriptor's offset in the memory block. HW allocates
1042 * descriptors in memory blocks (see struct vxge_hw_fifo_config{})
1043 * Each memblock is a contiguous block of DMA-able memory.
1044 * @frags: Total number of fragments (that is, contiguous data buffers)
1045 * carried by this TxDL.
1046 * @align_vaddr_start: Aligned virtual address start
1047 * @align_vaddr: Virtual address of the per-TxDL area in memory used for
1048 * alignment. Used to place one or more mis-aligned fragments
1049 * @align_dma_addr: DMA address translated from the @align_vaddr.
1050 * @align_dma_handle: DMA handle that corresponds to @align_dma_addr.
1051 * @align_dma_acch: DMA access handle corresponds to @align_dma_addr.
1052 * @align_dma_offset: The current offset into the @align_vaddr area.
1053 * Grows while filling the descriptor, gets reset.
1054 * @align_used_frags: Number of fragments used.
1055 * @alloc_frags: Total number of fragments allocated.
1056 * @unused: TODO
1057 * @next_txdl_priv: (TODO).
1058 * @first_txdp: (TODO).
1059 * @linked_txdl_priv: Pointer to any linked TxDL for creating contiguous
1060 * TxDL list.
1061 * @txdlh: Corresponding txdlh to this TxDL.
1062 * @memblock: Pointer to the TxDL memory block or memory page.
1063 *
1064 * @dma_object: DMA address and handle of the memory block that contains
1065 * the descriptor. This member is used only in the "checked"
1066 * version of the HW (to enforce certain assertions);
1067 * otherwise it gets compiled out.
1068 * @allocated: 1 if the descriptor is reserved, 0 otherwise. Internal usage.
1069 *
1070 * Per-transmit descriptor HW-private data. HW uses the space to keep DMA
1071 * information associated with the descriptor. Note that driver can ask HW
1072 * to allocate additional per-descriptor space for its own (driver-specific)
1073 * purposes.
1074 *
1075 * See also: struct vxge_hw_ring_rxd_priv{}.
1076 */
1077struct __vxge_hw_fifo_txdl_priv {
1078 dma_addr_t dma_addr;
1079 struct pci_dev *dma_handle;
1080 ptrdiff_t dma_offset;
1081 u32 frags;
1082 u8 *align_vaddr_start;
1083 u8 *align_vaddr;
1084 dma_addr_t align_dma_addr;
1085 struct pci_dev *align_dma_handle;
1086 struct pci_dev *align_dma_acch;
1087 ptrdiff_t align_dma_offset;
1088 u32 align_used_frags;
1089 u32 alloc_frags;
1090 u32 unused;
1091 struct __vxge_hw_fifo_txdl_priv *next_txdl_priv;
1092 struct vxge_hw_fifo_txd *first_txdp;
1093 void *memblock;
1094};
1095
1096/*
1097 * struct __vxge_hw_non_offload_db_wrapper - Non-offload Doorbell Wrapper
1098 * @control_0: Bits 0 to 7 - Doorbell type.
1099 * Bits 8 to 31 - Reserved.
1100 * Bits 32 to 39 - The highest TxD in this TxDL.
1101 * Bits 40 to 47 - Reserved.
1102 * Bits 48 to 55 - Reserved.
1103 * Bits 56 to 63 - No snoop flags.
1104 * @txdl_ptr: The starting location of the TxDL in host memory.
1105 *
1106 * Created by the host and written to the adapter via PIO to a Kernel Doorbell
1107 * FIFO. All non-offload doorbell wrapper fields must be written by the host as
1108 * part of a doorbell write. Consumed by the adapter but is not written by the
1109 * adapter.
1110 */
1111struct __vxge_hw_non_offload_db_wrapper {
1112 u64 control_0;
1113#define VXGE_HW_NODBW_GET_TYPE(ctrl0) vxge_bVALn(ctrl0, 0, 8)
1114#define VXGE_HW_NODBW_TYPE(val) vxge_vBIT(val, 0, 8)
1115#define VXGE_HW_NODBW_TYPE_NODBW 0
1116
1117#define VXGE_HW_NODBW_GET_LAST_TXD_NUMBER(ctrl0) vxge_bVALn(ctrl0, 32, 8)
1118#define VXGE_HW_NODBW_LAST_TXD_NUMBER(val) vxge_vBIT(val, 32, 8)
1119
1120#define VXGE_HW_NODBW_GET_NO_SNOOP(ctrl0) vxge_bVALn(ctrl0, 56, 8)
1121#define VXGE_HW_NODBW_LIST_NO_SNOOP(val) vxge_vBIT(val, 56, 8)
1122#define VXGE_HW_NODBW_LIST_NO_SNOOP_TXD_READ_TXD0_WRITE 0x2
1123#define VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ 0x1
1124
1125 u64 txdl_ptr;
1126};
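
A sketch of composing the doorbell wrapper with the NODBW helper macros above; the actual PIO write of the wrapper into the kernel doorbell FIFO is omitted, and the no-snoop choice is an illustrative assumption.

static void example_fill_nodbw(struct __vxge_hw_non_offload_db_wrapper *dbw,
			       u64 txdl_dma_addr, u32 last_txd_index)
{
	dbw->control_0 = VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
			 VXGE_HW_NODBW_LAST_TXD_NUMBER(last_txd_index) |
			 VXGE_HW_NODBW_LIST_NO_SNOOP(
				VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ);

	/* DMA address of the TxDL this doorbell announces. */
	dbw->txdl_ptr = txdl_dma_addr;
}
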
1127
1128/*
1129 * TX Descriptor
1130 */
1131
1132/**
1133 * struct vxge_hw_fifo_txd - Transmit Descriptor
1134 * @control_0: Bits 0 to 6 - Reserved.
1135 * Bit 7 - List Ownership. This field should be initialized
1136 * to '1' by the driver before the transmit list pointer is
1137 * written to the adapter. This field will be set to '0' by the
1138 * adapter once it has completed transmitting the frame or frames in
1139 * the list. Note - This field is only valid in TxD0. Additionally,
1140 * for multi-list sequences, the driver should not release any
1141 * buffers until the ownership of the last list in the multi-list
1142 * sequence has been returned to the host.
1143 * Bits 8 to 11 - Reserved
1144 * Bits 12 to 15 - Transfer_Code. This field is only valid in
1145 * TxD0. It is used to describe the status of the transmit data
1146 * buffer transfer. This field is always overwritten by the
1147 * adapter, so this field may be initialized to any value.
1148 * Bits 16 to 17 - Host steering. This field allows the host to
1149 * override the selection of the physical transmit port.
1150 * Attention:
1151 * "Normal" sounds as if it is learned from the switch rather than from
1152 * the aggregation algorithms.
1153 * 00: Normal. Use Destination/MAC Address
1154 * lookup to determine the transmit port.
1155 * 01: Send on physical Port1.
1156 * 10: Send on physical Port0.
1157 * 11: Send on both ports.
1158 * Bits 18 to 21 - Reserved
1159 * Bits 22 to 23 - Gather_Code. This field is set by the host and
1160 * is used to describe how individual buffers comprise a frame.
1161 * 10: First descriptor of a frame.
1162 * 00: Middle of a multi-descriptor frame.
1163 * 01: Last descriptor of a frame.
1164 * 11: First and last descriptor of a frame (the entire frame
1165 * resides in a single buffer).
1166 * For multi-descriptor frames, the only valid gather code sequence
1167 * is {10, [00], 01}. In other words, the descriptors must be placed
1168 * in the list in the correct order.
1169 * Bits 24 to 27 - Reserved
1170 * Bits 28 to 29 - LSO_Frm_Encap. LSO Frame Encapsulation
1171 * definition. Only valid in TxD0. This field allows the host to
1172 * indicate the Ethernet encapsulation of an outbound LSO packet.
1173 * 00 - classic mode (best guess)
1174 * 01 - LLC
1175 * 10 - SNAP
1176 * 11 - DIX
1177 * If "classic mode" is selected, the adapter will attempt to
1178 * decode the frame's Ethernet encapsulation by examining the L/T
1179 * field as follows:
1180 * <= 0x05DC LLC/SNAP encoding; must examine DSAP/SSAP to determine
1181 * if packet is IPv4 or IPv6.
1182 * 0x8870 Jumbo-SNAP encoding.
1183 * 0x0800 IPv4 DIX encoding
1184 * 0x86DD IPv6 DIX encoding
1185 * others illegal encapsulation
1186 * Bit 30 - LSO_Flag. Large Send Offload (LSO) flag.
1187 * Set to 1 to perform segmentation offload for TCP/UDP.
1188 * This field is valid only in TxD0.
1189 * Bits 31 to 33 - Reserved.
1190 * Bits 34 to 47 - LSO_MSS. TCP/UDP LSO Maximum Segment Size
1191 * This field is meaningful only when LSO_Control is non-zero.
1192 * When LSO_Control is set to TCP_LSO, the single (possibly large)
1193 * TCP segment described by this TxDL will be sent as a series of
1194 * TCP segments each of which contains no more than LSO_MSS
1195 * payload bytes.
1196 * When LSO_Control is set to UDP_LSO, the single (possibly large)
1197 * UDP datagram described by this TxDL will be sent as a series of
1198 * UDP datagrams each of which contains no more than LSO_MSS
1199 * payload bytes.
1200 * All outgoing frames from this TxDL will have LSO_MSS bytes of UDP
1201 * or TCP payload, with the exception of the last, which will have
1202 * <= LSO_MSS bytes of payload.
1203 * Bits 48 to 63 - Buffer_Size. Number of valid bytes in the
1204 * buffer to be read by the adapter. This field is written by the
1205 * host. A value of 0 is illegal.
1206 * Bits 32 to 63 - This value is written by the adapter upon
1207 * completion of a UDP or TCP LSO operation and indicates the number
1208 * of UDP or TCP payload bytes that were transmitted. 0x0000 will be
1209 * returned for any non-LSO operation.
1210 * @control_1: Bits 0 to 4 - Reserved.
1211 * Bit 5 - Tx_CKO_IPv4 Set to a '1' to enable IPv4 header checksum
1212 * offload. This field is only valid in the first TxD of a frame.
1213 * Bit 6 - Tx_CKO_TCP Set to a '1' to enable TCP checksum offload.
1214 * This field is only valid in the first TxD of a frame (the TxD's
1215 * gather code must be 10 or 11). The driver should only set this
1216 * bit if it can guarantee that TCP is present.
1217 * Bit 7 - Tx_CKO_UDP Set to a '1' to enable UDP checksum offload.
1218 * This field is only valid in the first TxD of a frame (the TxD's
1219 * gather code must be 10 or 11). The driver should only set this
1220 * bit if it can guarantee that UDP is present.
1221 * Bits 8 to 14 - Reserved.
1222 * Bit 15 - Tx_VLAN_Enable VLAN tag insertion flag. Set to a '1' to
1223 * instruct the adapter to insert the VLAN tag specified by the
1224 * Tx_VLAN_Tag field. This field is only valid in the first TxD of
1225 * a frame.
1226 * Bits 16 to 31 - Tx_VLAN_Tag. Variable portion of the VLAN tag
1227 * to be inserted into the frame by the adapter (the first two bytes
1228 * of a VLAN tag are always 0x8100). This field is only valid if the
1229 * Tx_VLAN_Enable field is set to '1'.
1230 * Bits 32 to 33 - Reserved.
1231 * Bits 34 to 39 - Tx_Int_Number. Indicates which Tx interrupt
1232 * number the frame associated with. This field is written by the
1233 * host. It is only valid in the first TxD of a frame.
1234 * Bits 40 to 42 - Reserved.
1235 * Bit 43 - Set to 1 to exclude the frame from bandwidth metering
1236 * functions. This field is valid only in the first TxD
1237 * of a frame.
1238 * Bits 44 to 45 - Reserved.
1239 * Bit 46 - Tx_Int_Per_List Set to a '1' to instruct the adapter to
1240 * generate an interrupt as soon as all of the frames in the list
1241 * have been transmitted. In order to have per-frame interrupts,
1242 * the driver should place a maximum of one frame per list. This
1243 * field is only valid in the first TxD of a frame.
1244 * Bit 47 - Tx_Int_Utilization Set to a '1' to instruct the adapter
1245 * to count the frame toward the utilization interrupt specified in
1246 * the Tx_Int_Number field. This field is only valid in the first
1247 * TxD of a frame.
1248 * Bits 48 to 63 - Reserved.
1249 * @buffer_pointer: Buffer start address.
1250 * @host_control: Host_Control. Opaque 64bit data stored by driver inside the
1251 * Titan descriptor prior to posting the latter on the fifo
1252 * via vxge_hw_fifo_txdl_post(). The %host_control is returned as is
1253 * to the driver with each completed descriptor.
1254 *
1255 * Transmit descriptor (TxD). Fifo descriptor contains a configured number
1256 * (list) of TxDs. For more details please refer to the Titan User Guide,
1257 * Section 5.4.2 "Transmit Descriptor (TxD) Format".
1258 */
1259struct vxge_hw_fifo_txd {
1260 u64 control_0;
1261#define VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER vxge_mBIT(7)
1262
1263#define VXGE_HW_FIFO_TXD_T_CODE_GET(ctrl0) vxge_bVALn(ctrl0, 12, 4)
1264#define VXGE_HW_FIFO_TXD_T_CODE(val) vxge_vBIT(val, 12, 4)
1265#define VXGE_HW_FIFO_TXD_T_CODE_UNUSED VXGE_HW_FIFO_T_CODE_UNUSED
1266
1267
1268#define VXGE_HW_FIFO_TXD_GATHER_CODE(val) vxge_vBIT(val, 22, 2)
1269#define VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST VXGE_HW_FIFO_GATHER_CODE_FIRST
1270#define VXGE_HW_FIFO_TXD_GATHER_CODE_LAST VXGE_HW_FIFO_GATHER_CODE_LAST
1271
1272
1273#define VXGE_HW_FIFO_TXD_LSO_EN vxge_mBIT(30)
1274
1275#define VXGE_HW_FIFO_TXD_LSO_MSS(val) vxge_vBIT(val, 34, 14)
1276
1277#define VXGE_HW_FIFO_TXD_BUFFER_SIZE(val) vxge_vBIT(val, 48, 16)
1278
1279 u64 control_1;
1280#define VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN vxge_mBIT(5)
1281#define VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN vxge_mBIT(6)
1282#define VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN vxge_mBIT(7)
1283#define VXGE_HW_FIFO_TXD_VLAN_ENABLE vxge_mBIT(15)
1284
1285#define VXGE_HW_FIFO_TXD_VLAN_TAG(val) vxge_vBIT(val, 16, 16)
1286
1287#define VXGE_HW_FIFO_TXD_INT_NUMBER(val) vxge_vBIT(val, 34, 6)
1288
1289#define VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST vxge_mBIT(46)
1290#define VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ vxge_mBIT(47)
1291
1292 u64 buffer_pointer;
1293
1294 u64 host_control;
1295};
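
For illustration only, a hand-rolled fill of a single-buffer TxD using the bit-field macros above; the driver normally goes through the vxge_hw_fifo_txdl_* helpers further below. ORing the FIRST and LAST gather codes yields the "first and last descriptor of a frame" encoding (11) described in the field comment.

static void example_fill_single_buffer_txd(struct vxge_hw_fifo_txd *txdp,
					   dma_addr_t buf_dma, u32 buf_len)
{
	txdp->buffer_pointer = buf_dma;

	/* Whole frame lives in one buffer: first-and-last gather code. */
	txdp->control_0 = VXGE_HW_FIFO_TXD_GATHER_CODE(
				VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST |
				VXGE_HW_FIFO_TXD_GATHER_CODE_LAST) |
			  VXGE_HW_FIFO_TXD_BUFFER_SIZE(buf_len);

	/* Request IPv4 header and TCP checksum offload for this frame. */
	txdp->control_1 = VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
			  VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN;

	/* Hand ownership of the list to the adapter last. */
	txdp->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
}
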
1296
1297/**
1298 * struct vxge_hw_ring_rxd_1 - One buffer mode RxD for ring
1299 * @host_control: This field is exclusively for host use and is "readonly"
1300 * from the adapter's perspective.
1301 * @control_0: Bits 0 to 6 - RTH_Bucket.
1302 * Bit 7 - Own Descriptor ownership bit. This bit is set to 1
1303 * by the host, and is set to 0 by the adapter.
1304 * 0 - Host owns RxD and buffer.
1305 * 1 - The adapter owns RxD and buffer.
1306 * Bit 8 - Fast_Path_Eligible When set, indicates that the
1307 * received frame meets all of the criteria for fast path processing.
1308 * The required criteria are as follows:
1309 * !SYN &
1310 * (Transfer_Code == "Transfer OK") &
1311 * (!Is_IP_Fragment) &
1312 * ((Is_IPv4 & computed_L3_checksum == 0xFFFF) |
1313 * (Is_IPv6)) &
1314 * ((Is_TCP & computed_L4_checksum == 0xFFFF) |
1315 * (Is_UDP & (computed_L4_checksum == 0xFFFF |
1316 * computed _L4_checksum == 0x0000)))
1317 * (same meaning for all RxD buffer modes)
1318 * Bit 9 - L3 Checksum Correct
1319 * Bit 10 - L4 Checksum Correct
1320 * Bit 11 - Reserved
1321 * Bit 12 to 15 - This field is written by the adapter. It is
1322 * used to report the status of the frame transfer to the host.
1323 * 0x0 - Transfer OK
1324 * 0x4 - RDA Failure During Transfer
1325 * 0x5 - Unparseable Packet, such as unknown IPv6 header.
1326 * 0x6 - Frame integrity error (FCS or ECC).
1327 * 0x7 - Buffer Size Error. The provided buffer(s) were not
1328 * appropriately sized and data loss occurred.
1329 * 0x8 - Internal ECC Error. RxD corrupted.
1330 * 0x9 - IPv4 Checksum error
1331 * 0xA - TCP/UDP Checksum error
1332 * 0xF - Unknown Error or Multiple Error. Indicates an
1333 * unknown problem or that more than one of transfer codes is set.
1334 * Bit 16 - SYN The adapter sets this field to indicate that
1335 * the incoming frame contained a TCP segment with its SYN bit
1336 * set and its ACK bit NOT set. (same meaning for all RxD buffer
1337 * modes)
1338 * Bit 17 - Is ICMP
1339 * Bit 18 - RTH_SPDM_HIT Set to 1 if there was a match in the
1340 * Socket Pair Direct Match Table and the frame was steered based
1341 * on SPDM.
1342 * Bit 19 - RTH_IT_HIT Set to 1 if there was a match in the
1343 * Indirection Table and the frame was steered based on hash
1344 * indirection.
1345 * Bit 20 to 23 - RTH_HASH_TYPE Indicates the function (hash
1346 * type) that was used to calculate the hash.
1347 * Bit 24 - IS_VLAN Set to '1' if the frame was/is VLAN
1348 * tagged.
1349 * Bit 25 to 26 - ETHER_ENCAP Reflects the Ethernet encapsulation
1350 * of the received frame.
1351 * 0x0 - Ethernet DIX
1352 * 0x1 - LLC
1353 * 0x2 - SNAP (includes Jumbo-SNAP)
1354 * 0x3 - IPX
1355 * Bit 27 - IS_IPV4 Set to '1' if the frame contains an IPv4 packet.
1356 * Bit 28 - IS_IPV6 Set to '1' if the frame contains an IPv6 packet.
1357 * Bit 29 - IS_IP_FRAG Set to '1' if the frame contains a fragmented
1358 * IP packet.
1359 * Bit 30 - IS_TCP Set to '1' if the frame contains a TCP segment.
1360 * Bit 31 - IS_UDP Set to '1' if the frame contains a UDP message.
1361 * Bit 32 to 47 - L3_Checksum[0:15] The IPv4 checksum value that
1362 * arrived with the frame. If the resulting computed IPv4 header
1363 * checksum for the frame did not produce the expected 0xFFFF value,
1364 * then the transfer code would be set to 0x9.
1365 * Bit 48 to 63 - L4_Checksum[0:15] The TCP/UDP checksum value that
1366 * arrived with the frame. If the resulting computed TCP/UDP checksum
1367 * for the frame did not produce the expected 0xFFFF value, then the
1368 * transfer code would be set to 0xA.
1369 * @control_1: Bits 0 to 1 - Reserved
1370 * Bits 2 to 15 - Buffer0_Size.This field is set by the host and
1371 * eventually overwritten by the adapter. The host writes the
1372 * available buffer size in bytes when it passes the descriptor to
1373 * the adapter. When a frame is delivered to the host, the adapter
1374 * populates this field with the number of bytes written into the
1375 * buffer. The largest supported buffer is 16,383 bytes.
1376 * Bit 16 to 47 - RTH Hash Value 32-bit RTH hash value. Only valid if
1377 * RTH_HASH_TYPE (Control_0, bits 20:23) is nonzero.
1378 * Bit 48 to 63 - VLAN_Tag[0:15] The contents of the variable portion
1379 * of the VLAN tag, if one was detected by the adapter. This field is
1380 * populated even if VLAN-tag stripping is enabled.
1381 * @buffer0_ptr: Pointer to buffer. This field is populated by the driver.
1382 *
1383 * One buffer mode RxD for ring structure
1384 */
1385struct vxge_hw_ring_rxd_1 {
1386 u64 host_control;
1387 u64 control_0;
1388#define VXGE_HW_RING_RXD_RTH_BUCKET_GET(ctrl0) vxge_bVALn(ctrl0, 0, 7)
1389
1390#define VXGE_HW_RING_RXD_LIST_OWN_ADAPTER vxge_mBIT(7)
1391
1392#define VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(ctrl0) vxge_bVALn(ctrl0, 8, 1)
1393
1394#define VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(ctrl0) vxge_bVALn(ctrl0, 9, 1)
1395
1396#define VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(ctrl0) vxge_bVALn(ctrl0, 10, 1)
1397
1398#define VXGE_HW_RING_RXD_T_CODE_GET(ctrl0) vxge_bVALn(ctrl0, 12, 4)
1399#define VXGE_HW_RING_RXD_T_CODE(val) vxge_vBIT(val, 12, 4)
1400
1401#define VXGE_HW_RING_RXD_T_CODE_UNUSED VXGE_HW_RING_T_CODE_UNUSED
1402
1403#define VXGE_HW_RING_RXD_SYN_GET(ctrl0) vxge_bVALn(ctrl0, 16, 1)
1404
1405#define VXGE_HW_RING_RXD_IS_ICMP_GET(ctrl0) vxge_bVALn(ctrl0, 17, 1)
1406
1407#define VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(ctrl0) vxge_bVALn(ctrl0, 18, 1)
1408
1409#define VXGE_HW_RING_RXD_RTH_IT_HIT_GET(ctrl0) vxge_bVALn(ctrl0, 19, 1)
1410
1411#define VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(ctrl0) vxge_bVALn(ctrl0, 20, 4)
1412
1413#define VXGE_HW_RING_RXD_IS_VLAN_GET(ctrl0) vxge_bVALn(ctrl0, 24, 1)
1414
1415#define VXGE_HW_RING_RXD_ETHER_ENCAP_GET(ctrl0) vxge_bVALn(ctrl0, 25, 2)
1416
1417#define VXGE_HW_RING_RXD_FRAME_PROTO_GET(ctrl0) vxge_bVALn(ctrl0, 27, 5)
1418
1419#define VXGE_HW_RING_RXD_L3_CKSUM_GET(ctrl0) vxge_bVALn(ctrl0, 32, 16)
1420
1421#define VXGE_HW_RING_RXD_L4_CKSUM_GET(ctrl0) vxge_bVALn(ctrl0, 48, 16)
1422
1423 u64 control_1;
1424
1425#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(ctrl1) vxge_bVALn(ctrl1, 2, 14)
1426#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE(val) vxge_vBIT(val, 2, 14)
1427#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK vxge_vBIT(0x3FFF, 2, 14)
1428
1429#define VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(ctrl1) vxge_bVALn(ctrl1, 16, 32)
1430
1431#define VXGE_HW_RING_RXD_VLAN_TAG_GET(ctrl1) vxge_bVALn(ctrl1, 48, 16)
1432
1433 u64 buffer0_ptr;
1434};
1435
1436enum vxge_hw_rth_algoritms {
1437 RTH_ALG_JENKINS = 0,
1438 RTH_ALG_MS_RSS = 1,
1439 RTH_ALG_CRC32C = 2
1440};
1441
1442/**
1443 * struct vxge_hw_rth_hash_types - RTH hash types.
1444 * @hash_type_tcpipv4_en: Enables RTH field type HashTypeTcpIPv4
1445 * @hash_type_ipv4_en: Enables RTH field type HashTypeIPv4
1446 * @hash_type_tcpipv6_en: Enables RTH field type HashTypeTcpIPv6
1447 * @hash_type_ipv6_en: Enables RTH field type HashTypeIPv6
1448 * @hash_type_tcpipv6ex_en: Enables RTH field type HashTypeTcpIPv6Ex
1449 * @hash_type_ipv6ex_en: Enables RTH field type HashTypeIPv6Ex
1450 *
1451 * Used to pass RTH hash types to rts_rts_set.
1452 *
1453 * See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get().
1454 */
1455struct vxge_hw_rth_hash_types {
1456 u8 hash_type_tcpipv4_en:1,
1457 hash_type_ipv4_en:1,
1458 hash_type_tcpipv6_en:1,
1459 hash_type_ipv6_en:1,
1460 hash_type_tcpipv6ex_en:1,
1461 hash_type_ipv6ex_en:1;
1462};
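
A sketch of enabling RTH on an open vpath handle with the hash-type selectors above and vxge_hw_vpath_rts_rth_set() (declared near the end of this header); the Jenkins algorithm and bucket size of 8 are arbitrary example values.

static enum vxge_hw_status example_enable_rth(struct __vxge_hw_vpath_handle *vp)
{
	struct vxge_hw_rth_hash_types hash_types = {
		.hash_type_tcpipv4_en	= 1,	/* hash TCP/IPv4 flows */
		.hash_type_ipv4_en	= 1,	/* fall back to IPv4-only hash */
	};

	return vxge_hw_vpath_rts_rth_set(vp, RTH_ALG_JENKINS, &hash_types, 8);
}
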
1463
1464void vxge_hw_device_debug_set(
1465 struct __vxge_hw_device *devh,
1466 enum vxge_debug_level level,
1467 u32 mask);
1468
1469u32
1470vxge_hw_device_error_level_get(struct __vxge_hw_device *devh);
1471
1472u32
1473vxge_hw_device_trace_level_get(struct __vxge_hw_device *devh);
1474
1475/**
1476 * vxge_hw_ring_rxd_size_get - Get the size of ring descriptor.
1477 * @buf_mode: Buffer mode (1, 3 or 5)
1478 *
1479 * This function returns the size of RxD for given buffer mode
1480 */
1481static inline u32 vxge_hw_ring_rxd_size_get(u32 buf_mode)
1482{
1483 return sizeof(struct vxge_hw_ring_rxd_1);
1484}
1485
1486/**
1487 * vxge_hw_ring_rxds_per_block_get - Get the number of rxds per block.
1488 * @buf_mode: Buffer mode (1 buffer mode only)
1489 *
1490 * This function returns the number of RxDs per RxD block for the given buffer mode
1491 */
1492static inline u32 vxge_hw_ring_rxds_per_block_get(u32 buf_mode)
1493{
1494 return (u32)((VXGE_HW_BLOCK_SIZE-16) /
1495 sizeof(struct vxge_hw_ring_rxd_1));
1496}
1497
1498/**
1499 * vxge_hw_ring_rxd_1b_set - Prepare 1-buffer-mode descriptor.
1500 * @rxdh: Descriptor handle.
1501 * @dma_pointer: DMA address of a single receive buffer this descriptor
1502 * should carry. Note that by the time vxge_hw_ring_rxd_1b_set is called,
1503 * the receive buffer should be already mapped to the device
1504 * @size: Size of the receive @dma_pointer buffer.
1505 *
1506 * Prepare 1-buffer-mode Rx descriptor for posting
1507 * (via vxge_hw_ring_rxd_post()).
1508 *
1509 * This inline helper-function does not return a value and always
1510 * succeeds.
1511 *
1512 */
1513static inline
1514void vxge_hw_ring_rxd_1b_set(
1515 void *rxdh,
1516 dma_addr_t dma_pointer,
1517 u32 size)
1518{
1519 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1520 rxdp->buffer0_ptr = dma_pointer;
1521 rxdp->control_1 &= ~VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK;
1522 rxdp->control_1 |= VXGE_HW_RING_RXD_1_BUFFER0_SIZE(size);
1523}
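
A sketch of the replenish step a driver might perform with a freshly reserved RxD: DMA-map the receive buffer and point the descriptor at it. The skb/PCI mapping calls are assumptions about the caller (error handling omitted); only vxge_hw_ring_rxd_1b_set() comes from this header.

static void example_prepare_rxd(void *rxdh, struct sk_buff *skb,
				struct pci_dev *pdev, u32 buf_size)
{
	dma_addr_t dma = pci_map_single(pdev, skb->data, buf_size,
					PCI_DMA_FROMDEVICE);

	/* Point the 1-buffer RxD at the mapped receive buffer. */
	vxge_hw_ring_rxd_1b_set(rxdh, dma, buf_size);
}
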
1524
1525/**
1526 * vxge_hw_ring_rxd_1b_get - Get data from the completed 1-buf
1527 * descriptor.
1528 * @ring_handle: Ring handle.
1529 * @rxdh: Descriptor handle.
1530 * @dma_pointer: DMA address of a single receive buffer this descriptor
1531 * carries. Returned by HW.
1532 * @pkt_length: Length (in bytes) of the data in the buffer pointed to by
1533 * the descriptor. Returned by HW.
1534 * Retrieve protocol data from the completed 1-buffer-mode Rx descriptor.
1535 * This inline helper-function uses completed descriptor to populate receive
1536 * buffer pointer and other "out" parameters. The function always succeeds.
1537 *
1538 */
1539static inline
1540void vxge_hw_ring_rxd_1b_get(
1541 struct __vxge_hw_ring *ring_handle,
1542 void *rxdh,
1543 u32 *pkt_length)
1544{
1545 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1546
1547 *pkt_length =
1548 (u32)VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(rxdp->control_1);
1549}
1550
1551/**
1552 * vxge_hw_ring_rxd_1b_info_get - Get extended information associated with
1553 * a completed receive descriptor for 1b mode.
1554 * @ring_handle: Ring handle.
1555 * @rxdh: Descriptor handle.
1556 * @rxd_info: Descriptor information
1557 *
1558 * Retrieve extended information associated with a completed receive descriptor.
1559 *
1560 */
1561static inline
1562void vxge_hw_ring_rxd_1b_info_get(
1563 struct __vxge_hw_ring *ring_handle,
1564 void *rxdh,
1565 struct vxge_hw_ring_rxd_info *rxd_info)
1566{
1567
1568 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1569 rxd_info->syn_flag =
1570 (u32)VXGE_HW_RING_RXD_SYN_GET(rxdp->control_0);
1571 rxd_info->is_icmp =
1572 (u32)VXGE_HW_RING_RXD_IS_ICMP_GET(rxdp->control_0);
1573 rxd_info->fast_path_eligible =
1574 (u32)VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(rxdp->control_0);
1575 rxd_info->l3_cksum_valid =
1576 (u32)VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(rxdp->control_0);
1577 rxd_info->l3_cksum =
1578 (u32)VXGE_HW_RING_RXD_L3_CKSUM_GET(rxdp->control_0);
1579 rxd_info->l4_cksum_valid =
1580 (u32)VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(rxdp->control_0);
1581 rxd_info->l4_cksum =
1582 (u32)VXGE_HW_RING_RXD_L4_CKSUM_GET(rxdp->control_0);
1583 rxd_info->frame =
1584 (u32)VXGE_HW_RING_RXD_ETHER_ENCAP_GET(rxdp->control_0);
1585 rxd_info->proto =
1586 (u32)VXGE_HW_RING_RXD_FRAME_PROTO_GET(rxdp->control_0);
1587 rxd_info->is_vlan =
1588 (u32)VXGE_HW_RING_RXD_IS_VLAN_GET(rxdp->control_0);
1589 rxd_info->vlan =
1590 (u32)VXGE_HW_RING_RXD_VLAN_TAG_GET(rxdp->control_1);
1591 rxd_info->rth_bucket =
1592 (u32)VXGE_HW_RING_RXD_RTH_BUCKET_GET(rxdp->control_0);
1593 rxd_info->rth_it_hit =
1594 (u32)VXGE_HW_RING_RXD_RTH_IT_HIT_GET(rxdp->control_0);
1595 rxd_info->rth_spdm_hit =
1596 (u32)VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(rxdp->control_0);
1597 rxd_info->rth_hash_type =
1598 (u32)VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(rxdp->control_0);
1599 rxd_info->rth_value =
1600 (u32)VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(rxdp->control_1);
1601}
1602
1603/**
1604 * vxge_hw_ring_rxd_private_get - Get driver private per-descriptor data
1605 * of a 1b-mode or 3b-mode ring.
1606 * @rxdh: Descriptor handle.
1607 *
1608 * Returns: private driver info associated with the descriptor.
1609 * The driver requests per-descriptor space via struct vxge_hw_ring_attr.
1610 *
1611 */
1612static inline void *vxge_hw_ring_rxd_private_get(void *rxdh)
1613{
1614 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1615 return (void *)(size_t)rxdp->host_control;
1616}
1617
1618/**
1619 * vxge_hw_fifo_txdl_cksum_set_bits - Offload checksum.
1620 * @txdlh: Descriptor handle.
1621 * @cksum_bits: Specifies which checksums are to be offloaded: IPv4,
1622 * and/or TCP and/or UDP.
1623 *
1624 * Ask Titan to calculate IPv4 & transport checksums for _this_ transmit
1625 * descriptor.
1626 * This API is part of the preparation of the transmit descriptor for posting
1627 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
1628 * vxge_hw_fifo_txdl_mss_set(), vxge_hw_fifo_txdl_buffer_set_aligned(),
1629 * and vxge_hw_fifo_txdl_buffer_set().
1630 * All these APIs fill in the fields of the fifo descriptor,
1631 * in accordance with the Titan specification.
1632 *
1633 */
1634static inline void vxge_hw_fifo_txdl_cksum_set_bits(void *txdlh, u64 cksum_bits)
1635{
1636 struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
1637 txdp->control_1 |= cksum_bits;
1638}
1639
1640/**
1641 * vxge_hw_fifo_txdl_mss_set - Set MSS.
1642 * @txdlh: Descriptor handle.
1643 * @mss: MSS size for _this_ TCP connection. Passed by TCP stack down to the
1644 * driver, which in turn inserts the MSS into the @txdlh.
1645 *
1646 * This API is part of the preparation of the transmit descriptor for posting
1647 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
1648 * vxge_hw_fifo_txdl_buffer_set(), vxge_hw_fifo_txdl_buffer_set_aligned(),
1649 * and vxge_hw_fifo_txdl_cksum_set_bits().
1650 * All these APIs fill in the fields of the fifo descriptor,
1651 * in accordance with the Titan specification.
1652 *
1653 */
1654static inline void vxge_hw_fifo_txdl_mss_set(void *txdlh, int mss)
1655{
1656 struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
1657
1658 txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_EN;
1659 txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_MSS(mss);
1660}
1661
1662/**
1663 * vxge_hw_fifo_txdl_vlan_set - Set VLAN tag.
1664 * @txdlh: Descriptor handle.
1665 * @vlan_tag: 16bit VLAN tag.
1666 *
1667 * Insert VLAN tag into specified transmit descriptor.
1668 * The actual insertion of the tag into outgoing frame is done by the hardware.
1669 */
1670static inline void vxge_hw_fifo_txdl_vlan_set(void *txdlh, u16 vlan_tag)
1671{
1672 struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
1673
1674 txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_ENABLE;
1675 txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_TAG(vlan_tag);
1676}
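
Putting the three preparation helpers together: a sketch of decorating a reserved TxDL with checksum offload, an LSO MSS and a VLAN tag before it is posted; the surrounding reserve/post calls are not shown and the function name is hypothetical.

static void example_decorate_txdl(void *txdlh, int mss, u16 vlan_tag)
{
	/* Offload IPv4 header and TCP checksums for this frame. */
	vxge_hw_fifo_txdl_cksum_set_bits(txdlh,
			VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
			VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN);

	if (mss)			/* large send: segment at MSS bytes */
		vxge_hw_fifo_txdl_mss_set(txdlh, mss);

	if (vlan_tag)			/* have the adapter insert the tag */
		vxge_hw_fifo_txdl_vlan_set(txdlh, vlan_tag);
}
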
1677
1678/**
1679 * vxge_hw_fifo_txdl_private_get - Retrieve per-descriptor private data.
1680 * @txdlh: Descriptor handle.
1681 *
1682 * Retrieve per-descriptor private data.
1683 * Note that driver requests per-descriptor space via
1684 * struct vxge_hw_fifo_attr passed to
1685 * vxge_hw_vpath_open().
1686 *
1687 * Returns: private driver data associated with the descriptor.
1688 */
1689static inline void *vxge_hw_fifo_txdl_private_get(void *txdlh)
1690{
1691 struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
1692
1693 return (void *)(size_t)txdp->host_control;
1694}
1695
1696/**
1697 * struct vxge_hw_ring_attr - Ring open "template".
1698 * @callback: Ring completion callback. HW invokes the callback when there
1699 * are new completions on that ring. In many implementations
1700 * the @callback executes in the hw interrupt context.
1701 * @rxd_init: Ring's descriptor-initialize callback.
1702 * See vxge_hw_ring_rxd_init_f{}.
1703 * If not NULL, HW invokes the callback when opening
1704 * the ring.
1705 * @rxd_term: Ring's descriptor-terminate callback. If not NULL,
1706 * HW invokes the callback when closing the corresponding ring.
1707 * See also vxge_hw_ring_rxd_term_f{}.
1708 * @userdata: User-defined "context" of _that_ ring. Passed back to the
1709 * user as one of the @callback, @rxd_init, and @rxd_term arguments.
1710 * @per_rxd_space: If specified (i.e., greater than zero): extra space
1711 * reserved by HW per each receive descriptor.
1712 * Can be used to store
1713 * and retrieve on completion, information specific
1714 * to the driver.
1715 *
1716 * Ring open "template". User fills the structure with ring
1717 * attributes and passes it to vxge_hw_vpath_open().
1718 */
1719struct vxge_hw_ring_attr {
1720 enum vxge_hw_status (*callback)(
1721 struct __vxge_hw_ring *ringh,
1722 void *rxdh,
1723 u8 t_code,
1724 void *userdata);
1725
1726 enum vxge_hw_status (*rxd_init)(
1727 void *rxdh,
1728 void *userdata);
1729
1730 void (*rxd_term)(
1731 void *rxdh,
1732 enum vxge_hw_rxd_state state,
1733 void *userdata);
1734
1735 void *userdata;
1736 u32 per_rxd_space;
1737};
1738
1739/**
1740 * function vxge_hw_fifo_callback_f - FIFO callback.
1741 * @vpath_handle: Virtual path whose fifo contains one or more completed
1742 * descriptors.
1743 * @txdlh: First completed descriptor.
1744 * @txdl_priv: Pointer to per txdl space allocated
1745 * @t_code: Transfer code, as per Titan User Guide.
1746 * Returned by HW.
1747 * @host_control: Opaque 64bit data stored by driver inside the Titan
1748 * descriptor prior to posting the latter on the fifo
1749 * via vxge_hw_fifo_txdl_post(). The @host_control is returned
1750 * as is to the driver with each completed descriptor.
1751 * @userdata: Opaque per-fifo data specified at fifo open
1752 * time, via vxge_hw_vpath_open().
1753 *
1754 * Fifo completion callback (type declaration). A single per-fifo
1755 * callback is specified at fifo open time, via
1756 * vxge_hw_vpath_open(). Typically gets called as part of the processing
1757 * of the Interrupt Service Routine.
1758 *
1759 * Fifo callback gets called by HW if, and only if, there is at least
1760 * one new completion on a given fifo. Upon processing the first @txdlh driver
1761 * is _supposed_ to continue consuming completions using:
1762 * - vxge_hw_fifo_txdl_next_completed()
1763 *
1764 * Note that failure to process new completions in a timely fashion
1765 * leads to VXGE_HW_INF_OUT_OF_DESCRIPTORS condition.
1766 *
1767 * Non-zero @t_code means failure to process transmit descriptor.
1768 *
1769 * In the "transmit" case the failure could happen, for instance, when the
1770 * link is down, in which case Titan completes the descriptor because it
1771 * is not able to send the data out.
1772 *
1773 * For details please refer to Titan User Guide.
1774 *
1775 * See also: vxge_hw_fifo_txdl_next_completed(), vxge_hw_fifo_txdl_term_f{}.
1776 */
1777/**
1778 * function vxge_hw_fifo_txdl_term_f - Terminate descriptor callback.
1779 * @txdlh: First completed descriptor.
1780 * @txdl_priv: Pointer to per txdl space allocated
1781 * @state: One of the enum vxge_hw_txdl_state{} enumerated states.
1782 * @userdata: Per-fifo user data (a.k.a. context) specified at
1783 * fifo open time, via vxge_hw_vpath_open().
1784 *
1785 * Terminate descriptor callback. Unless NULL is specified in the
1786 * struct vxge_hw_fifo_attr{} structure passed to vxge_hw_vpath_open(),
1787 * HW invokes the callback as part of closing fifo, prior to
1788 * de-allocating the ring and associated data structures
1789 * (including descriptors).
1790 * The driver should utilize the callback to (for instance) unmap
1791 * and free DMA data buffers associated with the posted (state =
1792 * VXGE_HW_TXDL_STATE_POSTED) descriptors,
1793 * as well as other relevant cleanup functions.
1794 *
1795 * See also: struct vxge_hw_fifo_attr{}
1796 */
1797/**
1798 * struct vxge_hw_fifo_attr - Fifo open "template".
1799 * @callback: Fifo completion callback. HW invokes the callback when there
1800 * are new completions on that fifo. In many implementations
1801 * the @callback executes in the hw interrupt context.
1802 * @txdl_term: Fifo's descriptor-terminate callback. If not NULL,
1803 * HW invokes the callback when closing the corresponding fifo.
1804 * See also vxge_hw_fifo_txdl_term_f{}.
1805 * @userdata: User-defined "context" of _that_ fifo. Passed back to the
1806 * user as one of the @callback, and @txdl_term arguments.
1807 * @per_txdl_space: If specified (i.e., greater than zero): extra space
1808 * reserved by HW per each transmit descriptor. Can be used to
1809 * store, and retrieve on completion, information specific
1810 * to the driver.
1811 *
1812 * Fifo open "template". User fills the structure with fifo
1813 * attributes and passes it to vxge_hw_vpath_open().
1814 */
1815struct vxge_hw_fifo_attr {
1816
1817 enum vxge_hw_status (*callback)(
1818 struct __vxge_hw_fifo *fifo_handle,
1819 void *txdlh,
1820 enum vxge_hw_fifo_tcode t_code,
1821 void *userdata,
1822 struct sk_buff ***skb_ptr,
1823 int nr_skb, int *more);
1824
1825 void (*txdl_term)(
1826 void *txdlh,
1827 enum vxge_hw_txdl_state state,
1828 void *userdata);
1829
1830 void *userdata;
1831 u32 per_txdl_space;
1832};
1833
1834/**
1835 * struct vxge_hw_vpath_attr - Attributes of virtual path
1836 * @vp_id: Identifier of Virtual Path
1837 * @ring_attr: Attributes of ring for non-offload receive
1838 * @fifo_attr: Attributes of fifo for non-offload transmit
1839 *
1840 * Attributes of virtual path. This structure is passed as parameter
1841 * to the vxge_hw_vpath_open() routine to set the attributes of ring and fifo.
1842 */
1843struct vxge_hw_vpath_attr {
1844 u32 vp_id;
1845 struct vxge_hw_ring_attr ring_attr;
1846 struct vxge_hw_fifo_attr fifo_attr;
1847};
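
A sketch of filling the vpath attributes and opening a virtual path via vxge_hw_vpath_open() (declared below); the stub completion callbacks only demonstrate the required prototypes and do no real work.

static enum vxge_hw_status example_rx_cb(struct __vxge_hw_ring *ringh, void *rxdh,
					 u8 t_code, void *userdata)
{
	return VXGE_HW_OK;	/* a real driver consumes RX completions here */
}

static enum vxge_hw_status example_tx_cb(struct __vxge_hw_fifo *fifoh, void *txdlh,
					 enum vxge_hw_fifo_tcode t_code,
					 void *userdata, struct sk_buff ***skb_ptr,
					 int nr_skb, int *more)
{
	return VXGE_HW_OK;	/* a real driver frees TxDLs and skbs here */
}

static enum vxge_hw_status example_open_vpath(struct __vxge_hw_device *devh,
					      u32 vp_id, void *ctx,
					      struct __vxge_hw_vpath_handle **vph)
{
	struct vxge_hw_vpath_attr attr = { .vp_id = vp_id };

	attr.ring_attr.callback = example_rx_cb;	/* RX completions */
	attr.ring_attr.userdata = ctx;
	attr.fifo_attr.callback = example_tx_cb;	/* TX completions */
	attr.fifo_attr.userdata = ctx;

	return vxge_hw_vpath_open(devh, &attr, vph);
}
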
1848
1849enum vxge_hw_status __devinit vxge_hw_device_hw_info_get(
1850 void __iomem *bar0,
1851 struct vxge_hw_device_hw_info *hw_info);
1852
1853enum vxge_hw_status __devinit vxge_hw_device_config_default_get(
1854 struct vxge_hw_device_config *device_config);
1855
1856/**
1857 * vxge_hw_device_link_state_get - Get link state.
1858 * @devh: HW device handle.
1859 *
1860 * Get link state.
1861 * Returns: link state.
1862 */
1863static inline
1864enum vxge_hw_device_link_state vxge_hw_device_link_state_get(
1865 struct __vxge_hw_device *devh)
1866{
1867 return devh->link_state;
1868}
1869
1870void vxge_hw_device_terminate(struct __vxge_hw_device *devh);
1871
1872const u8 *
1873vxge_hw_device_serial_number_get(struct __vxge_hw_device *devh);
1874
1875u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *devh);
1876
1877const u8 *
1878vxge_hw_device_product_name_get(struct __vxge_hw_device *devh);
1879
1880enum vxge_hw_status __devinit vxge_hw_device_initialize(
1881 struct __vxge_hw_device **devh,
1882 struct vxge_hw_device_attr *attr,
1883 struct vxge_hw_device_config *device_config);
1884
1885enum vxge_hw_status vxge_hw_device_getpause_data(
1886 struct __vxge_hw_device *devh,
1887 u32 port,
1888 u32 *tx,
1889 u32 *rx);
1890
1891enum vxge_hw_status vxge_hw_device_setpause_data(
1892 struct __vxge_hw_device *devh,
1893 u32 port,
1894 u32 tx,
1895 u32 rx);
1896
1897static inline void *vxge_os_dma_malloc(struct pci_dev *pdev,
1898 unsigned long size,
1899 struct pci_dev **p_dmah,
1900 struct pci_dev **p_dma_acch)
1901{
1902 gfp_t flags;
1903 void *vaddr;
1904 unsigned long misaligned = 0;
1905 int realloc_flag = 0;
1906 *p_dma_acch = *p_dmah = NULL;
1907
1908 if (in_interrupt())
1909 flags = GFP_ATOMIC | GFP_DMA;
1910 else
1911 flags = GFP_KERNEL | GFP_DMA;
1912realloc:
1913 vaddr = kmalloc((size), flags);
1914 if (vaddr == NULL)
1915 return vaddr;
1916 misaligned = (unsigned long)VXGE_ALIGN((unsigned long)vaddr,
1917 VXGE_CACHE_LINE_SIZE);
1918 if (realloc_flag)
1919 goto out;
1920
1921 if (misaligned) {
1922 /* misaligned, free current one and try allocating
1923 * size + VXGE_CACHE_LINE_SIZE memory
1924 */
1925 kfree((void *) vaddr);
1926 size += VXGE_CACHE_LINE_SIZE;
1927 realloc_flag = 1;
1928 goto realloc;
1929 }
1930out:
1931 *(unsigned long *)p_dma_acch = misaligned;
1932 vaddr = (void *)((u8 *)vaddr + misaligned);
1933 return vaddr;
1934}
1935
1936static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
1937 struct pci_dev **p_dma_acch)
1938{
1939 unsigned long misaligned = *(unsigned long *)p_dma_acch;
1940 u8 *tmp = (u8 *)vaddr;
1941 tmp -= misaligned;
1942 kfree((void *)tmp);
1943}
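
A sketch pairing the two OS helpers above: allocate a VXGE_CACHE_LINE_SIZE aligned buffer and release it again. The 4096-byte size is an arbitrary example; note that the acc handle filled in by the allocation must be handed back to the free routine.

static int example_aligned_buffer(struct pci_dev *pdev)
{
	struct pci_dev *dmah, *acch;
	void *buf;

	buf = vxge_os_dma_malloc(pdev, 4096, &dmah, &acch);
	if (!buf)
		return -ENOMEM;

	/* ... use the cache-line aligned buffer ... */

	vxge_os_dma_free(pdev, buf, &acch);
	return 0;
}
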
1944
1945/*
1946 * __vxge_hw_mempool_item_priv - will return pointer on per item private space
1947 */
1948static inline void*
1949__vxge_hw_mempool_item_priv(
1950 struct vxge_hw_mempool *mempool,
1951 u32 memblock_idx,
1952 void *item,
1953 u32 *memblock_item_idx)
1954{
1955 ptrdiff_t offset;
1956 void *memblock = mempool->memblocks_arr[memblock_idx];
1957
1958
1959 offset = (u32)((u8 *)item - (u8 *)memblock);
1960 vxge_assert(offset >= 0 && (u32)offset < mempool->memblock_size);
1961
1962 (*memblock_item_idx) = (u32) offset / mempool->item_size;
1963 vxge_assert((*memblock_item_idx) < mempool->items_per_memblock);
1964
1965 return (u8 *)mempool->memblocks_priv_arr[memblock_idx] +
1966 (*memblock_item_idx) * mempool->items_priv_size;
1967}
1968
1969/*
1970 * __vxge_hw_fifo_txdl_priv - Return the per-TxDL HW-private data
1971 * associated with a TxD of the fifo.
1972 * @fifo: Fifo
1973 * @txdp: Pointer to a TxD
1974 */
1975static inline struct __vxge_hw_fifo_txdl_priv *
1976__vxge_hw_fifo_txdl_priv(
1977 struct __vxge_hw_fifo *fifo,
1978 struct vxge_hw_fifo_txd *txdp)
1979{
1980 return (struct __vxge_hw_fifo_txdl_priv *)
1981 (((char *)((ulong)txdp->host_control)) +
1982 fifo->per_txdl_space);
1983}
1984
1985enum vxge_hw_status vxge_hw_vpath_open(
1986 struct __vxge_hw_device *devh,
1987 struct vxge_hw_vpath_attr *attr,
1988 struct __vxge_hw_vpath_handle **vpath_handle);
1989
1990enum vxge_hw_status vxge_hw_vpath_close(
1991 struct __vxge_hw_vpath_handle *vpath_handle);
1992
1993enum vxge_hw_status
1994vxge_hw_vpath_reset(
1995 struct __vxge_hw_vpath_handle *vpath_handle);
1996
1997enum vxge_hw_status
1998vxge_hw_vpath_recover_from_reset(
1999 struct __vxge_hw_vpath_handle *vpath_handle);
2000
2001void
2002vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp);
2003
2004enum vxge_hw_status
2005vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ringh);
2006
2007enum vxge_hw_status vxge_hw_vpath_mtu_set(
2008 struct __vxge_hw_vpath_handle *vpath_handle,
2009 u32 new_mtu);
2010
2011void
2012vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp);
2013
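/*
 * Fallback 64-bit MMIO accessors for architectures that do not provide
 * readq()/writeq(): the 64-bit register is accessed as two 32-bit halves,
 * the lower word at addr and the upper word at addr + 4.
 */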
2014#ifndef readq
2015static inline u64 readq(void __iomem *addr)
2016{
2017 u64 ret = 0;
2018 ret = readl(addr + 4);
2019 ret <<= 32;
2020 ret |= readl(addr);
2021
2022 return ret;
2023}
2024#endif
2025
2026#ifndef writeq
2027static inline void writeq(u64 val, void __iomem *addr)
2028{
2029 writel((u32) (val), addr);
2030 writel((u32) (val >> 32), (addr + 4));
2031}
2032#endif
2033
2034static inline void __vxge_hw_pio_mem_write32_upper(u32 val, void __iomem *addr)
2035{
2036 writel(val, addr + 4);
2037}
2038
2039static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr)
2040{
2041 writel(val, addr);
2042}
2043
2044enum vxge_hw_status
2045vxge_hw_device_flick_link_led(struct __vxge_hw_device *devh, u64 on_off);
2046
2047enum vxge_hw_status
2048vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
2049
2050/**
2051 * vxge_debug_ll
2052 * @level: level of debug verbosity.
2053 * @mask: mask identifying the debug component.
2054 * @buf: Circular buffer for tracing
2055 * @fmt: printf-like format string
2056 *
2057 * Provides logging facilities. Can be customized on a per-module
2058 * basis and/or with debug levels. Input parameters, except
2059 * module and level, are the same as for POSIX printf. This macro
2060 * may be compiled out if the DEBUG macro was never defined.
2061 * See also: enum vxge_debug_level{}.
2062 */
2063#if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK)
2064#define vxge_debug_ll(level, mask, fmt, ...) do { \
2065 if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) || \
2066 (level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\
2067 if ((mask & VXGE_DEBUG_MASK) == mask) \
2068 printk(fmt "\n", __VA_ARGS__); \
2069} while (0)
2070#else
2071#define vxge_debug_ll(level, mask, fmt, ...)
2072#endif
2073
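/*
 * Illustrative usage sketch (not part of the original header): a driver
 * call site would normally pass one of the vxge debug levels together with
 * a component mask; in the hypothetical call below, the chosen mask and the
 * dev_name variable are placeholders only:
 *
 *	vxge_debug_ll(VXGE_TRACE, VXGE_DEBUG_TRACE_MASK,
 *		      "%s: %s:%d", dev_name, __func__, __LINE__);
 */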
2074enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
2075 struct __vxge_hw_vpath_handle **vpath_handles,
2076 u32 vpath_count,
2077 u8 *mtable,
2078 u8 *itable,
2079 u32 itable_size);
2080
2081enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
2082 struct __vxge_hw_vpath_handle *vpath_handle,
2083 enum vxge_hw_rth_algoritms algorithm,
2084 struct vxge_hw_rth_hash_types *hash_type,
2085 u16 bucket_size);
2086
2087enum vxge_hw_status
2088__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id);
2089
2090#define VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT 5
2091#define VXGE_HW_MAX_POLLING_COUNT 100
2092
2093void
2094vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev);
2095
2096enum vxge_hw_status
2097vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
2098 u32 *minor, u32 *build);
2099
2100enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev);
2101
2102enum vxge_hw_status
2103vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *filebuf,
2104 int size);
2105
2106enum vxge_hw_status
2107vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
2108 struct eprom_image *eprom_image_data);
2109
2110int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id);
2111#endif
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
new file mode 100644
index 000000000000..92dd72d3f9de
--- /dev/null
+++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
@@ -0,0 +1,1132 @@
1/******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
9 *
10 * vxge-ethtool.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/
14#include <linux/ethtool.h>
15#include <linux/slab.h>
16#include <linux/pci.h>
17#include <linux/etherdevice.h>
18
19#include "vxge-ethtool.h"
20
21/**
22 * vxge_ethtool_sset - Sets different link parameters.
23 * @dev: device pointer.
24 * @info: pointer to the structure with parameters given by ethtool to set
25 * link information.
26 *
27 * The function validates the link parameters provided by the user; the
28 * NIC supports only 10Gb full duplex with autonegotiation disabled.
29 * Return value:
30 * 0 on success.
31 */
32static int vxge_ethtool_sset(struct net_device *dev, struct ethtool_cmd *info)
33{
34 /* We currently only support 10Gb/FULL */
35 if ((info->autoneg == AUTONEG_ENABLE) ||
36 (ethtool_cmd_speed(info) != SPEED_10000) ||
37 (info->duplex != DUPLEX_FULL))
38 return -EINVAL;
39
40 return 0;
41}
42
43/**
44 * vxge_ethtool_gset - Return link specific information.
45 * @dev: device pointer.
46 * @info: pointer to the structure with parameters given by ethtool
47 * to return link information.
48 *
49 * Returns link-specific information such as speed, duplex, etc. to ethtool.
50 * Return value:
51 * 0 on success.
52 */
53static int vxge_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
54{
55 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
56 info->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
57 info->port = PORT_FIBRE;
58
59 info->transceiver = XCVR_EXTERNAL;
60
61 if (netif_carrier_ok(dev)) {
62 ethtool_cmd_speed_set(info, SPEED_10000);
63 info->duplex = DUPLEX_FULL;
64 } else {
65 ethtool_cmd_speed_set(info, -1);
66 info->duplex = -1;
67 }
68
69 info->autoneg = AUTONEG_DISABLE;
70 return 0;
71}
72
73/**
74 * vxge_ethtool_gdrvinfo - Returns driver specific information.
75 * @dev: device pointer.
76 * @info: pointer to the structure with parameters given by ethtool to
77 * return driver information.
78 *
79 * Returns driver-specific information such as name, version, etc. to ethtool.
80 */
81static void vxge_ethtool_gdrvinfo(struct net_device *dev,
82 struct ethtool_drvinfo *info)
83{
84 struct vxgedev *vdev = netdev_priv(dev);
85 strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(VXGE_DRIVER_NAME));
86 strlcpy(info->version, DRV_VERSION, sizeof(DRV_VERSION));
87 strlcpy(info->fw_version, vdev->fw_version, VXGE_HW_FW_STRLEN);
88 strlcpy(info->bus_info, pci_name(vdev->pdev), sizeof(info->bus_info));
89 info->regdump_len = sizeof(struct vxge_hw_vpath_reg)
90 * vdev->no_of_vpath;
91
92 info->n_stats = STAT_LEN;
93}
94
95/**
96 * vxge_ethtool_gregs - dumps the vpath register space of the Titan into the buffer.
97 * @dev: device pointer.
98 * @regs: pointer to the structure with parameters given by ethtool for
99 * dumping the registers.
100 * @space: The output buffer into which all the registers are dumped.
101 *
102 * Dumps the vpath register space of Titan NIC into the user given
103 * buffer area.
104 */
105static void vxge_ethtool_gregs(struct net_device *dev,
106 struct ethtool_regs *regs, void *space)
107{
108 int index, offset;
109 enum vxge_hw_status status;
110 u64 reg;
111 u64 *reg_space = (u64 *)space;
112 struct vxgedev *vdev = netdev_priv(dev);
113 struct __vxge_hw_device *hldev = vdev->devh;
114
115 regs->len = sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
116 regs->version = vdev->pdev->subsystem_device;
117 for (index = 0; index < vdev->no_of_vpath; index++) {
118 for (offset = 0; offset < sizeof(struct vxge_hw_vpath_reg);
119 offset += 8) {
120 status = vxge_hw_mgmt_reg_read(hldev,
121 vxge_hw_mgmt_reg_type_vpath,
122 vdev->vpaths[index].device_id,
123 offset, &reg);
124 if (status != VXGE_HW_OK) {
125 vxge_debug_init(VXGE_ERR,
126 "%s:%d Getting reg dump Failed",
127 __func__, __LINE__);
128 return;
129 }
130 *reg_space++ = reg;
131 }
132 }
133}
134
135/**
136 * vxge_ethtool_idnic - To physically identify the nic on the system.
137 * @dev : device pointer.
138 * @state : requested LED state
139 *
140 * Used to physically identify the NIC on the system.
141 * Returns 0 on success.
142 */
143static int vxge_ethtool_idnic(struct net_device *dev,
144 enum ethtool_phys_id_state state)
145{
146 struct vxgedev *vdev = netdev_priv(dev);
147 struct __vxge_hw_device *hldev = vdev->devh;
148
149 switch (state) {
150 case ETHTOOL_ID_ACTIVE:
151 vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_ON);
152 break;
153
154 case ETHTOOL_ID_INACTIVE:
155 vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_OFF);
156 break;
157
158 default:
159 return -EINVAL;
160 }
161
162 return 0;
163}
164
165/**
166 * vxge_ethtool_getpause_data - Pause frame generation and reception.
167 * @dev : device pointer.
168 * @ep : pointer to the structure with pause parameters given by ethtool.
169 * Description:
170 * Returns the Pause frame generation and reception capability of the NIC.
171 * Return value:
172 * void
173 */
174static void vxge_ethtool_getpause_data(struct net_device *dev,
175 struct ethtool_pauseparam *ep)
176{
177 struct vxgedev *vdev = netdev_priv(dev);
178 struct __vxge_hw_device *hldev = vdev->devh;
179
180 vxge_hw_device_getpause_data(hldev, 0, &ep->tx_pause, &ep->rx_pause);
181}
182
183/**
184 * vxge_ethtool_setpause_data - set/reset pause frame generation.
185 * @dev : device pointer.
186 * @ep : pointer to the structure with pause parameters given by ethtool.
187 * Description:
188 * It can be used to set or reset Pause frame generation or reception
189 * support of the NIC.
190 * Return value:
191 * int, returns 0 on success.
192 */
193static int vxge_ethtool_setpause_data(struct net_device *dev,
194 struct ethtool_pauseparam *ep)
195{
196 struct vxgedev *vdev = netdev_priv(dev);
197 struct __vxge_hw_device *hldev = vdev->devh;
198
199 vxge_hw_device_setpause_data(hldev, 0, ep->tx_pause, ep->rx_pause);
200
201 vdev->config.tx_pause_enable = ep->tx_pause;
202 vdev->config.rx_pause_enable = ep->rx_pause;
203
204 return 0;
205}
206
207static void vxge_get_ethtool_stats(struct net_device *dev,
208 struct ethtool_stats *estats, u64 *tmp_stats)
209{
210 int j, k;
211 enum vxge_hw_status status;
212 enum vxge_hw_status swstatus;
213 struct vxge_vpath *vpath = NULL;
214 struct vxgedev *vdev = netdev_priv(dev);
215 struct __vxge_hw_device *hldev = vdev->devh;
216 struct vxge_hw_xmac_stats *xmac_stats;
217 struct vxge_hw_device_stats_sw_info *sw_stats;
218 struct vxge_hw_device_stats_hw_info *hw_stats;
219
220 u64 *ptr = tmp_stats;
221
222 memset(tmp_stats, 0,
223 vxge_ethtool_get_sset_count(dev, ETH_SS_STATS) * sizeof(u64));
224
225 xmac_stats = kzalloc(sizeof(struct vxge_hw_xmac_stats), GFP_KERNEL);
226 if (xmac_stats == NULL) {
227 vxge_debug_init(VXGE_ERR,
228 "%s : %d Memory Allocation failed for xmac_stats",
229 __func__, __LINE__);
230 return;
231 }
232
233 sw_stats = kzalloc(sizeof(struct vxge_hw_device_stats_sw_info),
234 GFP_KERNEL);
235 if (sw_stats == NULL) {
236 kfree(xmac_stats);
237 vxge_debug_init(VXGE_ERR,
238 "%s : %d Memory Allocation failed for sw_stats",
239 __func__, __LINE__);
240 return;
241 }
242
243 hw_stats = kzalloc(sizeof(struct vxge_hw_device_stats_hw_info),
244 GFP_KERNEL);
245 if (hw_stats == NULL) {
246 kfree(xmac_stats);
247 kfree(sw_stats);
248 vxge_debug_init(VXGE_ERR,
249 "%s : %d Memory Allocation failed for hw_stats",
250 __func__, __LINE__);
251 return;
252 }
253
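	/*
	 * A zero placeholder is emitted ahead of each statistics section so
	 * that the values stay aligned with the section-title strings
	 * reported by vxge_ethtool_get_strings().
	 */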
254 *ptr++ = 0;
255 status = vxge_hw_device_xmac_stats_get(hldev, xmac_stats);
256 if (status != VXGE_HW_OK) {
257 if (status != VXGE_HW_ERR_PRIVILAGED_OPEARATION) {
258 vxge_debug_init(VXGE_ERR,
259 "%s : %d Failure in getting xmac stats",
260 __func__, __LINE__);
261 }
262 }
263 swstatus = vxge_hw_driver_stats_get(hldev, sw_stats);
264 if (swstatus != VXGE_HW_OK) {
265 vxge_debug_init(VXGE_ERR,
266 "%s : %d Failure in getting sw stats",
267 __func__, __LINE__);
268 }
269
270 status = vxge_hw_device_stats_get(hldev, hw_stats);
271 if (status != VXGE_HW_OK) {
272 vxge_debug_init(VXGE_ERR,
273 "%s : %d hw_stats_get error", __func__, __LINE__);
274 }
275
276 for (k = 0; k < vdev->no_of_vpath; k++) {
277 struct vxge_hw_vpath_stats_hw_info *vpath_info;
278
279 vpath = &vdev->vpaths[k];
280 j = vpath->device_id;
281 vpath_info = hw_stats->vpath_info[j];
282 if (!vpath_info) {
283 memset(ptr, 0, (VXGE_HW_VPATH_TX_STATS_LEN +
284 VXGE_HW_VPATH_RX_STATS_LEN) * sizeof(u64));
285 ptr += (VXGE_HW_VPATH_TX_STATS_LEN +
286 VXGE_HW_VPATH_RX_STATS_LEN);
287 continue;
288 }
289
290 *ptr++ = vpath_info->tx_stats.tx_ttl_eth_frms;
291 *ptr++ = vpath_info->tx_stats.tx_ttl_eth_octets;
292 *ptr++ = vpath_info->tx_stats.tx_data_octets;
293 *ptr++ = vpath_info->tx_stats.tx_mcast_frms;
294 *ptr++ = vpath_info->tx_stats.tx_bcast_frms;
295 *ptr++ = vpath_info->tx_stats.tx_ucast_frms;
296 *ptr++ = vpath_info->tx_stats.tx_tagged_frms;
297 *ptr++ = vpath_info->tx_stats.tx_vld_ip;
298 *ptr++ = vpath_info->tx_stats.tx_vld_ip_octets;
299 *ptr++ = vpath_info->tx_stats.tx_icmp;
300 *ptr++ = vpath_info->tx_stats.tx_tcp;
301 *ptr++ = vpath_info->tx_stats.tx_rst_tcp;
302 *ptr++ = vpath_info->tx_stats.tx_udp;
303 *ptr++ = vpath_info->tx_stats.tx_unknown_protocol;
304 *ptr++ = vpath_info->tx_stats.tx_lost_ip;
305 *ptr++ = vpath_info->tx_stats.tx_parse_error;
306 *ptr++ = vpath_info->tx_stats.tx_tcp_offload;
307 *ptr++ = vpath_info->tx_stats.tx_retx_tcp_offload;
308 *ptr++ = vpath_info->tx_stats.tx_lost_ip_offload;
309 *ptr++ = vpath_info->rx_stats.rx_ttl_eth_frms;
310 *ptr++ = vpath_info->rx_stats.rx_vld_frms;
311 *ptr++ = vpath_info->rx_stats.rx_offload_frms;
312 *ptr++ = vpath_info->rx_stats.rx_ttl_eth_octets;
313 *ptr++ = vpath_info->rx_stats.rx_data_octets;
314 *ptr++ = vpath_info->rx_stats.rx_offload_octets;
315 *ptr++ = vpath_info->rx_stats.rx_vld_mcast_frms;
316 *ptr++ = vpath_info->rx_stats.rx_vld_bcast_frms;
317 *ptr++ = vpath_info->rx_stats.rx_accepted_ucast_frms;
318 *ptr++ = vpath_info->rx_stats.rx_accepted_nucast_frms;
319 *ptr++ = vpath_info->rx_stats.rx_tagged_frms;
320 *ptr++ = vpath_info->rx_stats.rx_long_frms;
321 *ptr++ = vpath_info->rx_stats.rx_usized_frms;
322 *ptr++ = vpath_info->rx_stats.rx_osized_frms;
323 *ptr++ = vpath_info->rx_stats.rx_frag_frms;
324 *ptr++ = vpath_info->rx_stats.rx_jabber_frms;
325 *ptr++ = vpath_info->rx_stats.rx_ttl_64_frms;
326 *ptr++ = vpath_info->rx_stats.rx_ttl_65_127_frms;
327 *ptr++ = vpath_info->rx_stats.rx_ttl_128_255_frms;
328 *ptr++ = vpath_info->rx_stats.rx_ttl_256_511_frms;
329 *ptr++ = vpath_info->rx_stats.rx_ttl_512_1023_frms;
330 *ptr++ = vpath_info->rx_stats.rx_ttl_1024_1518_frms;
331 *ptr++ = vpath_info->rx_stats.rx_ttl_1519_4095_frms;
332 *ptr++ = vpath_info->rx_stats.rx_ttl_4096_8191_frms;
333 *ptr++ = vpath_info->rx_stats.rx_ttl_8192_max_frms;
334 *ptr++ = vpath_info->rx_stats.rx_ttl_gt_max_frms;
335 *ptr++ = vpath_info->rx_stats.rx_ip;
336 *ptr++ = vpath_info->rx_stats.rx_accepted_ip;
337 *ptr++ = vpath_info->rx_stats.rx_ip_octets;
338 *ptr++ = vpath_info->rx_stats.rx_err_ip;
339 *ptr++ = vpath_info->rx_stats.rx_icmp;
340 *ptr++ = vpath_info->rx_stats.rx_tcp;
341 *ptr++ = vpath_info->rx_stats.rx_udp;
342 *ptr++ = vpath_info->rx_stats.rx_err_tcp;
343 *ptr++ = vpath_info->rx_stats.rx_lost_frms;
344 *ptr++ = vpath_info->rx_stats.rx_lost_ip;
345 *ptr++ = vpath_info->rx_stats.rx_lost_ip_offload;
346 *ptr++ = vpath_info->rx_stats.rx_various_discard;
347 *ptr++ = vpath_info->rx_stats.rx_sleep_discard;
348 *ptr++ = vpath_info->rx_stats.rx_red_discard;
349 *ptr++ = vpath_info->rx_stats.rx_queue_full_discard;
350 *ptr++ = vpath_info->rx_stats.rx_mpa_ok_frms;
351 }
352 *ptr++ = 0;
353 for (k = 0; k < vdev->max_config_port; k++) {
354 *ptr++ = xmac_stats->aggr_stats[k].tx_frms;
355 *ptr++ = xmac_stats->aggr_stats[k].tx_data_octets;
356 *ptr++ = xmac_stats->aggr_stats[k].tx_mcast_frms;
357 *ptr++ = xmac_stats->aggr_stats[k].tx_bcast_frms;
358 *ptr++ = xmac_stats->aggr_stats[k].tx_discarded_frms;
359 *ptr++ = xmac_stats->aggr_stats[k].tx_errored_frms;
360 *ptr++ = xmac_stats->aggr_stats[k].rx_frms;
361 *ptr++ = xmac_stats->aggr_stats[k].rx_data_octets;
362 *ptr++ = xmac_stats->aggr_stats[k].rx_mcast_frms;
363 *ptr++ = xmac_stats->aggr_stats[k].rx_bcast_frms;
364 *ptr++ = xmac_stats->aggr_stats[k].rx_discarded_frms;
365 *ptr++ = xmac_stats->aggr_stats[k].rx_errored_frms;
366 *ptr++ = xmac_stats->aggr_stats[k].rx_unknown_slow_proto_frms;
367 }
368 *ptr++ = 0;
369 for (k = 0; k < vdev->max_config_port; k++) {
370 *ptr++ = xmac_stats->port_stats[k].tx_ttl_frms;
371 *ptr++ = xmac_stats->port_stats[k].tx_ttl_octets;
372 *ptr++ = xmac_stats->port_stats[k].tx_data_octets;
373 *ptr++ = xmac_stats->port_stats[k].tx_mcast_frms;
374 *ptr++ = xmac_stats->port_stats[k].tx_bcast_frms;
375 *ptr++ = xmac_stats->port_stats[k].tx_ucast_frms;
376 *ptr++ = xmac_stats->port_stats[k].tx_tagged_frms;
377 *ptr++ = xmac_stats->port_stats[k].tx_vld_ip;
378 *ptr++ = xmac_stats->port_stats[k].tx_vld_ip_octets;
379 *ptr++ = xmac_stats->port_stats[k].tx_icmp;
380 *ptr++ = xmac_stats->port_stats[k].tx_tcp;
381 *ptr++ = xmac_stats->port_stats[k].tx_rst_tcp;
382 *ptr++ = xmac_stats->port_stats[k].tx_udp;
383 *ptr++ = xmac_stats->port_stats[k].tx_parse_error;
384 *ptr++ = xmac_stats->port_stats[k].tx_unknown_protocol;
385 *ptr++ = xmac_stats->port_stats[k].tx_pause_ctrl_frms;
386 *ptr++ = xmac_stats->port_stats[k].tx_marker_pdu_frms;
387 *ptr++ = xmac_stats->port_stats[k].tx_lacpdu_frms;
388 *ptr++ = xmac_stats->port_stats[k].tx_drop_ip;
389 *ptr++ = xmac_stats->port_stats[k].tx_marker_resp_pdu_frms;
390 *ptr++ = xmac_stats->port_stats[k].tx_xgmii_char2_match;
391 *ptr++ = xmac_stats->port_stats[k].tx_xgmii_char1_match;
392 *ptr++ = xmac_stats->port_stats[k].tx_xgmii_column2_match;
393 *ptr++ = xmac_stats->port_stats[k].tx_xgmii_column1_match;
394 *ptr++ = xmac_stats->port_stats[k].tx_any_err_frms;
395 *ptr++ = xmac_stats->port_stats[k].tx_drop_frms;
396 *ptr++ = xmac_stats->port_stats[k].rx_ttl_frms;
397 *ptr++ = xmac_stats->port_stats[k].rx_vld_frms;
398 *ptr++ = xmac_stats->port_stats[k].rx_offload_frms;
399 *ptr++ = xmac_stats->port_stats[k].rx_ttl_octets;
400 *ptr++ = xmac_stats->port_stats[k].rx_data_octets;
401 *ptr++ = xmac_stats->port_stats[k].rx_offload_octets;
402 *ptr++ = xmac_stats->port_stats[k].rx_vld_mcast_frms;
403 *ptr++ = xmac_stats->port_stats[k].rx_vld_bcast_frms;
404 *ptr++ = xmac_stats->port_stats[k].rx_accepted_ucast_frms;
405 *ptr++ = xmac_stats->port_stats[k].rx_accepted_nucast_frms;
406 *ptr++ = xmac_stats->port_stats[k].rx_tagged_frms;
407 *ptr++ = xmac_stats->port_stats[k].rx_long_frms;
408 *ptr++ = xmac_stats->port_stats[k].rx_usized_frms;
409 *ptr++ = xmac_stats->port_stats[k].rx_osized_frms;
410 *ptr++ = xmac_stats->port_stats[k].rx_frag_frms;
411 *ptr++ = xmac_stats->port_stats[k].rx_jabber_frms;
412 *ptr++ = xmac_stats->port_stats[k].rx_ttl_64_frms;
413 *ptr++ = xmac_stats->port_stats[k].rx_ttl_65_127_frms;
414 *ptr++ = xmac_stats->port_stats[k].rx_ttl_128_255_frms;
415 *ptr++ = xmac_stats->port_stats[k].rx_ttl_256_511_frms;
416 *ptr++ = xmac_stats->port_stats[k].rx_ttl_512_1023_frms;
417 *ptr++ = xmac_stats->port_stats[k].rx_ttl_1024_1518_frms;
418 *ptr++ = xmac_stats->port_stats[k].rx_ttl_1519_4095_frms;
419 *ptr++ = xmac_stats->port_stats[k].rx_ttl_4096_8191_frms;
420 *ptr++ = xmac_stats->port_stats[k].rx_ttl_8192_max_frms;
421 *ptr++ = xmac_stats->port_stats[k].rx_ttl_gt_max_frms;
422 *ptr++ = xmac_stats->port_stats[k].rx_ip;
423 *ptr++ = xmac_stats->port_stats[k].rx_accepted_ip;
424 *ptr++ = xmac_stats->port_stats[k].rx_ip_octets;
425 *ptr++ = xmac_stats->port_stats[k].rx_err_ip;
426 *ptr++ = xmac_stats->port_stats[k].rx_icmp;
427 *ptr++ = xmac_stats->port_stats[k].rx_tcp;
428 *ptr++ = xmac_stats->port_stats[k].rx_udp;
429 *ptr++ = xmac_stats->port_stats[k].rx_err_tcp;
430 *ptr++ = xmac_stats->port_stats[k].rx_pause_count;
431 *ptr++ = xmac_stats->port_stats[k].rx_pause_ctrl_frms;
432 *ptr++ = xmac_stats->port_stats[k].rx_unsup_ctrl_frms;
433 *ptr++ = xmac_stats->port_stats[k].rx_fcs_err_frms;
434 *ptr++ = xmac_stats->port_stats[k].rx_in_rng_len_err_frms;
435 *ptr++ = xmac_stats->port_stats[k].rx_out_rng_len_err_frms;
436 *ptr++ = xmac_stats->port_stats[k].rx_drop_frms;
437 *ptr++ = xmac_stats->port_stats[k].rx_discarded_frms;
438 *ptr++ = xmac_stats->port_stats[k].rx_drop_ip;
439 *ptr++ = xmac_stats->port_stats[k].rx_drop_udp;
440 *ptr++ = xmac_stats->port_stats[k].rx_marker_pdu_frms;
441 *ptr++ = xmac_stats->port_stats[k].rx_lacpdu_frms;
442 *ptr++ = xmac_stats->port_stats[k].rx_unknown_pdu_frms;
443 *ptr++ = xmac_stats->port_stats[k].rx_marker_resp_pdu_frms;
444 *ptr++ = xmac_stats->port_stats[k].rx_fcs_discard;
445 *ptr++ = xmac_stats->port_stats[k].rx_illegal_pdu_frms;
446 *ptr++ = xmac_stats->port_stats[k].rx_switch_discard;
447 *ptr++ = xmac_stats->port_stats[k].rx_len_discard;
448 *ptr++ = xmac_stats->port_stats[k].rx_rpa_discard;
449 *ptr++ = xmac_stats->port_stats[k].rx_l2_mgmt_discard;
450 *ptr++ = xmac_stats->port_stats[k].rx_rts_discard;
451 *ptr++ = xmac_stats->port_stats[k].rx_trash_discard;
452 *ptr++ = xmac_stats->port_stats[k].rx_buff_full_discard;
453 *ptr++ = xmac_stats->port_stats[k].rx_red_discard;
454 *ptr++ = xmac_stats->port_stats[k].rx_xgmii_ctrl_err_cnt;
455 *ptr++ = xmac_stats->port_stats[k].rx_xgmii_data_err_cnt;
456 *ptr++ = xmac_stats->port_stats[k].rx_xgmii_char1_match;
457 *ptr++ = xmac_stats->port_stats[k].rx_xgmii_err_sym;
458 *ptr++ = xmac_stats->port_stats[k].rx_xgmii_column1_match;
459 *ptr++ = xmac_stats->port_stats[k].rx_xgmii_char2_match;
460 *ptr++ = xmac_stats->port_stats[k].rx_local_fault;
461 *ptr++ = xmac_stats->port_stats[k].rx_xgmii_column2_match;
462 *ptr++ = xmac_stats->port_stats[k].rx_jettison;
463 *ptr++ = xmac_stats->port_stats[k].rx_remote_fault;
464 }
465
466 *ptr++ = 0;
467 for (k = 0; k < vdev->no_of_vpath; k++) {
468 struct vxge_hw_vpath_stats_sw_info *vpath_info;
469
470 vpath = &vdev->vpaths[k];
471 j = vpath->device_id;
472 vpath_info = (struct vxge_hw_vpath_stats_sw_info *)
473 &sw_stats->vpath_info[j];
474 *ptr++ = vpath_info->soft_reset_cnt;
475 *ptr++ = vpath_info->error_stats.unknown_alarms;
476 *ptr++ = vpath_info->error_stats.network_sustained_fault;
477 *ptr++ = vpath_info->error_stats.network_sustained_ok;
478 *ptr++ = vpath_info->error_stats.kdfcctl_fifo0_overwrite;
479 *ptr++ = vpath_info->error_stats.kdfcctl_fifo0_poison;
480 *ptr++ = vpath_info->error_stats.kdfcctl_fifo0_dma_error;
481 *ptr++ = vpath_info->error_stats.dblgen_fifo0_overflow;
482 *ptr++ = vpath_info->error_stats.statsb_pif_chain_error;
483 *ptr++ = vpath_info->error_stats.statsb_drop_timeout;
484 *ptr++ = vpath_info->error_stats.target_illegal_access;
485 *ptr++ = vpath_info->error_stats.ini_serr_det;
486 *ptr++ = vpath_info->error_stats.prc_ring_bumps;
487 *ptr++ = vpath_info->error_stats.prc_rxdcm_sc_err;
488 *ptr++ = vpath_info->error_stats.prc_rxdcm_sc_abort;
489 *ptr++ = vpath_info->error_stats.prc_quanta_size_err;
490 *ptr++ = vpath_info->ring_stats.common_stats.full_cnt;
491 *ptr++ = vpath_info->ring_stats.common_stats.usage_cnt;
492 *ptr++ = vpath_info->ring_stats.common_stats.usage_max;
493 *ptr++ = vpath_info->ring_stats.common_stats.
494 reserve_free_swaps_cnt;
495 *ptr++ = vpath_info->ring_stats.common_stats.total_compl_cnt;
496 for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
497 *ptr++ = vpath_info->ring_stats.rxd_t_code_err_cnt[j];
498 *ptr++ = vpath_info->fifo_stats.common_stats.full_cnt;
499 *ptr++ = vpath_info->fifo_stats.common_stats.usage_cnt;
500 *ptr++ = vpath_info->fifo_stats.common_stats.usage_max;
501 *ptr++ = vpath_info->fifo_stats.common_stats.
502 reserve_free_swaps_cnt;
503 *ptr++ = vpath_info->fifo_stats.common_stats.total_compl_cnt;
504 *ptr++ = vpath_info->fifo_stats.total_posts;
505 *ptr++ = vpath_info->fifo_stats.total_buffers;
506 for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
507 *ptr++ = vpath_info->fifo_stats.txd_t_code_err_cnt[j];
508 }
509
510 *ptr++ = 0;
511 for (k = 0; k < vdev->no_of_vpath; k++) {
512 struct vxge_hw_vpath_stats_hw_info *vpath_info;
513 vpath = &vdev->vpaths[k];
514 j = vpath->device_id;
515 vpath_info = hw_stats->vpath_info[j];
516 if (!vpath_info) {
517 memset(ptr, 0, VXGE_HW_VPATH_STATS_LEN * sizeof(u64));
518 ptr += VXGE_HW_VPATH_STATS_LEN;
519 continue;
520 }
521 *ptr++ = vpath_info->ini_num_mwr_sent;
522 *ptr++ = vpath_info->ini_num_mrd_sent;
523 *ptr++ = vpath_info->ini_num_cpl_rcvd;
524 *ptr++ = vpath_info->ini_num_mwr_byte_sent;
525 *ptr++ = vpath_info->ini_num_cpl_byte_rcvd;
526 *ptr++ = vpath_info->wrcrdtarb_xoff;
527 *ptr++ = vpath_info->rdcrdtarb_xoff;
528 *ptr++ = vpath_info->vpath_genstats_count0;
529 *ptr++ = vpath_info->vpath_genstats_count1;
530 *ptr++ = vpath_info->vpath_genstats_count2;
531 *ptr++ = vpath_info->vpath_genstats_count3;
532 *ptr++ = vpath_info->vpath_genstats_count4;
533 *ptr++ = vpath_info->vpath_genstats_count5;
534 *ptr++ = vpath_info->prog_event_vnum0;
535 *ptr++ = vpath_info->prog_event_vnum1;
536 *ptr++ = vpath_info->prog_event_vnum2;
537 *ptr++ = vpath_info->prog_event_vnum3;
538 *ptr++ = vpath_info->rx_multi_cast_frame_discard;
539 *ptr++ = vpath_info->rx_frm_transferred;
540 *ptr++ = vpath_info->rxd_returned;
541 *ptr++ = vpath_info->rx_mpa_len_fail_frms;
542 *ptr++ = vpath_info->rx_mpa_mrk_fail_frms;
543 *ptr++ = vpath_info->rx_mpa_crc_fail_frms;
544 *ptr++ = vpath_info->rx_permitted_frms;
545 *ptr++ = vpath_info->rx_vp_reset_discarded_frms;
546 *ptr++ = vpath_info->rx_wol_frms;
547 *ptr++ = vpath_info->tx_vp_reset_discarded_frms;
548 }
549
550 *ptr++ = 0;
551 *ptr++ = vdev->stats.vpaths_open;
552 *ptr++ = vdev->stats.vpath_open_fail;
553 *ptr++ = vdev->stats.link_up;
554 *ptr++ = vdev->stats.link_down;
555
556 for (k = 0; k < vdev->no_of_vpath; k++) {
557 *ptr += vdev->vpaths[k].fifo.stats.tx_frms;
558 *(ptr + 1) += vdev->vpaths[k].fifo.stats.tx_errors;
559 *(ptr + 2) += vdev->vpaths[k].fifo.stats.tx_bytes;
560 *(ptr + 3) += vdev->vpaths[k].fifo.stats.txd_not_free;
561 *(ptr + 4) += vdev->vpaths[k].fifo.stats.txd_out_of_desc;
562 *(ptr + 5) += vdev->vpaths[k].ring.stats.rx_frms;
563 *(ptr + 6) += vdev->vpaths[k].ring.stats.rx_errors;
564 *(ptr + 7) += vdev->vpaths[k].ring.stats.rx_bytes;
565 *(ptr + 8) += vdev->vpaths[k].ring.stats.rx_mcast;
566 *(ptr + 9) += vdev->vpaths[k].fifo.stats.pci_map_fail +
567 vdev->vpaths[k].ring.stats.pci_map_fail;
568 *(ptr + 10) += vdev->vpaths[k].ring.stats.skb_alloc_fail;
569 }
570
571 ptr += 12;
572
573 kfree(xmac_stats);
574 kfree(sw_stats);
575 kfree(hw_stats);
576}
577
578static void vxge_ethtool_get_strings(struct net_device *dev, u32 stringset,
579 u8 *data)
580{
581 int stat_size = 0;
582 int i, j;
583 struct vxgedev *vdev = netdev_priv(dev);
584 switch (stringset) {
585 case ETH_SS_STATS:
586 vxge_add_string("VPATH STATISTICS%s\t\t\t",
587 &stat_size, data, "");
588 for (i = 0; i < vdev->no_of_vpath; i++) {
589 vxge_add_string("tx_ttl_eth_frms_%d\t\t\t",
590 &stat_size, data, i);
591 vxge_add_string("tx_ttl_eth_octects_%d\t\t",
592 &stat_size, data, i);
593 vxge_add_string("tx_data_octects_%d\t\t\t",
594 &stat_size, data, i);
595 vxge_add_string("tx_mcast_frms_%d\t\t\t",
596 &stat_size, data, i);
597 vxge_add_string("tx_bcast_frms_%d\t\t\t",
598 &stat_size, data, i);
599 vxge_add_string("tx_ucast_frms_%d\t\t\t",
600 &stat_size, data, i);
601 vxge_add_string("tx_tagged_frms_%d\t\t\t",
602 &stat_size, data, i);
603 vxge_add_string("tx_vld_ip_%d\t\t\t",
604 &stat_size, data, i);
605 vxge_add_string("tx_vld_ip_octects_%d\t\t",
606 &stat_size, data, i);
607 vxge_add_string("tx_icmp_%d\t\t\t\t",
608 &stat_size, data, i);
609 vxge_add_string("tx_tcp_%d\t\t\t\t",
610 &stat_size, data, i);
611 vxge_add_string("tx_rst_tcp_%d\t\t\t",
612 &stat_size, data, i);
613 vxge_add_string("tx_udp_%d\t\t\t\t",
614 &stat_size, data, i);
615 vxge_add_string("tx_unknown_proto_%d\t\t\t",
616 &stat_size, data, i);
617 vxge_add_string("tx_lost_ip_%d\t\t\t",
618 &stat_size, data, i);
619 vxge_add_string("tx_parse_error_%d\t\t\t",
620 &stat_size, data, i);
621 vxge_add_string("tx_tcp_offload_%d\t\t\t",
622 &stat_size, data, i);
623 vxge_add_string("tx_retx_tcp_offload_%d\t\t",
624 &stat_size, data, i);
625 vxge_add_string("tx_lost_ip_offload_%d\t\t",
626 &stat_size, data, i);
627 vxge_add_string("rx_ttl_eth_frms_%d\t\t\t",
628 &stat_size, data, i);
629 vxge_add_string("rx_vld_frms_%d\t\t\t",
630 &stat_size, data, i);
631 vxge_add_string("rx_offload_frms_%d\t\t\t",
632 &stat_size, data, i);
633 vxge_add_string("rx_ttl_eth_octects_%d\t\t",
634 &stat_size, data, i);
635 vxge_add_string("rx_data_octects_%d\t\t\t",
636 &stat_size, data, i);
637 vxge_add_string("rx_offload_octects_%d\t\t",
638 &stat_size, data, i);
639 vxge_add_string("rx_vld_mcast_frms_%d\t\t",
640 &stat_size, data, i);
641 vxge_add_string("rx_vld_bcast_frms_%d\t\t",
642 &stat_size, data, i);
643 vxge_add_string("rx_accepted_ucast_frms_%d\t\t",
644 &stat_size, data, i);
645 vxge_add_string("rx_accepted_nucast_frms_%d\t\t",
646 &stat_size, data, i);
647 vxge_add_string("rx_tagged_frms_%d\t\t\t",
648 &stat_size, data, i);
649 vxge_add_string("rx_long_frms_%d\t\t\t",
650 &stat_size, data, i);
651 vxge_add_string("rx_usized_frms_%d\t\t\t",
652 &stat_size, data, i);
653 vxge_add_string("rx_osized_frms_%d\t\t\t",
654 &stat_size, data, i);
655 vxge_add_string("rx_frag_frms_%d\t\t\t",
656 &stat_size, data, i);
657 vxge_add_string("rx_jabber_frms_%d\t\t\t",
658 &stat_size, data, i);
659 vxge_add_string("rx_ttl_64_frms_%d\t\t\t",
660 &stat_size, data, i);
661 vxge_add_string("rx_ttl_65_127_frms_%d\t\t",
662 &stat_size, data, i);
663 vxge_add_string("rx_ttl_128_255_frms_%d\t\t",
664 &stat_size, data, i);
665 vxge_add_string("rx_ttl_256_511_frms_%d\t\t",
666 &stat_size, data, i);
667 vxge_add_string("rx_ttl_512_1023_frms_%d\t\t",
668 &stat_size, data, i);
669 vxge_add_string("rx_ttl_1024_1518_frms_%d\t\t",
670 &stat_size, data, i);
671 vxge_add_string("rx_ttl_1519_4095_frms_%d\t\t",
672 &stat_size, data, i);
673 vxge_add_string("rx_ttl_4096_8191_frms_%d\t\t",
674 &stat_size, data, i);
675 vxge_add_string("rx_ttl_8192_max_frms_%d\t\t",
676 &stat_size, data, i);
677 vxge_add_string("rx_ttl_gt_max_frms_%d\t\t",
678 &stat_size, data, i);
679 vxge_add_string("rx_ip%d\t\t\t\t",
680 &stat_size, data, i);
681 vxge_add_string("rx_accepted_ip_%d\t\t\t",
682 &stat_size, data, i);
683 vxge_add_string("rx_ip_octects_%d\t\t\t",
684 &stat_size, data, i);
685 vxge_add_string("rx_err_ip_%d\t\t\t",
686 &stat_size, data, i);
687 vxge_add_string("rx_icmp_%d\t\t\t\t",
688 &stat_size, data, i);
689 vxge_add_string("rx_tcp_%d\t\t\t\t",
690 &stat_size, data, i);
691 vxge_add_string("rx_udp_%d\t\t\t\t",
692 &stat_size, data, i);
693 vxge_add_string("rx_err_tcp_%d\t\t\t",
694 &stat_size, data, i);
695 vxge_add_string("rx_lost_frms_%d\t\t\t",
696 &stat_size, data, i);
697 vxge_add_string("rx_lost_ip_%d\t\t\t",
698 &stat_size, data, i);
699 vxge_add_string("rx_lost_ip_offload_%d\t\t",
700 &stat_size, data, i);
701 vxge_add_string("rx_various_discard_%d\t\t",
702 &stat_size, data, i);
703 vxge_add_string("rx_sleep_discard_%d\t\t\t",
704 &stat_size, data, i);
705 vxge_add_string("rx_red_discard_%d\t\t\t",
706 &stat_size, data, i);
707 vxge_add_string("rx_queue_full_discard_%d\t\t",
708 &stat_size, data, i);
709 vxge_add_string("rx_mpa_ok_frms_%d\t\t\t",
710 &stat_size, data, i);
711 }
712
713 vxge_add_string("\nAGGR STATISTICS%s\t\t\t\t",
714 &stat_size, data, "");
715 for (i = 0; i < vdev->max_config_port; i++) {
716 vxge_add_string("tx_frms_%d\t\t\t\t",
717 &stat_size, data, i);
718 vxge_add_string("tx_data_octects_%d\t\t\t",
719 &stat_size, data, i);
720 vxge_add_string("tx_mcast_frms_%d\t\t\t",
721 &stat_size, data, i);
722 vxge_add_string("tx_bcast_frms_%d\t\t\t",
723 &stat_size, data, i);
724 vxge_add_string("tx_discarded_frms_%d\t\t",
725 &stat_size, data, i);
726 vxge_add_string("tx_errored_frms_%d\t\t\t",
727 &stat_size, data, i);
728 vxge_add_string("rx_frms_%d\t\t\t\t",
729 &stat_size, data, i);
730 vxge_add_string("rx_data_octects_%d\t\t\t",
731 &stat_size, data, i);
732 vxge_add_string("rx_mcast_frms_%d\t\t\t",
733 &stat_size, data, i);
734 vxge_add_string("rx_bcast_frms_%d\t\t\t",
735 &stat_size, data, i);
736 vxge_add_string("rx_discarded_frms_%d\t\t",
737 &stat_size, data, i);
738 vxge_add_string("rx_errored_frms_%d\t\t\t",
739 &stat_size, data, i);
740 vxge_add_string("rx_unknown_slow_proto_frms_%d\t",
741 &stat_size, data, i);
742 }
743
744 vxge_add_string("\nPORT STATISTICS%s\t\t\t\t",
745 &stat_size, data, "");
746 for (i = 0; i < vdev->max_config_port; i++) {
747 vxge_add_string("tx_ttl_frms_%d\t\t\t",
748 &stat_size, data, i);
749 vxge_add_string("tx_ttl_octects_%d\t\t\t",
750 &stat_size, data, i);
751 vxge_add_string("tx_data_octects_%d\t\t\t",
752 &stat_size, data, i);
753 vxge_add_string("tx_mcast_frms_%d\t\t\t",
754 &stat_size, data, i);
755 vxge_add_string("tx_bcast_frms_%d\t\t\t",
756 &stat_size, data, i);
757 vxge_add_string("tx_ucast_frms_%d\t\t\t",
758 &stat_size, data, i);
759 vxge_add_string("tx_tagged_frms_%d\t\t\t",
760 &stat_size, data, i);
761 vxge_add_string("tx_vld_ip_%d\t\t\t",
762 &stat_size, data, i);
763 vxge_add_string("tx_vld_ip_octects_%d\t\t",
764 &stat_size, data, i);
765 vxge_add_string("tx_icmp_%d\t\t\t\t",
766 &stat_size, data, i);
767 vxge_add_string("tx_tcp_%d\t\t\t\t",
768 &stat_size, data, i);
769 vxge_add_string("tx_rst_tcp_%d\t\t\t",
770 &stat_size, data, i);
771 vxge_add_string("tx_udp_%d\t\t\t\t",
772 &stat_size, data, i);
773 vxge_add_string("tx_parse_error_%d\t\t\t",
774 &stat_size, data, i);
775 vxge_add_string("tx_unknown_protocol_%d\t\t",
776 &stat_size, data, i);
777 vxge_add_string("tx_pause_ctrl_frms_%d\t\t",
778 &stat_size, data, i);
779 vxge_add_string("tx_marker_pdu_frms_%d\t\t",
780 &stat_size, data, i);
781 vxge_add_string("tx_lacpdu_frms_%d\t\t\t",
782 &stat_size, data, i);
783 vxge_add_string("tx_drop_ip_%d\t\t\t",
784 &stat_size, data, i);
785 vxge_add_string("tx_marker_resp_pdu_frms_%d\t\t",
786 &stat_size, data, i);
787 vxge_add_string("tx_xgmii_char2_match_%d\t\t",
788 &stat_size, data, i);
789 vxge_add_string("tx_xgmii_char1_match_%d\t\t",
790 &stat_size, data, i);
791 vxge_add_string("tx_xgmii_column2_match_%d\t\t",
792 &stat_size, data, i);
793 vxge_add_string("tx_xgmii_column1_match_%d\t\t",
794 &stat_size, data, i);
795 vxge_add_string("tx_any_err_frms_%d\t\t\t",
796 &stat_size, data, i);
797 vxge_add_string("tx_drop_frms_%d\t\t\t",
798 &stat_size, data, i);
799 vxge_add_string("rx_ttl_frms_%d\t\t\t",
800 &stat_size, data, i);
801 vxge_add_string("rx_vld_frms_%d\t\t\t",
802 &stat_size, data, i);
803 vxge_add_string("rx_offload_frms_%d\t\t\t",
804 &stat_size, data, i);
805 vxge_add_string("rx_ttl_octects_%d\t\t\t",
806 &stat_size, data, i);
807 vxge_add_string("rx_data_octects_%d\t\t\t",
808 &stat_size, data, i);
809 vxge_add_string("rx_offload_octects_%d\t\t",
810 &stat_size, data, i);
811 vxge_add_string("rx_vld_mcast_frms_%d\t\t",
812 &stat_size, data, i);
813 vxge_add_string("rx_vld_bcast_frms_%d\t\t",
814 &stat_size, data, i);
815 vxge_add_string("rx_accepted_ucast_frms_%d\t\t",
816 &stat_size, data, i);
817 vxge_add_string("rx_accepted_nucast_frms_%d\t\t",
818 &stat_size, data, i);
819 vxge_add_string("rx_tagged_frms_%d\t\t\t",
820 &stat_size, data, i);
821 vxge_add_string("rx_long_frms_%d\t\t\t",
822 &stat_size, data, i);
823 vxge_add_string("rx_usized_frms_%d\t\t\t",
824 &stat_size, data, i);
825 vxge_add_string("rx_osized_frms_%d\t\t\t",
826 &stat_size, data, i);
827 vxge_add_string("rx_frag_frms_%d\t\t\t",
828 &stat_size, data, i);
829 vxge_add_string("rx_jabber_frms_%d\t\t\t",
830 &stat_size, data, i);
831 vxge_add_string("rx_ttl_64_frms_%d\t\t\t",
832 &stat_size, data, i);
833 vxge_add_string("rx_ttl_65_127_frms_%d\t\t",
834 &stat_size, data, i);
835 vxge_add_string("rx_ttl_128_255_frms_%d\t\t",
836 &stat_size, data, i);
837 vxge_add_string("rx_ttl_256_511_frms_%d\t\t",
838 &stat_size, data, i);
839 vxge_add_string("rx_ttl_512_1023_frms_%d\t\t",
840 &stat_size, data, i);
841 vxge_add_string("rx_ttl_1024_1518_frms_%d\t\t",
842 &stat_size, data, i);
843 vxge_add_string("rx_ttl_1519_4095_frms_%d\t\t",
844 &stat_size, data, i);
845 vxge_add_string("rx_ttl_4096_8191_frms_%d\t\t",
846 &stat_size, data, i);
847 vxge_add_string("rx_ttl_8192_max_frms_%d\t\t",
848 &stat_size, data, i);
849 vxge_add_string("rx_ttl_gt_max_frms_%d\t\t",
850 &stat_size, data, i);
851 vxge_add_string("rx_ip_%d\t\t\t\t",
852 &stat_size, data, i);
853 vxge_add_string("rx_accepted_ip_%d\t\t\t",
854 &stat_size, data, i);
855 vxge_add_string("rx_ip_octets_%d\t\t\t",
856 &stat_size, data, i);
857 vxge_add_string("rx_err_ip_%d\t\t\t",
858 &stat_size, data, i);
859 vxge_add_string("rx_icmp_%d\t\t\t\t",
860 &stat_size, data, i);
861 vxge_add_string("rx_tcp_%d\t\t\t\t",
862 &stat_size, data, i);
863 vxge_add_string("rx_udp_%d\t\t\t\t",
864 &stat_size, data, i);
865 vxge_add_string("rx_err_tcp_%d\t\t\t",
866 &stat_size, data, i);
867 vxge_add_string("rx_pause_count_%d\t\t\t",
868 &stat_size, data, i);
869 vxge_add_string("rx_pause_ctrl_frms_%d\t\t",
870 &stat_size, data, i);
871 vxge_add_string("rx_unsup_ctrl_frms_%d\t\t",
872 &stat_size, data, i);
873 vxge_add_string("rx_fcs_err_frms_%d\t\t\t",
874 &stat_size, data, i);
875 vxge_add_string("rx_in_rng_len_err_frms_%d\t\t",
876 &stat_size, data, i);
877 vxge_add_string("rx_out_rng_len_err_frms_%d\t\t",
878 &stat_size, data, i);
879 vxge_add_string("rx_drop_frms_%d\t\t\t",
880 &stat_size, data, i);
881 vxge_add_string("rx_discard_frms_%d\t\t\t",
882 &stat_size, data, i);
883 vxge_add_string("rx_drop_ip_%d\t\t\t",
884 &stat_size, data, i);
885 vxge_add_string("rx_drop_udp_%d\t\t\t",
886 &stat_size, data, i);
887 vxge_add_string("rx_marker_pdu_frms_%d\t\t",
888 &stat_size, data, i);
889 vxge_add_string("rx_lacpdu_frms_%d\t\t\t",
890 &stat_size, data, i);
891 vxge_add_string("rx_unknown_pdu_frms_%d\t\t",
892 &stat_size, data, i);
893 vxge_add_string("rx_marker_resp_pdu_frms_%d\t\t",
894 &stat_size, data, i);
895 vxge_add_string("rx_fcs_discard_%d\t\t\t",
896 &stat_size, data, i);
897 vxge_add_string("rx_illegal_pdu_frms_%d\t\t",
898 &stat_size, data, i);
899 vxge_add_string("rx_switch_discard_%d\t\t",
900 &stat_size, data, i);
901 vxge_add_string("rx_len_discard_%d\t\t\t",
902 &stat_size, data, i);
903 vxge_add_string("rx_rpa_discard_%d\t\t\t",
904 &stat_size, data, i);
905 vxge_add_string("rx_l2_mgmt_discard_%d\t\t",
906 &stat_size, data, i);
907 vxge_add_string("rx_rts_discard_%d\t\t\t",
908 &stat_size, data, i);
909 vxge_add_string("rx_trash_discard_%d\t\t\t",
910 &stat_size, data, i);
911 vxge_add_string("rx_buff_full_discard_%d\t\t",
912 &stat_size, data, i);
913 vxge_add_string("rx_red_discard_%d\t\t\t",
914 &stat_size, data, i);
915 vxge_add_string("rx_xgmii_ctrl_err_cnt_%d\t\t",
916 &stat_size, data, i);
917 vxge_add_string("rx_xgmii_data_err_cnt_%d\t\t",
918 &stat_size, data, i);
919 vxge_add_string("rx_xgmii_char1_match_%d\t\t",
920 &stat_size, data, i);
921 vxge_add_string("rx_xgmii_err_sym_%d\t\t\t",
922 &stat_size, data, i);
923 vxge_add_string("rx_xgmii_column1_match_%d\t\t",
924 &stat_size, data, i);
925 vxge_add_string("rx_xgmii_char2_match_%d\t\t",
926 &stat_size, data, i);
927 vxge_add_string("rx_local_fault_%d\t\t\t",
928 &stat_size, data, i);
929 vxge_add_string("rx_xgmii_column2_match_%d\t\t",
930 &stat_size, data, i);
931 vxge_add_string("rx_jettison_%d\t\t\t",
932 &stat_size, data, i);
933 vxge_add_string("rx_remote_fault_%d\t\t\t",
934 &stat_size, data, i);
935 }
936
937 vxge_add_string("\n SOFTWARE STATISTICS%s\t\t\t",
938 &stat_size, data, "");
939 for (i = 0; i < vdev->no_of_vpath; i++) {
940 vxge_add_string("soft_reset_cnt_%d\t\t\t",
941 &stat_size, data, i);
942 vxge_add_string("unknown_alarms_%d\t\t\t",
943 &stat_size, data, i);
944 vxge_add_string("network_sustained_fault_%d\t\t",
945 &stat_size, data, i);
946 vxge_add_string("network_sustained_ok_%d\t\t",
947 &stat_size, data, i);
948 vxge_add_string("kdfcctl_fifo0_overwrite_%d\t\t",
949 &stat_size, data, i);
950 vxge_add_string("kdfcctl_fifo0_poison_%d\t\t",
951 &stat_size, data, i);
952 vxge_add_string("kdfcctl_fifo0_dma_error_%d\t\t",
953 &stat_size, data, i);
954 vxge_add_string("dblgen_fifo0_overflow_%d\t\t",
955 &stat_size, data, i);
956 vxge_add_string("statsb_pif_chain_error_%d\t\t",
957 &stat_size, data, i);
958 vxge_add_string("statsb_drop_timeout_%d\t\t",
959 &stat_size, data, i);
960 vxge_add_string("target_illegal_access_%d\t\t",
961 &stat_size, data, i);
962 vxge_add_string("ini_serr_det_%d\t\t\t",
963 &stat_size, data, i);
964 vxge_add_string("prc_ring_bumps_%d\t\t\t",
965 &stat_size, data, i);
966 vxge_add_string("prc_rxdcm_sc_err_%d\t\t\t",
967 &stat_size, data, i);
968 vxge_add_string("prc_rxdcm_sc_abort_%d\t\t",
969 &stat_size, data, i);
970 vxge_add_string("prc_quanta_size_err_%d\t\t",
971 &stat_size, data, i);
972 vxge_add_string("ring_full_cnt_%d\t\t\t",
973 &stat_size, data, i);
974 vxge_add_string("ring_usage_cnt_%d\t\t\t",
975 &stat_size, data, i);
976 vxge_add_string("ring_usage_max_%d\t\t\t",
977 &stat_size, data, i);
978 vxge_add_string("ring_reserve_free_swaps_cnt_%d\t",
979 &stat_size, data, i);
980 vxge_add_string("ring_total_compl_cnt_%d\t\t",
981 &stat_size, data, i);
982 for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
983 vxge_add_string("rxd_t_code_err_cnt%d_%d\t\t",
984 &stat_size, data, j, i);
985 vxge_add_string("fifo_full_cnt_%d\t\t\t",
986 &stat_size, data, i);
987 vxge_add_string("fifo_usage_cnt_%d\t\t\t",
988 &stat_size, data, i);
989 vxge_add_string("fifo_usage_max_%d\t\t\t",
990 &stat_size, data, i);
991 vxge_add_string("fifo_reserve_free_swaps_cnt_%d\t",
992 &stat_size, data, i);
993 vxge_add_string("fifo_total_compl_cnt_%d\t\t",
994 &stat_size, data, i);
995 vxge_add_string("fifo_total_posts_%d\t\t\t",
996 &stat_size, data, i);
997 vxge_add_string("fifo_total_buffers_%d\t\t",
998 &stat_size, data, i);
999 for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
1000 vxge_add_string("txd_t_code_err_cnt%d_%d\t\t",
1001 &stat_size, data, j, i);
1002 }
1003
1004 vxge_add_string("\n HARDWARE STATISTICS%s\t\t\t",
1005 &stat_size, data, "");
1006 for (i = 0; i < vdev->no_of_vpath; i++) {
1007 vxge_add_string("ini_num_mwr_sent_%d\t\t\t",
1008 &stat_size, data, i);
1009 vxge_add_string("ini_num_mrd_sent_%d\t\t\t",
1010 &stat_size, data, i);
1011 vxge_add_string("ini_num_cpl_rcvd_%d\t\t\t",
1012 &stat_size, data, i);
1013 vxge_add_string("ini_num_mwr_byte_sent_%d\t\t",
1014 &stat_size, data, i);
1015 vxge_add_string("ini_num_cpl_byte_rcvd_%d\t\t",
1016 &stat_size, data, i);
1017 vxge_add_string("wrcrdtarb_xoff_%d\t\t\t",
1018 &stat_size, data, i);
1019 vxge_add_string("rdcrdtarb_xoff_%d\t\t\t",
1020 &stat_size, data, i);
1021 vxge_add_string("vpath_genstats_count0_%d\t\t",
1022 &stat_size, data, i);
1023 vxge_add_string("vpath_genstats_count1_%d\t\t",
1024 &stat_size, data, i);
1025 vxge_add_string("vpath_genstats_count2_%d\t\t",
1026 &stat_size, data, i);
1027 vxge_add_string("vpath_genstats_count3_%d\t\t",
1028 &stat_size, data, i);
1029 vxge_add_string("vpath_genstats_count4_%d\t\t",
1030 &stat_size, data, i);
1031 vxge_add_string("vpath_genstats_count5_%d\t\t",
1032 &stat_size, data, i);
1033 vxge_add_string("prog_event_vnum0_%d\t\t\t",
1034 &stat_size, data, i);
1035 vxge_add_string("prog_event_vnum1_%d\t\t\t",
1036 &stat_size, data, i);
1037 vxge_add_string("prog_event_vnum2_%d\t\t\t",
1038 &stat_size, data, i);
1039 vxge_add_string("prog_event_vnum3_%d\t\t\t",
1040 &stat_size, data, i);
1041 vxge_add_string("rx_multi_cast_frame_discard_%d\t",
1042 &stat_size, data, i);
1043 vxge_add_string("rx_frm_transferred_%d\t\t",
1044 &stat_size, data, i);
1045 vxge_add_string("rxd_returned_%d\t\t\t",
1046 &stat_size, data, i);
1047 vxge_add_string("rx_mpa_len_fail_frms_%d\t\t",
1048 &stat_size, data, i);
1049 vxge_add_string("rx_mpa_mrk_fail_frms_%d\t\t",
1050 &stat_size, data, i);
1051 vxge_add_string("rx_mpa_crc_fail_frms_%d\t\t",
1052 &stat_size, data, i);
1053 vxge_add_string("rx_permitted_frms_%d\t\t",
1054 &stat_size, data, i);
1055 vxge_add_string("rx_vp_reset_discarded_frms_%d\t",
1056 &stat_size, data, i);
1057 vxge_add_string("rx_wol_frms_%d\t\t\t",
1058 &stat_size, data, i);
1059 vxge_add_string("tx_vp_reset_discarded_frms_%d\t",
1060 &stat_size, data, i);
1061 }
1062
1063 memcpy(data + stat_size, &ethtool_driver_stats_keys,
1064 sizeof(ethtool_driver_stats_keys));
1065 }
1066}
1067
1068static int vxge_ethtool_get_regs_len(struct net_device *dev)
1069{
1070 struct vxgedev *vdev = netdev_priv(dev);
1071
1072 return sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
1073}
1074
1075static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
1076{
1077 struct vxgedev *vdev = netdev_priv(dev);
1078
1079 switch (sset) {
1080 case ETH_SS_STATS:
1081 return VXGE_TITLE_LEN +
1082 (vdev->no_of_vpath * VXGE_HW_VPATH_STATS_LEN) +
1083 (vdev->max_config_port * VXGE_HW_AGGR_STATS_LEN) +
1084 (vdev->max_config_port * VXGE_HW_PORT_STATS_LEN) +
1085 (vdev->no_of_vpath * VXGE_HW_VPATH_TX_STATS_LEN) +
1086 (vdev->no_of_vpath * VXGE_HW_VPATH_RX_STATS_LEN) +
1087 (vdev->no_of_vpath * VXGE_SW_STATS_LEN) +
1088 DRIVER_STAT_LEN;
1089 default:
1090 return -EOPNOTSUPP;
1091 }
1092}
1093
1094static int vxge_fw_flash(struct net_device *dev, struct ethtool_flash *parms)
1095{
1096 struct vxgedev *vdev = netdev_priv(dev);
1097
1098 if (vdev->max_vpath_supported != VXGE_HW_MAX_VIRTUAL_PATHS) {
1099 printk(KERN_INFO "Single Function Mode is required to flash the"
1100 " firmware\n");
1101 return -EINVAL;
1102 }
1103
1104 if (netif_running(dev)) {
1105 printk(KERN_INFO "Interface %s must be down to flash the "
1106 "firmware\n", dev->name);
1107 return -EBUSY;
1108 }
1109
1110 return vxge_fw_upgrade(vdev, parms->data, 1);
1111}
1112
1113static const struct ethtool_ops vxge_ethtool_ops = {
1114 .get_settings = vxge_ethtool_gset,
1115 .set_settings = vxge_ethtool_sset,
1116 .get_drvinfo = vxge_ethtool_gdrvinfo,
1117 .get_regs_len = vxge_ethtool_get_regs_len,
1118 .get_regs = vxge_ethtool_gregs,
1119 .get_link = ethtool_op_get_link,
1120 .get_pauseparam = vxge_ethtool_getpause_data,
1121 .set_pauseparam = vxge_ethtool_setpause_data,
1122 .get_strings = vxge_ethtool_get_strings,
1123 .set_phys_id = vxge_ethtool_idnic,
1124 .get_sset_count = vxge_ethtool_get_sset_count,
1125 .get_ethtool_stats = vxge_get_ethtool_stats,
1126 .flash_device = vxge_fw_flash,
1127};
1128
1129void vxge_initialize_ethtool_ops(struct net_device *ndev)
1130{
1131 SET_ETHTOOL_OPS(ndev, &vxge_ethtool_ops);
1132}
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h
new file mode 100644
index 000000000000..6cf3044d7f43
--- /dev/null
+++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h
@@ -0,0 +1,67 @@
1/******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
9 *
10 * vxge-ethtool.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/
14#ifndef _VXGE_ETHTOOL_H
15#define _VXGE_ETHTOOL_H
16
17#include "vxge-main.h"
18
19/* Ethtool related variables and Macros. */
20static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset);
21
22static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
23 {"\n DRIVER STATISTICS"},
24 {"vpaths_opened"},
25 {"vpath_open_fail_cnt"},
26 {"link_up_cnt"},
27 {"link_down_cnt"},
28 {"tx_frms"},
29 {"tx_errors"},
30 {"tx_bytes"},
31 {"txd_not_free"},
32 {"txd_out_of_desc"},
33 {"rx_frms"},
34 {"rx_errors"},
35 {"rx_bytes"},
36 {"rx_mcast"},
37 {"pci_map_fail_cnt"},
38 {"skb_alloc_fail_cnt"}
39};
40
41#define VXGE_TITLE_LEN 5
42#define VXGE_HW_VPATH_STATS_LEN 27
43#define VXGE_HW_AGGR_STATS_LEN 13
44#define VXGE_HW_PORT_STATS_LEN 94
45#define VXGE_HW_VPATH_TX_STATS_LEN 19
46#define VXGE_HW_VPATH_RX_STATS_LEN 42
47#define VXGE_SW_STATS_LEN 60
48#define VXGE_HW_STATS_LEN (VXGE_HW_VPATH_STATS_LEN +\
49 VXGE_HW_AGGR_STATS_LEN +\
50 VXGE_HW_PORT_STATS_LEN +\
51 VXGE_HW_VPATH_TX_STATS_LEN +\
52 VXGE_HW_VPATH_RX_STATS_LEN)
53
54#define DRIVER_STAT_LEN (sizeof(ethtool_driver_stats_keys)/ETH_GSTRING_LEN)
55#define STAT_LEN (VXGE_HW_STATS_LEN + DRIVER_STAT_LEN + VXGE_SW_STATS_LEN)
56
57/* Maximum flicker time of adapter LED */
58#define VXGE_MAX_FLICKER_TIME (60 * HZ) /* 60 seconds */
59#define VXGE_FLICKER_ON 1
60#define VXGE_FLICKER_OFF 0
61
62#define vxge_add_string(fmt, size, buf, ...) {\
63 snprintf(buf + *size, ETH_GSTRING_LEN, fmt, __VA_ARGS__); \
64 *size += ETH_GSTRING_LEN; \
65}
66
67#endif /*_VXGE_ETHTOOL_H*/
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
new file mode 100644
index 000000000000..178348a258d2
--- /dev/null
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -0,0 +1,4854 @@
1/******************************************************************************
2* This software may be used and distributed according to the terms of
3* the GNU General Public License (GPL), incorporated herein by reference.
4* Drivers based on or derived from this code fall under the GPL and must
5* retain the authorship, copyright and license notice. This file is not
6* a complete program and may only be used when the entire operating
7* system is licensed under the GPL.
8* See the file COPYING in this distribution for more information.
9*
10* vxge-main.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11* Virtualized Server Adapter.
12* Copyright(c) 2002-2010 Exar Corp.
13*
14* The loadable module parameters supported by the driver and a brief
15* explanation of each variable:
16* vlan_tag_strip:
17* Strip VLAN Tag enable/disable. Instructs the device to remove
18* the VLAN tag from all received tagged frames that are not
19* replicated at the internal L2 switch.
20* 0 - Do not strip the VLAN tag.
21* 1 - Strip the VLAN tag.
22*
23* addr_learn_en:
24* Enable learning the mac address of the guest OS interface in
25* a virtualization environment.
26* 0 - DISABLE
27* 1 - ENABLE
28*
29* max_config_port:
30*	Maximum number of ports to be supported.
31*	MIN - 1 and MAX - 2
32*
33* max_config_vpath:
34*	This configures the maximum number of VPATHs configured for each
35*	device function.
36* MIN - 1 and MAX - 17
37*
38* max_config_dev:
39*	This configures the maximum number of device functions to be enabled.
40* MIN - 1 and MAX - 17
41*
42******************************************************************************/
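/*
 * Illustrative example (not part of the original file): since the values
 * documented above are ordinary module parameters, a hypothetical load line
 * such as
 *
 *	modprobe vxge vlan_tag_strip=1 max_config_vpath=4
 *
 * would enable VLAN tag stripping and limit the number of virtual paths.
 */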
43
44#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
45
46#include <linux/bitops.h>
47#include <linux/if_vlan.h>
48#include <linux/interrupt.h>
49#include <linux/pci.h>
50#include <linux/slab.h>
51#include <linux/tcp.h>
52#include <net/ip.h>
53#include <linux/netdevice.h>
54#include <linux/etherdevice.h>
55#include <linux/firmware.h>
56#include <linux/net_tstamp.h>
57#include <linux/prefetch.h>
58#include "vxge-main.h"
59#include "vxge-reg.h"
60
61MODULE_LICENSE("Dual BSD/GPL");
62MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O"
63	" Virtualized Server Adapter");
64
65static DEFINE_PCI_DEVICE_TABLE(vxge_id_table) = {
66 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
67 PCI_ANY_ID},
68 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
69 PCI_ANY_ID},
70 {0}
71};
72
73MODULE_DEVICE_TABLE(pci, vxge_id_table);
74
75VXGE_MODULE_PARAM_INT(vlan_tag_strip, VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE);
76VXGE_MODULE_PARAM_INT(addr_learn_en, VXGE_HW_MAC_ADDR_LEARN_DEFAULT);
77VXGE_MODULE_PARAM_INT(max_config_port, VXGE_MAX_CONFIG_PORT);
78VXGE_MODULE_PARAM_INT(max_config_vpath, VXGE_USE_DEFAULT);
79VXGE_MODULE_PARAM_INT(max_mac_vpath, VXGE_MAX_MAC_ADDR_COUNT);
80VXGE_MODULE_PARAM_INT(max_config_dev, VXGE_MAX_CONFIG_DEV);
81
82static u16 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS] =
83 {0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};
84static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
85 {[0 ...(VXGE_HW_MAX_VIRTUAL_PATHS - 1)] = 0xFF};
86module_param_array(bw_percentage, uint, NULL, 0);
87
88static struct vxge_drv_config *driver_config;
89
90static inline int is_vxge_card_up(struct vxgedev *vdev)
91{
92 return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
93}
94
95static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
96{
97 struct sk_buff **skb_ptr = NULL;
98 struct sk_buff **temp;
99#define NR_SKB_COMPLETED 128
100 struct sk_buff *completed[NR_SKB_COMPLETED];
101 int more;
102
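	/*
	 * Drain completed TxDs in batches of up to NR_SKB_COMPLETED while
	 * holding the Tx queue lock, free the skbs outside the lock, and
	 * loop for as long as the poll reports more work.
	 */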
103 do {
104 more = 0;
105 skb_ptr = completed;
106
107 if (__netif_tx_trylock(fifo->txq)) {
108 vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr,
109 NR_SKB_COMPLETED, &more);
110 __netif_tx_unlock(fifo->txq);
111 }
112
113 /* free SKBs */
114 for (temp = completed; temp != skb_ptr; temp++)
115 dev_kfree_skb_irq(*temp);
116 } while (more);
117}
118
119static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
120{
121 int i;
122
123 /* Complete all transmits */
124 for (i = 0; i < vdev->no_of_vpath; i++)
125 VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo);
126}
127
128static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
129{
130 int i;
131 struct vxge_ring *ring;
132
133 /* Complete all receives*/
134 for (i = 0; i < vdev->no_of_vpath; i++) {
135 ring = &vdev->vpaths[i].ring;
136 vxge_hw_vpath_poll_rx(ring->handle);
137 }
138}
139
140/*
141 * vxge_callback_link_up
142 *
143 * This function is called during interrupt context to notify link up state
144 * change.
145 */
146static void vxge_callback_link_up(struct __vxge_hw_device *hldev)
147{
148 struct net_device *dev = hldev->ndev;
149 struct vxgedev *vdev = netdev_priv(dev);
150
151 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
152 vdev->ndev->name, __func__, __LINE__);
153 netdev_notice(vdev->ndev, "Link Up\n");
154 vdev->stats.link_up++;
155
156 netif_carrier_on(vdev->ndev);
157 netif_tx_wake_all_queues(vdev->ndev);
158
159 vxge_debug_entryexit(VXGE_TRACE,
160 "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
161}
162
163/*
164 * vxge_callback_link_down
165 *
166 * This function is called during interrupt context to notify link down state
167 * change.
168 */
169static void vxge_callback_link_down(struct __vxge_hw_device *hldev)
170{
171 struct net_device *dev = hldev->ndev;
172 struct vxgedev *vdev = netdev_priv(dev);
173
174 vxge_debug_entryexit(VXGE_TRACE,
175 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
176 netdev_notice(vdev->ndev, "Link Down\n");
177
178 vdev->stats.link_down++;
179 netif_carrier_off(vdev->ndev);
180 netif_tx_stop_all_queues(vdev->ndev);
181
182 vxge_debug_entryexit(VXGE_TRACE,
183 "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
184}
185
186/*
187 * vxge_rx_alloc
188 *
189 * Allocate SKB.
190 */
191static struct sk_buff *
192vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
193{
194 struct net_device *dev;
195 struct sk_buff *skb;
196 struct vxge_rx_priv *rx_priv;
197
198 dev = ring->ndev;
199 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
200 ring->ndev->name, __func__, __LINE__);
201
202 rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
203
204 /* try to allocate skb first. this one may fail */
205 skb = netdev_alloc_skb(dev, skb_size +
206 VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
207 if (skb == NULL) {
208 vxge_debug_mem(VXGE_ERR,
209 "%s: out of memory to allocate SKB", dev->name);
210 ring->stats.skb_alloc_fail++;
211 return NULL;
212 }
213
214 vxge_debug_mem(VXGE_TRACE,
215 "%s: %s:%d Skb : 0x%p", ring->ndev->name,
216 __func__, __LINE__, skb);
217
218 skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
219
220 rx_priv->skb = skb;
221 rx_priv->skb_data = NULL;
222 rx_priv->data_size = skb_size;
223 vxge_debug_entryexit(VXGE_TRACE,
224 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
225
226 return skb;
227}
228
229/*
230 * vxge_rx_map
231 */
232static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
233{
234 struct vxge_rx_priv *rx_priv;
235 dma_addr_t dma_addr;
236
237 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
238 ring->ndev->name, __func__, __LINE__);
239 rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
240
241 rx_priv->skb_data = rx_priv->skb->data;
242 dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data,
243 rx_priv->data_size, PCI_DMA_FROMDEVICE);
244
245 if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
246 ring->stats.pci_map_fail++;
247 return -EIO;
248 }
249 vxge_debug_mem(VXGE_TRACE,
250 "%s: %s:%d 1 buffer mode dma_addr = 0x%llx",
251 ring->ndev->name, __func__, __LINE__,
252 (unsigned long long)dma_addr);
253 vxge_hw_ring_rxd_1b_set(dtrh, dma_addr, rx_priv->data_size);
254
255 rx_priv->data_dma = dma_addr;
256 vxge_debug_entryexit(VXGE_TRACE,
257 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
258
259 return 0;
260}
261
262/*
263 * vxge_rx_initial_replenish
264 * Allocation of RxD as an initial replenish procedure.
265 */
266static enum vxge_hw_status
267vxge_rx_initial_replenish(void *dtrh, void *userdata)
268{
269 struct vxge_ring *ring = (struct vxge_ring *)userdata;
270 struct vxge_rx_priv *rx_priv;
271
272 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
273 ring->ndev->name, __func__, __LINE__);
274 if (vxge_rx_alloc(dtrh, ring,
275 VXGE_LL_MAX_FRAME_SIZE(ring->ndev)) == NULL)
276 return VXGE_HW_FAIL;
277
278 if (vxge_rx_map(dtrh, ring)) {
279 rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
280 dev_kfree_skb(rx_priv->skb);
281
282 return VXGE_HW_FAIL;
283 }
284 vxge_debug_entryexit(VXGE_TRACE,
285 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
286
287 return VXGE_HW_OK;
288}
289
290static inline void
291vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
292 int pkt_length, struct vxge_hw_ring_rxd_info *ext_info)
293{
294
295 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
296 ring->ndev->name, __func__, __LINE__);
297 skb_record_rx_queue(skb, ring->driver_id);
298 skb->protocol = eth_type_trans(skb, ring->ndev);
299
300 u64_stats_update_begin(&ring->stats.syncp);
301 ring->stats.rx_frms++;
302 ring->stats.rx_bytes += pkt_length;
303
304 if (skb->pkt_type == PACKET_MULTICAST)
305 ring->stats.rx_mcast++;
306 u64_stats_update_end(&ring->stats.syncp);
307
308 vxge_debug_rx(VXGE_TRACE,
309 "%s: %s:%d skb protocol = %d",
310 ring->ndev->name, __func__, __LINE__, skb->protocol);
311
312 if (ext_info->vlan &&
313 ring->vlan_tag_strip == VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE)
314 __vlan_hwaccel_put_tag(skb, ext_info->vlan);
315 napi_gro_receive(ring->napi_p, skb);
316
317 vxge_debug_entryexit(VXGE_TRACE,
318 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
319}
320
321static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring,
322 struct vxge_rx_priv *rx_priv)
323{
324 pci_dma_sync_single_for_device(ring->pdev,
325 rx_priv->data_dma, rx_priv->data_size, PCI_DMA_FROMDEVICE);
326
327 vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size);
328 vxge_hw_ring_rxd_pre_post(ring->handle, dtr);
329}
330
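/*
 * vxge_post
 *
 * Re-post an RxD to the ring. Posts are batched: every
 * VXGE_HW_RXSYNC_FREQ_CNT descriptors the previously held-back descriptor
 * is posted with a write memory barrier and the current one is held back
 * in *first_dtr; all other descriptors are posted without the barrier.
 * The caller flushes the last held-back descriptor at the end of its
 * completion loop.
 */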
331static inline void vxge_post(int *dtr_cnt, void **first_dtr,
332 void *post_dtr, struct __vxge_hw_ring *ringh)
333{
334 int dtr_count = *dtr_cnt;
335 if ((*dtr_cnt % VXGE_HW_RXSYNC_FREQ_CNT) == 0) {
336 if (*first_dtr)
337 vxge_hw_ring_rxd_post_post_wmb(ringh, *first_dtr);
338 *first_dtr = post_dtr;
339 } else
340 vxge_hw_ring_rxd_post_post(ringh, post_dtr);
341 dtr_count++;
342 *dtr_cnt = dtr_count;
343}
344
345/*
346 * vxge_rx_1b_compl
347 *
348 * If the interrupt is because of a received frame or if the receive ring
349 * contains fresh as yet un-processed frames, this function is called.
350 */
351static enum vxge_hw_status
352vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
353 u8 t_code, void *userdata)
354{
355 struct vxge_ring *ring = (struct vxge_ring *)userdata;
356 struct net_device *dev = ring->ndev;
357 unsigned int dma_sizes;
358 void *first_dtr = NULL;
359 int dtr_cnt = 0;
360 int data_size;
361 dma_addr_t data_dma;
362 int pkt_length;
363 struct sk_buff *skb;
364 struct vxge_rx_priv *rx_priv;
365 struct vxge_hw_ring_rxd_info ext_info;
366 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
367 ring->ndev->name, __func__, __LINE__);
368
369 do {
370 prefetch((char *)dtr + L1_CACHE_BYTES);
371 rx_priv = vxge_hw_ring_rxd_private_get(dtr);
372 skb = rx_priv->skb;
373 data_size = rx_priv->data_size;
374 data_dma = rx_priv->data_dma;
375 prefetch(rx_priv->skb_data);
376
377 vxge_debug_rx(VXGE_TRACE,
378 "%s: %s:%d skb = 0x%p",
379 ring->ndev->name, __func__, __LINE__, skb);
380
381 vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes);
382 pkt_length = dma_sizes;
383
384 pkt_length -= ETH_FCS_LEN;
385
386 vxge_debug_rx(VXGE_TRACE,
387 "%s: %s:%d Packet Length = %d",
388 ring->ndev->name, __func__, __LINE__, pkt_length);
389
390 vxge_hw_ring_rxd_1b_info_get(ringh, dtr, &ext_info);
391
392 /* check skb validity */
393 vxge_assert(skb);
394
395 prefetch((char *)skb + L1_CACHE_BYTES);
396 if (unlikely(t_code)) {
397 if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
398 VXGE_HW_OK) {
399
400 ring->stats.rx_errors++;
401 vxge_debug_rx(VXGE_TRACE,
402 "%s: %s :%d Rx T_code is %d",
403 ring->ndev->name, __func__,
404 __LINE__, t_code);
405
406				/* If the t_code is not supported, or is anything
407				 * other than 0x5 (an unparseable packet such as an
408				 * unknown IPv6 header), drop the frame.
409				 */
410 vxge_re_pre_post(dtr, ring, rx_priv);
411
412 vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
413 ring->stats.rx_dropped++;
414 continue;
415 }
416 }
417
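		/* Copy-break receive path: frames larger than
		 * VXGE_LL_RX_COPY_THRESHOLD keep their original skb and the
		 * RxD gets a freshly allocated replacement; smaller frames
		 * are copied into a new small skb so the original buffer can
		 * be recycled back to the ring.
		 */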
418 if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {
419 if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {
420 if (!vxge_rx_map(dtr, ring)) {
421 skb_put(skb, pkt_length);
422
423 pci_unmap_single(ring->pdev, data_dma,
424 data_size, PCI_DMA_FROMDEVICE);
425
426 vxge_hw_ring_rxd_pre_post(ringh, dtr);
427 vxge_post(&dtr_cnt, &first_dtr, dtr,
428 ringh);
429 } else {
430 dev_kfree_skb(rx_priv->skb);
431 rx_priv->skb = skb;
432 rx_priv->data_size = data_size;
433 vxge_re_pre_post(dtr, ring, rx_priv);
434
435 vxge_post(&dtr_cnt, &first_dtr, dtr,
436 ringh);
437 ring->stats.rx_dropped++;
438 break;
439 }
440 } else {
441 vxge_re_pre_post(dtr, ring, rx_priv);
442
443 vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
444 ring->stats.rx_dropped++;
445 break;
446 }
447 } else {
448 struct sk_buff *skb_up;
449
450 skb_up = netdev_alloc_skb(dev, pkt_length +
451 VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
452 if (skb_up != NULL) {
453 skb_reserve(skb_up,
454 VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
455
456 pci_dma_sync_single_for_cpu(ring->pdev,
457 data_dma, data_size,
458 PCI_DMA_FROMDEVICE);
459
460 vxge_debug_mem(VXGE_TRACE,
461 "%s: %s:%d skb_up = %p",
462 ring->ndev->name, __func__,
463 __LINE__, skb);
464 memcpy(skb_up->data, skb->data, pkt_length);
465
466 vxge_re_pre_post(dtr, ring, rx_priv);
467
468 vxge_post(&dtr_cnt, &first_dtr, dtr,
469 ringh);
470				/* pass the small copied SKB up the stack instead */
471 skb = skb_up;
472 skb_put(skb, pkt_length);
473 } else {
474 vxge_re_pre_post(dtr, ring, rx_priv);
475
476 vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
477 vxge_debug_rx(VXGE_ERR,
478 "%s: vxge_rx_1b_compl: out of "
479 "memory", dev->name);
480 ring->stats.skb_alloc_fail++;
481 break;
482 }
483 }
484
485 if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
486 !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
487 (dev->features & NETIF_F_RXCSUM) && /* Offload Rx side CSUM */
488 ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
489 ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
490 skb->ip_summed = CHECKSUM_UNNECESSARY;
491 else
492 skb_checksum_none_assert(skb);
493
494
495 if (ring->rx_hwts) {
496 struct skb_shared_hwtstamps *skb_hwts;
497 u32 ns = *(u32 *)(skb->head + pkt_length);
498
499 skb_hwts = skb_hwtstamps(skb);
500 skb_hwts->hwtstamp = ns_to_ktime(ns);
501 skb_hwts->syststamp.tv64 = 0;
502 }
503
504 /* rth_hash_type and rth_it_hit are non-zero regardless of
505 * whether rss is enabled. Only the rth_value is zero/non-zero
506 * if rss is disabled/enabled, so key off of that.
507 */
508 if (ext_info.rth_value)
509 skb->rxhash = ext_info.rth_value;
510
511 vxge_rx_complete(ring, skb, ext_info.vlan,
512 pkt_length, &ext_info);
513
514 ring->budget--;
515 ring->pkts_processed++;
516 if (!ring->budget)
517 break;
518
519 } while (vxge_hw_ring_rxd_next_completed(ringh, &dtr,
520 &t_code) == VXGE_HW_OK);
521
522 if (first_dtr)
523 vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);
524
525 vxge_debug_entryexit(VXGE_TRACE,
526 "%s:%d Exiting...",
527 __func__, __LINE__);
528 return VXGE_HW_OK;
529}
530
531/*
532 * vxge_xmit_compl
533 *
534 * If an interrupt was raised to indicate DMA complete of the Tx packet,
535 * this function is called. It identifies the last TxD whose buffer was
536 * freed and frees all skbs whose data has already been DMA'ed into the
537 * NIC's internal memory.
538 */
539static enum vxge_hw_status
540vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
541 enum vxge_hw_fifo_tcode t_code, void *userdata,
542 struct sk_buff ***skb_ptr, int nr_skb, int *more)
543{
544 struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
545 struct sk_buff *skb, **done_skb = *skb_ptr;
546 int pkt_cnt = 0;
547
548 vxge_debug_entryexit(VXGE_TRACE,
549 "%s:%d Entered....", __func__, __LINE__);
550
551 do {
552 int frg_cnt;
553 skb_frag_t *frag;
554 int i = 0, j;
555 struct vxge_tx_priv *txd_priv =
556 vxge_hw_fifo_txdl_private_get(dtr);
557
558 skb = txd_priv->skb;
559 frg_cnt = skb_shinfo(skb)->nr_frags;
560 frag = &skb_shinfo(skb)->frags[0];
561
562 vxge_debug_tx(VXGE_TRACE,
563 "%s: %s:%d fifo_hw = %p dtr = %p "
564 "tcode = 0x%x", fifo->ndev->name, __func__,
565 __LINE__, fifo_hw, dtr, t_code);
566 /* check skb validity */
567 vxge_assert(skb);
568 vxge_debug_tx(VXGE_TRACE,
569 "%s: %s:%d skb = %p itxd_priv = %p frg_cnt = %d",
570 fifo->ndev->name, __func__, __LINE__,
571 skb, txd_priv, frg_cnt);
572 if (unlikely(t_code)) {
573 fifo->stats.tx_errors++;
574 vxge_debug_tx(VXGE_ERR,
575 "%s: tx: dtr %p completed due to "
576 "error t_code %01x", fifo->ndev->name,
577 dtr, t_code);
578 vxge_hw_fifo_handle_tcode(fifo_hw, dtr, t_code);
579 }
580
581 /* for unfragmented skb */
582 pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
583 skb_headlen(skb), PCI_DMA_TODEVICE);
584
585 for (j = 0; j < frg_cnt; j++) {
586 pci_unmap_page(fifo->pdev,
587 txd_priv->dma_buffers[i++],
588 frag->size, PCI_DMA_TODEVICE);
589 frag += 1;
590 }
591
592 vxge_hw_fifo_txdl_free(fifo_hw, dtr);
593
594 /* Updating the statistics block */
595 u64_stats_update_begin(&fifo->stats.syncp);
596 fifo->stats.tx_frms++;
597 fifo->stats.tx_bytes += skb->len;
598 u64_stats_update_end(&fifo->stats.syncp);
599
600 *done_skb++ = skb;
601
602 if (--nr_skb <= 0) {
603 *more = 1;
604 break;
605 }
606
607 pkt_cnt++;
608 if (pkt_cnt > fifo->indicate_max_pkts)
609 break;
610
611 } while (vxge_hw_fifo_txdl_next_completed(fifo_hw,
612 &dtr, &t_code) == VXGE_HW_OK);
613
614 *skb_ptr = done_skb;
615 if (netif_tx_queue_stopped(fifo->txq))
616 netif_tx_wake_queue(fifo->txq);
617
618 vxge_debug_entryexit(VXGE_TRACE,
619 "%s: %s:%d Exiting...",
620 fifo->ndev->name, __func__, __LINE__);
621 return VXGE_HW_OK;
622}
623
624/* select a vpath to transmit the packet */
625static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb)
626{
627 u16 queue_len, counter = 0;
628 if (skb->protocol == htons(ETH_P_IP)) {
629 struct iphdr *ip;
630 struct tcphdr *th;
631
632 ip = ip_hdr(skb);
633
634 if (!ip_is_fragment(ip)) {
635 th = (struct tcphdr *)(((unsigned char *)ip) +
636 ip->ihl*4);
637
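			/* TX_PORT_STEERING: the sum of the TCP source and
			 * destination ports, masked by vpath_selector, picks
			 * the transmit vpath.
			 */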
638 queue_len = vdev->no_of_vpath;
639 counter = (ntohs(th->source) +
640 ntohs(th->dest)) &
641 vdev->vpath_selector[queue_len - 1];
642 if (counter >= queue_len)
643 counter = queue_len - 1;
644 }
645 }
646 return counter;
647}
648
649static enum vxge_hw_status vxge_search_mac_addr_in_list(
650 struct vxge_vpath *vpath, u64 del_mac)
651{
652 struct list_head *entry, *next;
653 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
654 if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac)
655 return TRUE;
656 }
657 return FALSE;
658}
659
660static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
661{
662 struct vxge_mac_addrs *new_mac_entry;
663 u8 *mac_address = NULL;
664
665 if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
666 return TRUE;
667
668 new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
669 if (!new_mac_entry) {
670 vxge_debug_mem(VXGE_ERR,
671 "%s: memory allocation failed",
672 VXGE_DRIVER_NAME);
673 return FALSE;
674 }
675
676 list_add(&new_mac_entry->item, &vpath->mac_addr_list);
677
678 /* Copy the new mac address to the list */
679 mac_address = (u8 *)&new_mac_entry->macaddr;
680 memcpy(mac_address, mac->macaddr, ETH_ALEN);
681
682 new_mac_entry->state = mac->state;
683 vpath->mac_addr_cnt++;
684
685 if (is_multicast_ether_addr(mac->macaddr))
686 vpath->mcast_addr_cnt++;
687
688 return TRUE;
689}
690
691/* Add a mac address to DA table */
692static enum vxge_hw_status
693vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
694{
695 enum vxge_hw_status status = VXGE_HW_OK;
696 struct vxge_vpath *vpath;
697 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;
698
699 if (is_multicast_ether_addr(mac->macaddr))
700 duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
701 else
702 duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;
703
704 vpath = &vdev->vpaths[mac->vpath_no];
705 status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
706 mac->macmask, duplicate_mode);
707 if (status != VXGE_HW_OK) {
708 vxge_debug_init(VXGE_ERR,
709 "DA config add entry failed for vpath:%d",
710 vpath->device_id);
711 } else
712 if (FALSE == vxge_mac_list_add(vpath, mac))
713 status = -EPERM;
714
715 return status;
716}
717
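/*
 * vxge_learn_mac
 *
 * Return the vpath that owns the given source MAC address. If no vpath
 * knows the address yet, it is added to the DA table of the first vpath
 * with free entries; if every DA table is full, vpath 0 is programmed as
 * the catch-basin and the address is only tracked in the software list.
 */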
718static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
719{
720 struct macInfo mac_info;
721 u8 *mac_address = NULL;
722 u64 mac_addr = 0, vpath_vector = 0;
723 int vpath_idx = 0;
724 enum vxge_hw_status status = VXGE_HW_OK;
725 struct vxge_vpath *vpath = NULL;
726 struct __vxge_hw_device *hldev;
727
728 hldev = pci_get_drvdata(vdev->pdev);
729
730 mac_address = (u8 *)&mac_addr;
731 memcpy(mac_address, mac_header, ETH_ALEN);
732
733 /* Is this mac address already in the list? */
734 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
735 vpath = &vdev->vpaths[vpath_idx];
736 if (vxge_search_mac_addr_in_list(vpath, mac_addr))
737 return vpath_idx;
738 }
739
740 memset(&mac_info, 0, sizeof(struct macInfo));
741 memcpy(mac_info.macaddr, mac_header, ETH_ALEN);
742
743	/* Does any vpath have room to add the mac address to its DA table? */
744 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
745 vpath = &vdev->vpaths[vpath_idx];
746 if (vpath->mac_addr_cnt < vpath->max_mac_addr_cnt) {
747 /* Add this mac address to this vpath */
748 mac_info.vpath_no = vpath_idx;
749 mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
750 status = vxge_add_mac_addr(vdev, &mac_info);
751 if (status != VXGE_HW_OK)
752 return -EPERM;
753 return vpath_idx;
754 }
755 }
756
757 mac_info.state = VXGE_LL_MAC_ADDR_IN_LIST;
758 vpath_idx = 0;
759 mac_info.vpath_no = vpath_idx;
760 /* Is the first vpath already selected as catch-basin ? */
761 vpath = &vdev->vpaths[vpath_idx];
762 if (vpath->mac_addr_cnt > vpath->max_mac_addr_cnt) {
763 /* Add this mac address to this vpath */
764 if (FALSE == vxge_mac_list_add(vpath, &mac_info))
765 return -EPERM;
766 return vpath_idx;
767 }
768
769 /* Select first vpath as catch-basin */
770 vpath_vector = vxge_mBIT(vpath->device_id);
771 status = vxge_hw_mgmt_reg_write(vpath->vdev->devh,
772 vxge_hw_mgmt_reg_type_mrpcim,
773 0,
774 (ulong)offsetof(
775 struct vxge_hw_mrpcim_reg,
776 rts_mgr_cbasin_cfg),
777 vpath_vector);
778 if (status != VXGE_HW_OK) {
779 vxge_debug_tx(VXGE_ERR,
780 "%s: Unable to set the vpath-%d in catch-basin mode",
781 VXGE_DRIVER_NAME, vpath->device_id);
782 return -EPERM;
783 }
784
785 if (FALSE == vxge_mac_list_add(vpath, &mac_info))
786 return -EPERM;
787
788 return vpath_idx;
789}
790
791/**
792 * vxge_xmit
793 * @skb : the socket buffer containing the Tx data.
794 * @dev : device pointer.
795 *
796 * This function is the Tx entry point of the driver. The Neterion NIC supports
797 * certain protocol assist features on the Tx side, namely CSO, S/G and LSO.
798*/
799static netdev_tx_t
800vxge_xmit(struct sk_buff *skb, struct net_device *dev)
801{
802 struct vxge_fifo *fifo = NULL;
803 void *dtr_priv;
804 void *dtr = NULL;
805 struct vxgedev *vdev = NULL;
806 enum vxge_hw_status status;
807 int frg_cnt, first_frg_len;
808 skb_frag_t *frag;
809 int i = 0, j = 0, avail;
810 u64 dma_pointer;
811 struct vxge_tx_priv *txdl_priv = NULL;
812 struct __vxge_hw_fifo *fifo_hw;
813 int offload_type;
814 int vpath_no = 0;
815
816 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
817 dev->name, __func__, __LINE__);
818
819 /* A buffer with no data will be dropped */
820 if (unlikely(skb->len <= 0)) {
821 vxge_debug_tx(VXGE_ERR,
822 "%s: Buffer has no data..", dev->name);
823 dev_kfree_skb(skb);
824 return NETDEV_TX_OK;
825 }
826
827 vdev = netdev_priv(dev);
828
829 if (unlikely(!is_vxge_card_up(vdev))) {
830 vxge_debug_tx(VXGE_ERR,
831 "%s: vdev not initialized", dev->name);
832 dev_kfree_skb(skb);
833 return NETDEV_TX_OK;
834 }
835
836 if (vdev->config.addr_learn_en) {
837 vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN);
838 if (vpath_no == -EPERM) {
839 vxge_debug_tx(VXGE_ERR,
840 "%s: Failed to store the mac address",
841 dev->name);
842 dev_kfree_skb(skb);
843 return NETDEV_TX_OK;
844 }
845 }
846
847 if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
848 vpath_no = skb_get_queue_mapping(skb);
849 else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
850 vpath_no = vxge_get_vpath_no(vdev, skb);
851
852 vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no);
853
854 if (vpath_no >= vdev->no_of_vpath)
855 vpath_no = 0;
856
857 fifo = &vdev->vpaths[vpath_no].fifo;
858 fifo_hw = fifo->handle;
859
860 if (netif_tx_queue_stopped(fifo->txq))
861 return NETDEV_TX_BUSY;
862
863 avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
864 if (avail == 0) {
865 vxge_debug_tx(VXGE_ERR,
866 "%s: No free TXDs available", dev->name);
867 fifo->stats.txd_not_free++;
868 goto _exit0;
869 }
870
871 /* Last TXD? Stop tx queue to avoid dropping packets. TX
872 * completion will resume the queue.
873 */
874 if (avail == 1)
875 netif_tx_stop_queue(fifo->txq);
876
877 status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
878 if (unlikely(status != VXGE_HW_OK)) {
879 vxge_debug_tx(VXGE_ERR,
880 "%s: Out of descriptors .", dev->name);
881 fifo->stats.txd_out_of_desc++;
882 goto _exit0;
883 }
884
885 vxge_debug_tx(VXGE_TRACE,
886 "%s: %s:%d fifo_hw = %p dtr = %p dtr_priv = %p",
887 dev->name, __func__, __LINE__,
888 fifo_hw, dtr, dtr_priv);
889
890 if (vlan_tx_tag_present(skb)) {
891 u16 vlan_tag = vlan_tx_tag_get(skb);
892 vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
893 }
894
895 first_frg_len = skb_headlen(skb);
896
897 dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len,
898 PCI_DMA_TODEVICE);
899
900 if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) {
901 vxge_hw_fifo_txdl_free(fifo_hw, dtr);
902 fifo->stats.pci_map_fail++;
903 goto _exit0;
904 }
905
906 txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);
907 txdl_priv->skb = skb;
908 txdl_priv->dma_buffers[j] = dma_pointer;
909
910 frg_cnt = skb_shinfo(skb)->nr_frags;
911 vxge_debug_tx(VXGE_TRACE,
912 "%s: %s:%d skb = %p txdl_priv = %p "
913 "frag_cnt = %d dma_pointer = 0x%llx", dev->name,
914 __func__, __LINE__, skb, txdl_priv,
915 frg_cnt, (unsigned long long)dma_pointer);
916
917 vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
918 first_frg_len);
919
920 frag = &skb_shinfo(skb)->frags[0];
921 for (i = 0; i < frg_cnt; i++) {
922 /* ignore 0 length fragment */
923 if (!frag->size)
924 continue;
925
926 dma_pointer = (u64) pci_map_page(fifo->pdev, frag->page,
927 frag->page_offset, frag->size,
928 PCI_DMA_TODEVICE);
929
930 if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer)))
931 goto _exit2;
932 vxge_debug_tx(VXGE_TRACE,
933 "%s: %s:%d frag = %d dma_pointer = 0x%llx",
934 dev->name, __func__, __LINE__, i,
935 (unsigned long long)dma_pointer);
936
937 txdl_priv->dma_buffers[j] = dma_pointer;
938 vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
939 frag->size);
940 frag += 1;
941 }
942
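	/* For GSO frames the MSS is programmed into the TxD so the NIC can
	 * segment the payload in hardware; a GSO skb without a valid MSS is
	 * treated as a driver/stack inconsistency and the frame is dropped.
	 */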
943 offload_type = vxge_offload_type(skb);
944
945 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
946 int mss = vxge_tcp_mss(skb);
947 if (mss) {
948 vxge_debug_tx(VXGE_TRACE, "%s: %s:%d mss = %d",
949 dev->name, __func__, __LINE__, mss);
950 vxge_hw_fifo_txdl_mss_set(dtr, mss);
951 } else {
952 vxge_assert(skb->len <=
953 dev->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE);
954 vxge_assert(0);
955 goto _exit1;
956 }
957 }
958
959 if (skb->ip_summed == CHECKSUM_PARTIAL)
960 vxge_hw_fifo_txdl_cksum_set_bits(dtr,
961 VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
962 VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN |
963 VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);
964
965 vxge_hw_fifo_txdl_post(fifo_hw, dtr);
966
967 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
968 dev->name, __func__, __LINE__);
969 return NETDEV_TX_OK;
970
971_exit2:
972 vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);
973_exit1:
974 j = 0;
975 frag = &skb_shinfo(skb)->frags[0];
976
977 pci_unmap_single(fifo->pdev, txdl_priv->dma_buffers[j++],
978 skb_headlen(skb), PCI_DMA_TODEVICE);
979
980 for (; j < i; j++) {
981 pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j],
982 frag->size, PCI_DMA_TODEVICE);
983 frag += 1;
984 }
985
986 vxge_hw_fifo_txdl_free(fifo_hw, dtr);
987_exit0:
988 netif_tx_stop_queue(fifo->txq);
989 dev_kfree_skb(skb);
990
991 return NETDEV_TX_OK;
992}
993
994/*
995 * vxge_rx_term
996 *
997 * Function will be called by hw function to abort all outstanding receive
998 * descriptors.
999 */
1000static void
1001vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata)
1002{
1003 struct vxge_ring *ring = (struct vxge_ring *)userdata;
1004 struct vxge_rx_priv *rx_priv =
1005 vxge_hw_ring_rxd_private_get(dtrh);
1006
1007 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
1008 ring->ndev->name, __func__, __LINE__);
1009 if (state != VXGE_HW_RXD_STATE_POSTED)
1010 return;
1011
1012 pci_unmap_single(ring->pdev, rx_priv->data_dma,
1013 rx_priv->data_size, PCI_DMA_FROMDEVICE);
1014
1015 dev_kfree_skb(rx_priv->skb);
1016 rx_priv->skb_data = NULL;
1017
1018 vxge_debug_entryexit(VXGE_TRACE,
1019 "%s: %s:%d Exiting...",
1020 ring->ndev->name, __func__, __LINE__);
1021}
1022
1023/*
1024 * vxge_tx_term
1025 *
1026 * Function will be called to abort all outstanding tx descriptors
1027 */
1028static void
1029vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
1030{
1031 struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
1032 skb_frag_t *frag;
1033 int i = 0, j, frg_cnt;
1034 struct vxge_tx_priv *txd_priv = vxge_hw_fifo_txdl_private_get(dtrh);
1035 struct sk_buff *skb = txd_priv->skb;
1036
1037 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1038
1039 if (state != VXGE_HW_TXDL_STATE_POSTED)
1040 return;
1041
1042 /* check skb validity */
1043 vxge_assert(skb);
1044 frg_cnt = skb_shinfo(skb)->nr_frags;
1045 frag = &skb_shinfo(skb)->frags[0];
1046
1047 /* for unfragmented skb */
1048 pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
1049 skb_headlen(skb), PCI_DMA_TODEVICE);
1050
1051 for (j = 0; j < frg_cnt; j++) {
1052 pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++],
1053 frag->size, PCI_DMA_TODEVICE);
1054 frag += 1;
1055 }
1056
1057 dev_kfree_skb(skb);
1058
1059 vxge_debug_entryexit(VXGE_TRACE,
1060 "%s:%d Exiting...", __func__, __LINE__);
1061}
1062
1063static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
1064{
1065 struct list_head *entry, *next;
1066 u64 del_mac = 0;
1067 u8 *mac_address = (u8 *) (&del_mac);
1068
1069 /* Copy the mac address to delete from the list */
1070 memcpy(mac_address, mac->macaddr, ETH_ALEN);
1071
1072 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
1073 if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
1074 list_del(entry);
1075 kfree((struct vxge_mac_addrs *)entry);
1076 vpath->mac_addr_cnt--;
1077
1078 if (is_multicast_ether_addr(mac->macaddr))
1079 vpath->mcast_addr_cnt--;
1080 return TRUE;
1081 }
1082 }
1083
1084 return FALSE;
1085}
1086
1087/* delete a mac address from DA table */
1088static enum vxge_hw_status
1089vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
1090{
1091 enum vxge_hw_status status = VXGE_HW_OK;
1092 struct vxge_vpath *vpath;
1093
1094 vpath = &vdev->vpaths[mac->vpath_no];
1095 status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
1096 mac->macmask);
1097 if (status != VXGE_HW_OK) {
1098 vxge_debug_init(VXGE_ERR,
1099 "DA config delete entry failed for vpath:%d",
1100 vpath->device_id);
1101 } else
1102 vxge_mac_list_del(vpath, mac);
1103 return status;
1104}
1105
1106/**
1107 * vxge_set_multicast
1108 * @dev: pointer to the device structure
1109 *
1110 * Entry point for multicast address enable/disable
1111 * This function is a driver entry point which gets called by the kernel
1112 * whenever multicast addresses must be enabled/disabled. This also gets
1113 * called to set/reset promiscuous mode. Depending on the device flags, we
1114 * determine whether multicast addresses must be enabled or promiscuous mode
1115 * must be enabled or disabled.
1116 */
1117static void vxge_set_multicast(struct net_device *dev)
1118{
1119 struct netdev_hw_addr *ha;
1120 struct vxgedev *vdev;
1121 int i, mcast_cnt = 0;
1122 struct __vxge_hw_device *hldev;
1123 struct vxge_vpath *vpath;
1124 enum vxge_hw_status status = VXGE_HW_OK;
1125 struct macInfo mac_info;
1126 int vpath_idx = 0;
1127 struct vxge_mac_addrs *mac_entry;
1128 struct list_head *list_head;
1129 struct list_head *entry, *next;
1130 u8 *mac_address = NULL;
1131
1132 vxge_debug_entryexit(VXGE_TRACE,
1133 "%s:%d", __func__, __LINE__);
1134
1135 vdev = netdev_priv(dev);
1136 hldev = (struct __vxge_hw_device *)vdev->devh;
1137
1138 if (unlikely(!is_vxge_card_up(vdev)))
1139 return;
1140
1141 if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
1142 for (i = 0; i < vdev->no_of_vpath; i++) {
1143 vpath = &vdev->vpaths[i];
1144 vxge_assert(vpath->is_open);
1145 status = vxge_hw_vpath_mcast_enable(vpath->handle);
1146 if (status != VXGE_HW_OK)
1147 vxge_debug_init(VXGE_ERR, "failed to enable "
1148 "multicast, status %d", status);
1149 vdev->all_multi_flg = 1;
1150 }
1151 } else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
1152 for (i = 0; i < vdev->no_of_vpath; i++) {
1153 vpath = &vdev->vpaths[i];
1154 vxge_assert(vpath->is_open);
1155 status = vxge_hw_vpath_mcast_disable(vpath->handle);
1156 if (status != VXGE_HW_OK)
1157 vxge_debug_init(VXGE_ERR, "failed to disable "
1158 "multicast, status %d", status);
1159 vdev->all_multi_flg = 0;
1160 }
1161 }
1162
1163
1164 if (!vdev->config.addr_learn_en) {
1165 for (i = 0; i < vdev->no_of_vpath; i++) {
1166 vpath = &vdev->vpaths[i];
1167 vxge_assert(vpath->is_open);
1168
1169 if (dev->flags & IFF_PROMISC)
1170 status = vxge_hw_vpath_promisc_enable(
1171 vpath->handle);
1172 else
1173 status = vxge_hw_vpath_promisc_disable(
1174 vpath->handle);
1175 if (status != VXGE_HW_OK)
1176 vxge_debug_init(VXGE_ERR, "failed to %s promisc"
1177 ", status %d", dev->flags&IFF_PROMISC ?
1178 "enable" : "disable", status);
1179 }
1180 }
1181
1182 memset(&mac_info, 0, sizeof(struct macInfo));
1183 /* Update individual M_CAST address list */
1184 if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) {
1185 mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
1186 list_head = &vdev->vpaths[0].mac_addr_list;
1187 if ((netdev_mc_count(dev) +
1188 (vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
1189 vdev->vpaths[0].max_mac_addr_cnt)
1190 goto _set_all_mcast;
1191
1192 /* Delete previous MC's */
1193 for (i = 0; i < mcast_cnt; i++) {
1194 list_for_each_safe(entry, next, list_head) {
1195 mac_entry = (struct vxge_mac_addrs *)entry;
1196 /* Copy the mac address to delete */
1197 mac_address = (u8 *)&mac_entry->macaddr;
1198 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
1199
1200 if (is_multicast_ether_addr(mac_info.macaddr)) {
1201 for (vpath_idx = 0; vpath_idx <
1202 vdev->no_of_vpath;
1203 vpath_idx++) {
1204 mac_info.vpath_no = vpath_idx;
1205 status = vxge_del_mac_addr(
1206 vdev,
1207 &mac_info);
1208 }
1209 }
1210 }
1211 }
1212
1213 /* Add new ones */
1214 netdev_for_each_mc_addr(ha, dev) {
1215 memcpy(mac_info.macaddr, ha->addr, ETH_ALEN);
1216 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
1217 vpath_idx++) {
1218 mac_info.vpath_no = vpath_idx;
1219 mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
1220 status = vxge_add_mac_addr(vdev, &mac_info);
1221 if (status != VXGE_HW_OK) {
1222 vxge_debug_init(VXGE_ERR,
1223						"%s:%d Setting individual "
1224						"multicast address failed",
1225 __func__, __LINE__);
1226 goto _set_all_mcast;
1227 }
1228 }
1229 }
1230
1231 return;
1232_set_all_mcast:
1233 mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
1234 /* Delete previous MC's */
1235 for (i = 0; i < mcast_cnt; i++) {
1236 list_for_each_safe(entry, next, list_head) {
1237 mac_entry = (struct vxge_mac_addrs *)entry;
1238 /* Copy the mac address to delete */
1239 mac_address = (u8 *)&mac_entry->macaddr;
1240 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
1241
1242 if (is_multicast_ether_addr(mac_info.macaddr))
1243 break;
1244 }
1245
1246 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
1247 vpath_idx++) {
1248 mac_info.vpath_no = vpath_idx;
1249 status = vxge_del_mac_addr(vdev, &mac_info);
1250 }
1251 }
1252
1253 /* Enable all multicast */
1254 for (i = 0; i < vdev->no_of_vpath; i++) {
1255 vpath = &vdev->vpaths[i];
1256 vxge_assert(vpath->is_open);
1257
1258 status = vxge_hw_vpath_mcast_enable(vpath->handle);
1259 if (status != VXGE_HW_OK) {
1260 vxge_debug_init(VXGE_ERR,
1261 "%s:%d Enabling all multicasts failed",
1262 __func__, __LINE__);
1263 }
1264 vdev->all_multi_flg = 1;
1265 }
1266 dev->flags |= IFF_ALLMULTI;
1267 }
1268
1269 vxge_debug_entryexit(VXGE_TRACE,
1270 "%s:%d Exiting...", __func__, __LINE__);
1271}
1272
1273/**
1274 * vxge_set_mac_addr
1275 * @dev: pointer to the device structure
1276 *
1277 * Update entry "0" (default MAC addr)
1278 */
1279static int vxge_set_mac_addr(struct net_device *dev, void *p)
1280{
1281 struct sockaddr *addr = p;
1282 struct vxgedev *vdev;
1283 struct __vxge_hw_device *hldev;
1284 enum vxge_hw_status status = VXGE_HW_OK;
1285 struct macInfo mac_info_new, mac_info_old;
1286 int vpath_idx = 0;
1287
1288 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1289
1290 vdev = netdev_priv(dev);
1291 hldev = vdev->devh;
1292
1293 if (!is_valid_ether_addr(addr->sa_data))
1294 return -EINVAL;
1295
1296 memset(&mac_info_new, 0, sizeof(struct macInfo));
1297 memset(&mac_info_old, 0, sizeof(struct macInfo));
1298
1299 vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...",
1300 __func__, __LINE__);
1301
1302 /* Get the old address */
1303 memcpy(mac_info_old.macaddr, dev->dev_addr, dev->addr_len);
1304
1305 /* Copy the new address */
1306 memcpy(mac_info_new.macaddr, addr->sa_data, dev->addr_len);
1307
1308 /* First delete the old mac address from all the vpaths
1309	   as we can't specify the index while adding a new mac address */
1310 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
1311 struct vxge_vpath *vpath = &vdev->vpaths[vpath_idx];
1312 if (!vpath->is_open) {
1313			/* This can happen when this interface is added to or
1314			removed from the bonding interface. Delete this station
1315			address from the linked list */
1316 vxge_mac_list_del(vpath, &mac_info_old);
1317
1318 /* Add this new address to the linked list
1319 for later restoring */
1320 vxge_mac_list_add(vpath, &mac_info_new);
1321
1322 continue;
1323 }
1324 /* Delete the station address */
1325 mac_info_old.vpath_no = vpath_idx;
1326 status = vxge_del_mac_addr(vdev, &mac_info_old);
1327 }
1328
1329 if (unlikely(!is_vxge_card_up(vdev))) {
1330 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1331 return VXGE_HW_OK;
1332 }
1333
1334 /* Set this mac address to all the vpaths */
1335 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
1336 mac_info_new.vpath_no = vpath_idx;
1337 mac_info_new.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
1338 status = vxge_add_mac_addr(vdev, &mac_info_new);
1339 if (status != VXGE_HW_OK)
1340 return -EINVAL;
1341 }
1342
1343 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1344
1345 return status;
1346}
1347
1348/*
1349 * vxge_vpath_intr_enable
1350 * @vdev: pointer to vdev
1351 * @vp_id: vpath for which to enable the interrupts
1352 *
1353 * Enables the interrupts for the vpath
1354*/
1355static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
1356{
1357 struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
1358 int msix_id = 0;
1359 int tim_msix_id[4] = {0, 1, 0, 0};
1360 int alarm_msix_id = VXGE_ALARM_MSIX_ID;
1361
1362 vxge_hw_vpath_intr_enable(vpath->handle);
1363
1364 if (vdev->config.intr_type == INTA)
1365 vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
1366 else {
1367 vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
1368 alarm_msix_id);
1369
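		/* Each vpath owns a block of VXGE_HW_VPATH_MSIX_ACTIVE
		 * MSI-X vectors: unmask the two TIM (Tx/Rx) vectors of this
		 * vpath here, and the alarm vector, which lives in the first
		 * vpath's block, below.
		 */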
1370 msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
1371 vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
1372 vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);
1373
1374 /* enable the alarm vector */
1375 msix_id = (vpath->handle->vpath->hldev->first_vp_id *
1376 VXGE_HW_VPATH_MSIX_ACTIVE) + alarm_msix_id;
1377 vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
1378 }
1379}
1380
1381/*
1382 * vxge_vpath_intr_disable
1383 * @vdev: pointer to vdev
1384 * @vp_id: vpath for which to disable the interrupts
1385 *
1386 * Disables the interrupts for the vpath
1387*/
1388static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
1389{
1390 struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
1391 struct __vxge_hw_device *hldev;
1392 int msix_id;
1393
1394 hldev = pci_get_drvdata(vdev->pdev);
1395
1396 vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id);
1397
1398 vxge_hw_vpath_intr_disable(vpath->handle);
1399
1400 if (vdev->config.intr_type == INTA)
1401 vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
1402 else {
1403 msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
1404 vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
1405 vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);
1406
1407 /* disable the alarm vector */
1408 msix_id = (vpath->handle->vpath->hldev->first_vp_id *
1409 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
1410 vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
1411 }
1412}
1413
1414/* Search for a mac address in the DA table */
1415static enum vxge_hw_status
1416vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, struct macInfo *mac)
1417{
1418 enum vxge_hw_status status = VXGE_HW_OK;
1419 unsigned char macmask[ETH_ALEN];
1420 unsigned char macaddr[ETH_ALEN];
1421
1422 status = vxge_hw_vpath_mac_addr_get(vpath->handle,
1423 macaddr, macmask);
1424 if (status != VXGE_HW_OK) {
1425 vxge_debug_init(VXGE_ERR,
1426 "DA config list entry failed for vpath:%d",
1427 vpath->device_id);
1428 return status;
1429 }
1430
1431 while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
1432 status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
1433 macaddr, macmask);
1434 if (status != VXGE_HW_OK)
1435 break;
1436 }
1437
1438 return status;
1439}
1440
1441/* Store all mac addresses from the list to the DA table */
1442static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
1443{
1444 enum vxge_hw_status status = VXGE_HW_OK;
1445 struct macInfo mac_info;
1446 u8 *mac_address = NULL;
1447 struct list_head *entry, *next;
1448
1449 memset(&mac_info, 0, sizeof(struct macInfo));
1450
1451 if (vpath->is_open) {
1452 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
1453 mac_address =
1454 (u8 *)&
1455 ((struct vxge_mac_addrs *)entry)->macaddr;
1456 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
1457 ((struct vxge_mac_addrs *)entry)->state =
1458 VXGE_LL_MAC_ADDR_IN_DA_TABLE;
1459 /* does this mac address already exist in da table? */
1460 status = vxge_search_mac_addr_in_da_table(vpath,
1461 &mac_info);
1462 if (status != VXGE_HW_OK) {
1463 /* Add this mac address to the DA table */
1464 status = vxge_hw_vpath_mac_addr_add(
1465 vpath->handle, mac_info.macaddr,
1466 mac_info.macmask,
1467 VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
1468 if (status != VXGE_HW_OK) {
1469 vxge_debug_init(VXGE_ERR,
1470 "DA add entry failed for vpath:%d",
1471 vpath->device_id);
1472 ((struct vxge_mac_addrs *)entry)->state
1473 = VXGE_LL_MAC_ADDR_IN_LIST;
1474 }
1475 }
1476 }
1477 }
1478
1479 return status;
1480}
1481
1482/* Store all vlan ids from the list to the vid table */
1483static enum vxge_hw_status
1484vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
1485{
1486 enum vxge_hw_status status = VXGE_HW_OK;
1487 struct vxgedev *vdev = vpath->vdev;
1488 u16 vid;
1489
1490 if (!vpath->is_open)
1491 return status;
1492
1493 for_each_set_bit(vid, vdev->active_vlans, VLAN_N_VID)
1494 status = vxge_hw_vpath_vid_add(vpath->handle, vid);
1495
1496 return status;
1497}
1498
1499/*
1500 * vxge_reset_vpath
1501 * @vdev: pointer to vdev
1502 * @vp_id: vpath to reset
1503 *
1504 * Resets the vpath
1505*/
1506static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
1507{
1508 enum vxge_hw_status status = VXGE_HW_OK;
1509 struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
1510 int ret = 0;
1511
1512 /* check if device is down already */
1513 if (unlikely(!is_vxge_card_up(vdev)))
1514 return 0;
1515
1516 /* is device reset already scheduled */
1517 if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
1518 return 0;
1519
1520 if (vpath->handle) {
1521 if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
1522 if (is_vxge_card_up(vdev) &&
1523 vxge_hw_vpath_recover_from_reset(vpath->handle)
1524 != VXGE_HW_OK) {
1525 vxge_debug_init(VXGE_ERR,
1526					"vxge_hw_vpath_recover_from_reset "
1527 "failed for vpath:%d", vp_id);
1528 return status;
1529 }
1530 } else {
1531 vxge_debug_init(VXGE_ERR,
1532				"vxge_hw_vpath_reset failed for "
1533 "vpath:%d", vp_id);
1534 return status;
1535 }
1536 } else
1537 return VXGE_HW_FAIL;
1538
1539 vxge_restore_vpath_mac_addr(vpath);
1540 vxge_restore_vpath_vid_table(vpath);
1541
1542 /* Enable all broadcast */
1543 vxge_hw_vpath_bcast_enable(vpath->handle);
1544
1545 /* Enable all multicast */
1546 if (vdev->all_multi_flg) {
1547 status = vxge_hw_vpath_mcast_enable(vpath->handle);
1548 if (status != VXGE_HW_OK)
1549 vxge_debug_init(VXGE_ERR,
1550 "%s:%d Enabling multicast failed",
1551 __func__, __LINE__);
1552 }
1553
1554 /* Enable the interrupts */
1555 vxge_vpath_intr_enable(vdev, vp_id);
1556
1557 smp_wmb();
1558
1559 /* Enable the flow of traffic through the vpath */
1560 vxge_hw_vpath_enable(vpath->handle);
1561
1562 smp_wmb();
1563 vxge_hw_vpath_rx_doorbell_init(vpath->handle);
1564 vpath->ring.last_status = VXGE_HW_OK;
1565
1566 /* Vpath reset done */
1567 clear_bit(vp_id, &vdev->vp_reset);
1568
1569 /* Start the vpath queue */
1570 if (netif_tx_queue_stopped(vpath->fifo.txq))
1571 netif_tx_wake_queue(vpath->fifo.txq);
1572
1573 return ret;
1574}
1575
1576/* Configure CI */
1577static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev)
1578{
1579 int i = 0;
1580
1581 /* Enable CI for RTI */
1582 if (vdev->config.intr_type == MSI_X) {
1583 for (i = 0; i < vdev->no_of_vpath; i++) {
1584 struct __vxge_hw_ring *hw_ring;
1585
1586 hw_ring = vdev->vpaths[i].ring.handle;
1587 vxge_hw_vpath_dynamic_rti_ci_set(hw_ring);
1588 }
1589 }
1590
1591 /* Enable CI for TTI */
1592 for (i = 0; i < vdev->no_of_vpath; i++) {
1593 struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle;
1594 vxge_hw_vpath_tti_ci_set(hw_fifo);
1595 /*
1596		 * For INTA (with or without NAPI), set CI on for only one
1597		 * vpath, since there is only one free-running timer.
1598 */
1599 if ((vdev->config.intr_type == INTA) && (i == 0))
1600 break;
1601 }
1602
1603 return;
1604}
1605
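/*
 * do_vxge_reset
 *
 * Perform the requested reset. VXGE_LL_FULL_RESET resets and recovers all
 * vpaths in one pass; VXGE_LL_START_RESET and VXGE_LL_COMPL_RESET split the
 * work into a stop phase and a completion phase. Traffic is quiesced, the
 * vpaths are reset, the DA and VLAN tables are reprogrammed and interrupts
 * are re-enabled before the Tx queues are woken again.
 */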
1606static int do_vxge_reset(struct vxgedev *vdev, int event)
1607{
1608 enum vxge_hw_status status;
1609 int ret = 0, vp_id, i;
1610
1611 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1612
1613 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) {
1614 /* check if device is down already */
1615 if (unlikely(!is_vxge_card_up(vdev)))
1616 return 0;
1617
1618 /* is reset already scheduled */
1619 if (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
1620 return 0;
1621 }
1622
1623 if (event == VXGE_LL_FULL_RESET) {
1624 netif_carrier_off(vdev->ndev);
1625
1626 /* wait for all the vpath reset to complete */
1627 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
1628 while (test_bit(vp_id, &vdev->vp_reset))
1629 msleep(50);
1630 }
1631
1632 netif_carrier_on(vdev->ndev);
1633
1634 /* if execution mode is set to debug, don't reset the adapter */
1635 if (unlikely(vdev->exec_mode)) {
1636 vxge_debug_init(VXGE_ERR,
1637 "%s: execution mode is debug, returning..",
1638 vdev->ndev->name);
1639 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
1640 netif_tx_stop_all_queues(vdev->ndev);
1641 return 0;
1642 }
1643 }
1644
1645 if (event == VXGE_LL_FULL_RESET) {
1646 vxge_hw_device_wait_receive_idle(vdev->devh);
1647 vxge_hw_device_intr_disable(vdev->devh);
1648
1649 switch (vdev->cric_err_event) {
1650 case VXGE_HW_EVENT_UNKNOWN:
1651 netif_tx_stop_all_queues(vdev->ndev);
1652 vxge_debug_init(VXGE_ERR,
1653				"fatal: %s: Disabling device due to "
1654 "unknown error",
1655 vdev->ndev->name);
1656 ret = -EPERM;
1657 goto out;
1658 case VXGE_HW_EVENT_RESET_START:
1659 break;
1660 case VXGE_HW_EVENT_RESET_COMPLETE:
1661 case VXGE_HW_EVENT_LINK_DOWN:
1662 case VXGE_HW_EVENT_LINK_UP:
1663 case VXGE_HW_EVENT_ALARM_CLEARED:
1664 case VXGE_HW_EVENT_ECCERR:
1665 case VXGE_HW_EVENT_MRPCIM_ECCERR:
1666 ret = -EPERM;
1667 goto out;
1668 case VXGE_HW_EVENT_FIFO_ERR:
1669 case VXGE_HW_EVENT_VPATH_ERR:
1670 break;
1671 case VXGE_HW_EVENT_CRITICAL_ERR:
1672 netif_tx_stop_all_queues(vdev->ndev);
1673 vxge_debug_init(VXGE_ERR,
1674				"fatal: %s: Disabling device due to "
1675 "serious error",
1676 vdev->ndev->name);
1677 /* SOP or device reset required */
1678 /* This event is not currently used */
1679 ret = -EPERM;
1680 goto out;
1681 case VXGE_HW_EVENT_SERR:
1682 netif_tx_stop_all_queues(vdev->ndev);
1683 vxge_debug_init(VXGE_ERR,
1684				"fatal: %s: Disabling device due to "
1685 "serious error",
1686 vdev->ndev->name);
1687 ret = -EPERM;
1688 goto out;
1689 case VXGE_HW_EVENT_SRPCIM_SERR:
1690 case VXGE_HW_EVENT_MRPCIM_SERR:
1691 ret = -EPERM;
1692 goto out;
1693 case VXGE_HW_EVENT_SLOT_FREEZE:
1694 netif_tx_stop_all_queues(vdev->ndev);
1695 vxge_debug_init(VXGE_ERR,
1696				"fatal: %s: Disabling device due to "
1697 "slot freeze",
1698 vdev->ndev->name);
1699 ret = -EPERM;
1700 goto out;
1701 default:
1702 break;
1703
1704 }
1705 }
1706
1707 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET))
1708 netif_tx_stop_all_queues(vdev->ndev);
1709
1710 if (event == VXGE_LL_FULL_RESET) {
1711 status = vxge_reset_all_vpaths(vdev);
1712 if (status != VXGE_HW_OK) {
1713 vxge_debug_init(VXGE_ERR,
1714 "fatal: %s: can not reset vpaths",
1715 vdev->ndev->name);
1716 ret = -EPERM;
1717 goto out;
1718 }
1719 }
1720
1721 if (event == VXGE_LL_COMPL_RESET) {
1722 for (i = 0; i < vdev->no_of_vpath; i++)
1723 if (vdev->vpaths[i].handle) {
1724 if (vxge_hw_vpath_recover_from_reset(
1725 vdev->vpaths[i].handle)
1726 != VXGE_HW_OK) {
1727 vxge_debug_init(VXGE_ERR,
1728 "vxge_hw_vpath_recover_"
1729 "from_reset failed for vpath: "
1730 "%d", i);
1731 ret = -EPERM;
1732 goto out;
1733 }
1734 } else {
1735 vxge_debug_init(VXGE_ERR,
1736 "vxge_hw_vpath_reset failed for "
1737 "vpath:%d", i);
1738 ret = -EPERM;
1739 goto out;
1740 }
1741 }
1742
1743 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) {
1744 /* Reprogram the DA table with populated mac addresses */
1745 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
1746 vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
1747 vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);
1748 }
1749
1750 /* enable vpath interrupts */
1751 for (i = 0; i < vdev->no_of_vpath; i++)
1752 vxge_vpath_intr_enable(vdev, i);
1753
1754 vxge_hw_device_intr_enable(vdev->devh);
1755
1756 smp_wmb();
1757
1758 /* Indicate card up */
1759 set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
1760
1761 /* Get the traffic to flow through the vpaths */
1762 for (i = 0; i < vdev->no_of_vpath; i++) {
1763 vxge_hw_vpath_enable(vdev->vpaths[i].handle);
1764 smp_wmb();
1765 vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
1766 }
1767
1768 netif_tx_wake_all_queues(vdev->ndev);
1769 }
1770
1771 /* configure CI */
1772 vxge_config_ci_for_tti_rti(vdev);
1773
1774out:
1775 vxge_debug_entryexit(VXGE_TRACE,
1776 "%s:%d Exiting...", __func__, __LINE__);
1777
1778 /* Indicate reset done */
1779 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET))
1780 clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
1781 return ret;
1782}
1783
1784/*
1785 * vxge_reset
1786 * @vdev: pointer to ll device
1787 *
1788 * driver may reset the chip on events of serr, eccerr, etc
1789 */
1790static void vxge_reset(struct work_struct *work)
1791{
1792 struct vxgedev *vdev = container_of(work, struct vxgedev, reset_task);
1793
1794 if (!netif_running(vdev->ndev))
1795 return;
1796
1797 do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
1798}
1799
1800/**
1801 * vxge_poll - Receive handler when Receive Polling is used.
1802 * @dev: pointer to the device structure.
1803 * @budget: Number of packets budgeted to be processed in this iteration.
1804 *
1805 * This function comes into the picture only if the receive side is handled
1806 * through polling (called NAPI in Linux). It mostly does what the normal
1807 * Rx interrupt handler does in terms of descriptor and packet processing
1808 * but not in an interrupt context. Also it will process a specified number
1809 * of packets at most in one iteration. This value is passed down by the
1810 * kernel as the function argument 'budget'.
1811 */
1812static int vxge_poll_msix(struct napi_struct *napi, int budget)
1813{
1814 struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi);
1815 int pkts_processed;
1816 int budget_org = budget;
1817
1818 ring->budget = budget;
1819 ring->pkts_processed = 0;
1820 vxge_hw_vpath_poll_rx(ring->handle);
1821 pkts_processed = ring->pkts_processed;
1822
1823 if (ring->pkts_processed < budget_org) {
1824 napi_complete(napi);
1825
1826 /* Re enable the Rx interrupts for the vpath */
1827 vxge_hw_channel_msix_unmask(
1828 (struct __vxge_hw_channel *)ring->handle,
1829 ring->rx_vector_no);
1830 mmiowb();
1831 }
1832
1833	/* Copy and return the local variable in case the interrupt fires
1834	 * again right after the MSI-X vector is unmasked above, which
1835	 * could preempt this NAPI thread */
1836 return pkts_processed;
1837}
1838
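/*
 * vxge_poll_inta - NAPI poll routine used in INTA mode.
 *
 * With a single interrupt line there is one NAPI context for the whole
 * device, so the budget is shared across all vpaths: each ring is polled
 * in turn until the rings are drained or the budget is exhausted, Tx
 * completions are then reaped, and the device interrupt is unmasked again
 * if the budget was not fully used.
 */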
1839static int vxge_poll_inta(struct napi_struct *napi, int budget)
1840{
1841 struct vxgedev *vdev = container_of(napi, struct vxgedev, napi);
1842 int pkts_processed = 0;
1843 int i;
1844 int budget_org = budget;
1845 struct vxge_ring *ring;
1846
1847 struct __vxge_hw_device *hldev = pci_get_drvdata(vdev->pdev);
1848
1849 for (i = 0; i < vdev->no_of_vpath; i++) {
1850 ring = &vdev->vpaths[i].ring;
1851 ring->budget = budget;
1852 ring->pkts_processed = 0;
1853 vxge_hw_vpath_poll_rx(ring->handle);
1854 pkts_processed += ring->pkts_processed;
1855 budget -= ring->pkts_processed;
1856 if (budget <= 0)
1857 break;
1858 }
1859
1860 VXGE_COMPLETE_ALL_TX(vdev);
1861
1862 if (pkts_processed < budget_org) {
1863 napi_complete(napi);
1864 /* Re enable the Rx interrupts for the ring */
1865 vxge_hw_device_unmask_all(hldev);
1866 vxge_hw_device_flush_io(hldev);
1867 }
1868
1869 return pkts_processed;
1870}
1871
1872#ifdef CONFIG_NET_POLL_CONTROLLER
1873/**
1874 * vxge_netpoll - netpoll event handler entry point
1875 * @dev : pointer to the device structure.
1876 * Description:
1877 * This function will be called by upper layer to check for events on the
1878 * interface in situations where interrupts are disabled. It is used for
1879 * specific in-kernel networking tasks, such as remote consoles and kernel
1880 * debugging over the network (for example, netdump in Red Hat).
1881 */
1882static void vxge_netpoll(struct net_device *dev)
1883{
1884 struct __vxge_hw_device *hldev;
1885 struct vxgedev *vdev;
1886
1887 vdev = netdev_priv(dev);
1888 hldev = pci_get_drvdata(vdev->pdev);
1889
1890 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1891
1892 if (pci_channel_offline(vdev->pdev))
1893 return;
1894
1895 disable_irq(dev->irq);
1896 vxge_hw_device_clear_tx_rx(hldev);
1897
1898 vxge_hw_device_clear_tx_rx(hldev);
1899 VXGE_COMPLETE_ALL_RX(vdev);
1900 VXGE_COMPLETE_ALL_TX(vdev);
1901
1902 enable_irq(dev->irq);
1903
1904 vxge_debug_entryexit(VXGE_TRACE,
1905 "%s:%d Exiting...", __func__, __LINE__);
1906}
1907#endif
1908
1909/* RTH configuration */
1910static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
1911{
1912 enum vxge_hw_status status = VXGE_HW_OK;
1913 struct vxge_hw_rth_hash_types hash_types;
1914 u8 itable[256] = {0}; /* indirection table */
1915 u8 mtable[256] = {0}; /* CPU to vpath mapping */
1916 int index;
1917
1918 /*
1919 * Filling
1920 * - itable with bucket numbers
1921 * - mtable with bucket-to-vpath mapping
1922 */
1923 for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) {
1924 itable[index] = index;
1925 mtable[index] = index % vdev->no_of_vpath;
1926 }
1927
1928 /* set indirection table, bucket-to-vpath mapping */
1929 status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
1930 vdev->no_of_vpath,
1931 mtable, itable,
1932 vdev->config.rth_bkt_sz);
1933 if (status != VXGE_HW_OK) {
1934 vxge_debug_init(VXGE_ERR,
1935 "RTH indirection table configuration failed "
1936 "for vpath:%d", vdev->vpaths[0].device_id);
1937 return status;
1938 }
1939
1940 /* Fill RTH hash types */
1941 hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
1942 hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
1943 hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
1944 hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
1945 hash_types.hash_type_tcpipv6ex_en =
1946 vdev->config.rth_hash_type_tcpipv6ex;
1947 hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;
1948
1949 /*
1950 * Because the itable_set() method uses the active_table field
1951 * for the target virtual path the RTH config should be updated
1952 * for all VPATHs. The h/w only uses the lowest numbered VPATH
1953 * when steering frames.
1954 */
1955 for (index = 0; index < vdev->no_of_vpath; index++) {
1956 status = vxge_hw_vpath_rts_rth_set(
1957 vdev->vpaths[index].handle,
1958 vdev->config.rth_algorithm,
1959 &hash_types,
1960 vdev->config.rth_bkt_sz);
1961 if (status != VXGE_HW_OK) {
1962 vxge_debug_init(VXGE_ERR,
1963 "RTH configuration failed for vpath:%d",
1964 vdev->vpaths[index].device_id);
1965 return status;
1966 }
1967 }
1968
1969 return status;
1970}
1971
1972/* reset vpaths */
1973enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
1974{
1975 enum vxge_hw_status status = VXGE_HW_OK;
1976 struct vxge_vpath *vpath;
1977 int i;
1978
1979 for (i = 0; i < vdev->no_of_vpath; i++) {
1980 vpath = &vdev->vpaths[i];
1981 if (vpath->handle) {
1982 if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
1983 if (is_vxge_card_up(vdev) &&
1984 vxge_hw_vpath_recover_from_reset(
1985 vpath->handle) != VXGE_HW_OK) {
1986 vxge_debug_init(VXGE_ERR,
1987 "vxge_hw_vpath_recover_"
1988 "from_reset failed for vpath: "
1989 "%d", i);
1990 return status;
1991 }
1992 } else {
1993 vxge_debug_init(VXGE_ERR,
1994 "vxge_hw_vpath_reset failed for "
1995 "vpath:%d", i);
1996 return status;
1997 }
1998 }
1999 }
2000
2001 return status;
2002}
2003
2004/* close vpaths */
2005static void vxge_close_vpaths(struct vxgedev *vdev, int index)
2006{
2007 struct vxge_vpath *vpath;
2008 int i;
2009
2010 for (i = index; i < vdev->no_of_vpath; i++) {
2011 vpath = &vdev->vpaths[i];
2012
2013 if (vpath->handle && vpath->is_open) {
2014 vxge_hw_vpath_close(vpath->handle);
2015 vdev->stats.vpaths_open--;
2016 }
2017 vpath->is_open = 0;
2018 vpath->handle = NULL;
2019 }
2020}
2021
2022/* open vpaths */
2023static int vxge_open_vpaths(struct vxgedev *vdev)
2024{
2025 struct vxge_hw_vpath_attr attr;
2026 enum vxge_hw_status status;
2027 struct vxge_vpath *vpath;
2028 u32 vp_id = 0;
2029 int i;
2030
2031 for (i = 0; i < vdev->no_of_vpath; i++) {
2032 vpath = &vdev->vpaths[i];
2033 vxge_assert(vpath->is_configured);
2034
2035 if (!vdev->titan1) {
2036 struct vxge_hw_vp_config *vcfg;
2037 vcfg = &vdev->devh->config.vp_config[vpath->device_id];
2038
2039 vcfg->rti.urange_a = RTI_T1A_RX_URANGE_A;
2040 vcfg->rti.urange_b = RTI_T1A_RX_URANGE_B;
2041 vcfg->rti.urange_c = RTI_T1A_RX_URANGE_C;
2042 vcfg->tti.uec_a = TTI_T1A_TX_UFC_A;
2043 vcfg->tti.uec_b = TTI_T1A_TX_UFC_B;
2044 vcfg->tti.uec_c = TTI_T1A_TX_UFC_C(vdev->mtu);
2045 vcfg->tti.uec_d = TTI_T1A_TX_UFC_D(vdev->mtu);
2046 vcfg->tti.ltimer_val = VXGE_T1A_TTI_LTIMER_VAL;
2047 vcfg->tti.rtimer_val = VXGE_T1A_TTI_RTIMER_VAL;
2048 }
2049
2050 attr.vp_id = vpath->device_id;
2051 attr.fifo_attr.callback = vxge_xmit_compl;
2052 attr.fifo_attr.txdl_term = vxge_tx_term;
2053 attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv);
2054 attr.fifo_attr.userdata = &vpath->fifo;
2055
2056 attr.ring_attr.callback = vxge_rx_1b_compl;
2057 attr.ring_attr.rxd_init = vxge_rx_initial_replenish;
2058 attr.ring_attr.rxd_term = vxge_rx_term;
2059 attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv);
2060 attr.ring_attr.userdata = &vpath->ring;
2061
2062 vpath->ring.ndev = vdev->ndev;
2063 vpath->ring.pdev = vdev->pdev;
2064
2065 status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle);
2066 if (status == VXGE_HW_OK) {
2067 vpath->fifo.handle =
2068 (struct __vxge_hw_fifo *)attr.fifo_attr.userdata;
2069 vpath->ring.handle =
2070 (struct __vxge_hw_ring *)attr.ring_attr.userdata;
2071 vpath->fifo.tx_steering_type =
2072 vdev->config.tx_steering_type;
2073 vpath->fifo.ndev = vdev->ndev;
2074 vpath->fifo.pdev = vdev->pdev;
2075 if (vdev->config.tx_steering_type)
2076 vpath->fifo.txq =
2077 netdev_get_tx_queue(vdev->ndev, i);
2078 else
2079 vpath->fifo.txq =
2080 netdev_get_tx_queue(vdev->ndev, 0);
2081 vpath->fifo.indicate_max_pkts =
2082 vdev->config.fifo_indicate_max_pkts;
2083 vpath->fifo.tx_vector_no = 0;
2084 vpath->ring.rx_vector_no = 0;
2085 vpath->ring.rx_hwts = vdev->rx_hwts;
2086 vpath->is_open = 1;
2087 vdev->vp_handles[i] = vpath->handle;
2088 vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip;
2089 vdev->stats.vpaths_open++;
2090 } else {
2091 vdev->stats.vpath_open_fail++;
2092 vxge_debug_init(VXGE_ERR, "%s: vpath: %d failed to "
2093 "open with status: %d",
2094 vdev->ndev->name, vpath->device_id,
2095 status);
2096 vxge_close_vpaths(vdev, 0);
2097 return -EPERM;
2098 }
2099
2100 vp_id = vpath->handle->vpath->vp_id;
2101 vdev->vpaths_deployed |= vxge_mBIT(vp_id);
2102 }
2103
2104 return VXGE_HW_OK;
2105}
2106
2107/**
2108 * adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing
2109 * if the interrupts are not within a range
2110 * @fifo: pointer to transmit fifo structure
2111 * Description: The function changes the boundary timer and restriction timer
2112 * values depending on the traffic
2113 * Return Value: None
2114 */
2115static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
2116{
2117 fifo->interrupt_count++;
2118 if (jiffies > fifo->jiffies + HZ / 100) {
2119 struct __vxge_hw_fifo *hw_fifo = fifo->handle;
2120
2121 fifo->jiffies = jiffies;
2122 if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT &&
2123 hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) {
2124 hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL;
2125 vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
2126 } else if (hw_fifo->rtimer != 0) {
2127 hw_fifo->rtimer = 0;
2128 vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
2129 }
2130 fifo->interrupt_count = 0;
2131 }
2132}
2133
2134/**
2135 * adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing
2136 * if the interrupts are not within a range
2137 * @ring: pointer to receive ring structure
2138 * Description: The function adjusts the restriction timer value of the ring
2139 * if the interrupt rate due to this ring is not within a fixed
2140 * range.
2141 * Return Value: Nothing
2142 */
2143static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
2144{
2145 ring->interrupt_count++;
2146 if (jiffies > ring->jiffies + HZ / 100) {
2147 struct __vxge_hw_ring *hw_ring = ring->handle;
2148
2149 ring->jiffies = jiffies;
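		/* Same policy as the Tx side: throttle the Rx vector when the
		 * interrupt rate over the last ~10 ms exceeds the threshold,
		 * otherwise clear the restriction timer.
		 */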
2150 if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT &&
2151 hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) {
2152 hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL;
2153 vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
2154 } else if (hw_ring->rtimer != 0) {
2155 hw_ring->rtimer = 0;
2156 vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
2157 }
2158 ring->interrupt_count = 0;
2159 }
2160}
2161
2162/*
2163 * vxge_isr_napi
2164 * @irq: the irq of the device.
2165 * @dev_id: a void pointer to the hldev structure of the Titan device
2167 *
2168 * This function is the ISR handler of the device when napi is enabled. It
2169 * identifies the reason for the interrupt and calls the relevant service
2170 * routines.
2171 */
2172static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
2173{
2174 struct net_device *dev;
2175 struct __vxge_hw_device *hldev;
2176 u64 reason;
2177 enum vxge_hw_status status;
2178 struct vxgedev *vdev = (struct vxgedev *)dev_id;
2179
2180 vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);
2181
2182 dev = vdev->ndev;
2183 hldev = pci_get_drvdata(vdev->pdev);
2184
2185 if (pci_channel_offline(vdev->pdev))
2186 return IRQ_NONE;
2187
2188 if (unlikely(!is_vxge_card_up(vdev)))
2189 return IRQ_HANDLED;
2190
2191 status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, &reason);
2192 if (status == VXGE_HW_OK) {
2193 vxge_hw_device_mask_all(hldev);
2194
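		/* Was the interrupt raised by traffic on one of the deployed
		 * vpaths? If so, leave the mask in place and let NAPI do the
		 * work.
		 */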
2195 if (reason &
2196 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(
2197 vdev->vpaths_deployed >>
2198 (64 - VXGE_HW_MAX_VIRTUAL_PATHS))) {
2199
2200 vxge_hw_device_clear_tx_rx(hldev);
2201 napi_schedule(&vdev->napi);
2202 vxge_debug_intr(VXGE_TRACE,
2203 "%s:%d Exiting...", __func__, __LINE__);
2204 return IRQ_HANDLED;
2205 } else
2206 vxge_hw_device_unmask_all(hldev);
2207 } else if (unlikely((status == VXGE_HW_ERR_VPATH) ||
2208 (status == VXGE_HW_ERR_CRITICAL) ||
2209 (status == VXGE_HW_ERR_FIFO))) {
2210 vxge_hw_device_mask_all(hldev);
2211 vxge_hw_device_flush_io(hldev);
2212 return IRQ_HANDLED;
2213 } else if (unlikely(status == VXGE_HW_ERR_SLOT_FREEZE))
2214 return IRQ_HANDLED;
2215
2216 vxge_debug_intr(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__);
2217 return IRQ_NONE;
2218}
2219
2220#ifdef CONFIG_PCI_MSI
2221
2222static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
2223{
2224 struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
2225
2226 adaptive_coalesce_tx_interrupts(fifo);
2227
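	/* Mask and clear the Tx vector, reap the completed descriptors,
	 * then re-enable the vector.
	 */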
2228 vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle,
2229 fifo->tx_vector_no);
2230
2231 vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle,
2232 fifo->tx_vector_no);
2233
2234 VXGE_COMPLETE_VPATH_TX(fifo);
2235
2236 vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle,
2237 fifo->tx_vector_no);
2238
2239 mmiowb();
2240
2241 return IRQ_HANDLED;
2242}
2243
2244static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id)
2245{
2246 struct vxge_ring *ring = (struct vxge_ring *)dev_id;
2247
2248 adaptive_coalesce_rx_interrupts(ring);
2249
2250 vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
2251 ring->rx_vector_no);
2252
2253 vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle,
2254 ring->rx_vector_no);
2255
2256 napi_schedule(&ring->napi);
2257 return IRQ_HANDLED;
2258}
2259
2260static irqreturn_t
2261vxge_alarm_msix_handle(int irq, void *dev_id)
2262{
2263 int i;
2264 enum vxge_hw_status status;
2265 struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
2266 struct vxgedev *vdev = vpath->vdev;
2267 int msix_id = (vpath->handle->vpath->vp_id *
2268 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
2269
2270 for (i = 0; i < vdev->no_of_vpath; i++) {
2271 /* Reduce the chance of losing alarm interrupts by masking
2272 * the vector. A pending bit will be set if an alarm is
2273		 * generated, and the interrupt will fire when the vector is unmasked.
2274 */
2275 vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
2276 vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);
2277 mmiowb();
2278
2279 status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
2280 vdev->exec_mode);
2281 if (status == VXGE_HW_OK) {
2282 vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
2283 msix_id);
2284 mmiowb();
2285 continue;
2286 }
2287 vxge_debug_intr(VXGE_ERR,
2288 "%s: vxge_hw_vpath_alarm_process failed %x ",
2289 VXGE_DRIVER_NAME, status);
2290 }
2291 return IRQ_HANDLED;
2292}
2293
2294static int vxge_alloc_msix(struct vxgedev *vdev)
2295{
2296 int j, i, ret = 0;
2297 int msix_intr_vect = 0, temp;
2298 vdev->intr_cnt = 0;
2299
2300start:
2301 /* Tx/Rx MSIX Vectors count */
2302 vdev->intr_cnt = vdev->no_of_vpath * 2;
2303
2304 /* Alarm MSIX Vectors count */
2305 vdev->intr_cnt++;
2306
2307 vdev->entries = kcalloc(vdev->intr_cnt, sizeof(struct msix_entry),
2308 GFP_KERNEL);
2309 if (!vdev->entries) {
2310 vxge_debug_init(VXGE_ERR,
2311 "%s: memory allocation failed",
2312 VXGE_DRIVER_NAME);
2313 ret = -ENOMEM;
2314 goto alloc_entries_failed;
2315 }
2316
2317 vdev->vxge_entries = kcalloc(vdev->intr_cnt,
2318 sizeof(struct vxge_msix_entry),
2319 GFP_KERNEL);
2320 if (!vdev->vxge_entries) {
2321 vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
2322 VXGE_DRIVER_NAME);
2323 ret = -ENOMEM;
2324 goto alloc_vxge_entries_failed;
2325 }
2326
2327 for (i = 0, j = 0; i < vdev->no_of_vpath; i++) {
2328
2329 msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
2330
2331 /* Initialize the fifo vector */
2332 vdev->entries[j].entry = msix_intr_vect;
2333 vdev->vxge_entries[j].entry = msix_intr_vect;
2334 vdev->vxge_entries[j].in_use = 0;
2335 j++;
2336
2337 /* Initialize the ring vector */
2338 vdev->entries[j].entry = msix_intr_vect + 1;
2339 vdev->vxge_entries[j].entry = msix_intr_vect + 1;
2340 vdev->vxge_entries[j].in_use = 0;
2341 j++;
2342 }
2343
2344 /* Initialize the alarm vector */
2345 vdev->entries[j].entry = VXGE_ALARM_MSIX_ID;
2346 vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
2347 vdev->vxge_entries[j].in_use = 0;
2348
2349 ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt);
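	/* pci_enable_msix() returns a positive count when fewer vectors are
	 * available than requested; in that case retry with fewer vpaths.
	 */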
2350 if (ret > 0) {
2351 vxge_debug_init(VXGE_ERR,
2352 "%s: MSI-X enable failed for %d vectors, ret: %d",
2353 VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
2354 if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3)) {
2355 ret = -ENODEV;
2356 goto enable_msix_failed;
2357 }
2358
2359 kfree(vdev->entries);
2360 kfree(vdev->vxge_entries);
2361 vdev->entries = NULL;
2362 vdev->vxge_entries = NULL;
2363		/* Retry with fewer vectors by reducing the vpath count */
2364 temp = (ret - 1)/2;
2365 vxge_close_vpaths(vdev, temp);
2366 vdev->no_of_vpath = temp;
2367 goto start;
2368 } else if (ret < 0) {
2369 ret = -ENODEV;
2370 goto enable_msix_failed;
2371 }
2372 return 0;
2373
2374enable_msix_failed:
2375 kfree(vdev->vxge_entries);
2376alloc_vxge_entries_failed:
2377 kfree(vdev->entries);
2378alloc_entries_failed:
2379 return ret;
2380}
2381
2382static int vxge_enable_msix(struct vxgedev *vdev)
2383{
2384
2385 int i, ret = 0;
2386 /* 0 - Tx, 1 - Rx */
2387 int tim_msix_id[4] = {0, 1, 0, 0};
2388
2389 vdev->intr_cnt = 0;
2390
2391 /* allocate msix vectors */
2392 ret = vxge_alloc_msix(vdev);
2393 if (!ret) {
2394 for (i = 0; i < vdev->no_of_vpath; i++) {
2395 struct vxge_vpath *vpath = &vdev->vpaths[i];
2396
2397			/* If the fifo or ring is not enabled, the MSI-X vector for
2398			 * it should be set to 0.
2399 */
2400 vpath->ring.rx_vector_no = (vpath->device_id *
2401 VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
2402
2403 vpath->fifo.tx_vector_no = (vpath->device_id *
2404 VXGE_HW_VPATH_MSIX_ACTIVE);
2405
2406 vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
2407 VXGE_ALARM_MSIX_ID);
2408 }
2409 }
2410
2411 return ret;
2412}
2413
2414static void vxge_rem_msix_isr(struct vxgedev *vdev)
2415{
2416 int intr_cnt;
2417
2418 for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1);
2419 intr_cnt++) {
2420 if (vdev->vxge_entries[intr_cnt].in_use) {
2421 synchronize_irq(vdev->entries[intr_cnt].vector);
2422 free_irq(vdev->entries[intr_cnt].vector,
2423 vdev->vxge_entries[intr_cnt].arg);
2424 vdev->vxge_entries[intr_cnt].in_use = 0;
2425 }
2426 }
2427
2428 kfree(vdev->entries);
2429 kfree(vdev->vxge_entries);
2430 vdev->entries = NULL;
2431 vdev->vxge_entries = NULL;
2432
2433 if (vdev->config.intr_type == MSI_X)
2434 pci_disable_msix(vdev->pdev);
2435}
2436#endif
2437
2438static void vxge_rem_isr(struct vxgedev *vdev)
2439{
2440 struct __vxge_hw_device *hldev;
2441 hldev = pci_get_drvdata(vdev->pdev);
2442
2443#ifdef CONFIG_PCI_MSI
2444 if (vdev->config.intr_type == MSI_X) {
2445 vxge_rem_msix_isr(vdev);
2446 } else
2447#endif
2448 if (vdev->config.intr_type == INTA) {
2449 synchronize_irq(vdev->pdev->irq);
2450 free_irq(vdev->pdev->irq, vdev);
2451 }
2452}
2453
2454static int vxge_add_isr(struct vxgedev *vdev)
2455{
2456 int ret = 0;
2457#ifdef CONFIG_PCI_MSI
2458 int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
2459 int pci_fun = PCI_FUNC(vdev->pdev->devfn);
2460
2461 if (vdev->config.intr_type == MSI_X)
2462 ret = vxge_enable_msix(vdev);
2463
2464 if (ret) {
2465 vxge_debug_init(VXGE_ERR,
2466 "%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME);
2467 vxge_debug_init(VXGE_ERR,
2468 "%s: Defaulting to INTA", VXGE_DRIVER_NAME);
2469 vdev->config.intr_type = INTA;
2470 }
2471
2472 if (vdev->config.intr_type == MSI_X) {
2473 for (intr_idx = 0;
2474 intr_idx < (vdev->no_of_vpath *
2475 VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) {
2476
2477 msix_idx = intr_idx % VXGE_HW_VPATH_MSIX_ACTIVE;
2478 irq_req = 0;
2479
2480 switch (msix_idx) {
2481 case 0:
2482 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2483 "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
2484 vdev->ndev->name,
2485 vdev->entries[intr_cnt].entry,
2486 pci_fun, vp_idx);
2487 ret = request_irq(
2488 vdev->entries[intr_cnt].vector,
2489 vxge_tx_msix_handle, 0,
2490 vdev->desc[intr_cnt],
2491 &vdev->vpaths[vp_idx].fifo);
2492 vdev->vxge_entries[intr_cnt].arg =
2493 &vdev->vpaths[vp_idx].fifo;
2494 irq_req = 1;
2495 break;
2496 case 1:
2497 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2498 "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
2499 vdev->ndev->name,
2500 vdev->entries[intr_cnt].entry,
2501 pci_fun, vp_idx);
2502 ret = request_irq(
2503 vdev->entries[intr_cnt].vector,
2504 vxge_rx_msix_napi_handle,
2505 0,
2506 vdev->desc[intr_cnt],
2507 &vdev->vpaths[vp_idx].ring);
2508 vdev->vxge_entries[intr_cnt].arg =
2509 &vdev->vpaths[vp_idx].ring;
2510 irq_req = 1;
2511 break;
2512 }
2513
2514 if (ret) {
2515 vxge_debug_init(VXGE_ERR,
2516 "%s: MSIX - %d Registration failed",
2517 vdev->ndev->name, intr_cnt);
2518 vxge_rem_msix_isr(vdev);
2519 vdev->config.intr_type = INTA;
2520 vxge_debug_init(VXGE_ERR,
2521 "%s: Defaulting to INTA"
2522 , vdev->ndev->name);
2523 goto INTA_MODE;
2524 }
2525
2526 if (irq_req) {
2527				/* We requested this MSI-X interrupt */
2528 vdev->vxge_entries[intr_cnt].in_use = 1;
2529 msix_idx += vdev->vpaths[vp_idx].device_id *
2530 VXGE_HW_VPATH_MSIX_ACTIVE;
2531 vxge_hw_vpath_msix_unmask(
2532 vdev->vpaths[vp_idx].handle,
2533 msix_idx);
2534 intr_cnt++;
2535 }
2536
2537 /* Point to next vpath handler */
2538 if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0) &&
2539 (vp_idx < (vdev->no_of_vpath - 1)))
2540 vp_idx++;
2541 }
2542
2543 intr_cnt = vdev->no_of_vpath * 2;
2544 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2545 "%s:vxge:MSI-X %d - Alarm - fn:%d",
2546 vdev->ndev->name,
2547 vdev->entries[intr_cnt].entry,
2548 pci_fun);
2549 /* For Alarm interrupts */
2550 ret = request_irq(vdev->entries[intr_cnt].vector,
2551 vxge_alarm_msix_handle, 0,
2552 vdev->desc[intr_cnt],
2553 &vdev->vpaths[0]);
2554 if (ret) {
2555 vxge_debug_init(VXGE_ERR,
2556 "%s: MSIX - %d Registration failed",
2557 vdev->ndev->name, intr_cnt);
2558 vxge_rem_msix_isr(vdev);
2559 vdev->config.intr_type = INTA;
2560 vxge_debug_init(VXGE_ERR,
2561 "%s: Defaulting to INTA",
2562 vdev->ndev->name);
2563 goto INTA_MODE;
2564 }
2565
2566 msix_idx = (vdev->vpaths[0].handle->vpath->vp_id *
2567 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
2568 vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
2569 msix_idx);
2570 vdev->vxge_entries[intr_cnt].in_use = 1;
2571 vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
2572 }
2573INTA_MODE:
2574#endif
2575
2576 if (vdev->config.intr_type == INTA) {
2577 snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
2578 "%s:vxge:INTA", vdev->ndev->name);
2579 vxge_hw_device_set_intr_type(vdev->devh,
2580 VXGE_HW_INTR_MODE_IRQLINE);
2581
2582 vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle);
2583
2584 ret = request_irq((int) vdev->pdev->irq,
2585 vxge_isr_napi,
2586 IRQF_SHARED, vdev->desc[0], vdev);
2587 if (ret) {
2588 vxge_debug_init(VXGE_ERR,
2589 "%s %s-%d: ISR registration failed",
2590 VXGE_DRIVER_NAME, "IRQ", vdev->pdev->irq);
2591 return -ENODEV;
2592 }
2593 vxge_debug_init(VXGE_TRACE,
2594 "new %s-%d line allocated",
2595 "IRQ", vdev->pdev->irq);
2596 }
2597
2598 return VXGE_HW_OK;
2599}
2600
2601static void vxge_poll_vp_reset(unsigned long data)
2602{
2603 struct vxgedev *vdev = (struct vxgedev *)data;
2604 int i, j = 0;
2605
2606 for (i = 0; i < vdev->no_of_vpath; i++) {
2607 if (test_bit(i, &vdev->vp_reset)) {
2608 vxge_reset_vpath(vdev, i);
2609 j++;
2610 }
2611 }
2612 if (j && (vdev->config.intr_type != MSI_X)) {
2613 vxge_hw_device_unmask_all(vdev->devh);
2614 vxge_hw_device_flush_io(vdev->devh);
2615 }
2616
2617 mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2);
2618}
2619
2620static void vxge_poll_vp_lockup(unsigned long data)
2621{
2622 struct vxgedev *vdev = (struct vxgedev *)data;
2623 enum vxge_hw_status status = VXGE_HW_OK;
2624 struct vxge_vpath *vpath;
2625 struct vxge_ring *ring;
2626 int i;
2627 unsigned long rx_frms;
2628
2629 for (i = 0; i < vdev->no_of_vpath; i++) {
2630 ring = &vdev->vpaths[i].ring;
2631
2632		/* Frame count, truncated to the machine word size */
2633 rx_frms = ACCESS_ONCE(ring->stats.rx_frms);
2634
2635		/* Did this vpath receive any packets? */
2636 if (ring->stats.prev_rx_frms == rx_frms) {
2637 status = vxge_hw_vpath_check_leak(ring->handle);
2638
2639			/* Did it fail to receive packets last time as well? */
2640 if ((VXGE_HW_FAIL == status) &&
2641 (VXGE_HW_FAIL == ring->last_status)) {
2642
2643 /* schedule vpath reset */
2644 if (!test_and_set_bit(i, &vdev->vp_reset)) {
2645 vpath = &vdev->vpaths[i];
2646
2647 /* disable interrupts for this vpath */
2648 vxge_vpath_intr_disable(vdev, i);
2649
2650 /* stop the queue for this vpath */
2651 netif_tx_stop_queue(vpath->fifo.txq);
2652 continue;
2653 }
2654 }
2655 }
2656 ring->stats.prev_rx_frms = rx_frms;
2657 ring->last_status = status;
2658 }
2659
2660	/* Check every millisecond */
2661 mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
2662}
2663
2664static u32 vxge_fix_features(struct net_device *dev, u32 features)
2665{
2666 u32 changed = dev->features ^ features;
2667
2668 /* Enabling RTH requires some of the logic in vxge_device_register and a
2669 * vpath reset. Due to these restrictions, only allow modification
2670 * while the interface is down.
2671 */
2672 if ((changed & NETIF_F_RXHASH) && netif_running(dev))
2673 features ^= NETIF_F_RXHASH;
2674
2675 return features;
2676}
2677
2678static int vxge_set_features(struct net_device *dev, u32 features)
2679{
2680 struct vxgedev *vdev = netdev_priv(dev);
2681 u32 changed = dev->features ^ features;
2682
2683 if (!(changed & NETIF_F_RXHASH))
2684 return 0;
2685
2686 /* !netif_running() ensured by vxge_fix_features() */
2687
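	/* Toggle RTH in the device configuration and reset all vpaths so the
	 * change takes effect; roll back the feature flag on failure.
	 */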
2688 vdev->devh->config.rth_en = !!(features & NETIF_F_RXHASH);
2689 if (vxge_reset_all_vpaths(vdev) != VXGE_HW_OK) {
2690 dev->features = features ^ NETIF_F_RXHASH;
2691 vdev->devh->config.rth_en = !!(dev->features & NETIF_F_RXHASH);
2692 return -EIO;
2693 }
2694
2695 return 0;
2696}
2697
2698/**
2699 * vxge_open
2700 * @dev: pointer to the device structure.
2701 *
2702 * This function is the open entry point of the driver. It mainly calls a
2703 * function to allocate Rx buffers and inserts them into the buffer
2704 * descriptors and then enables the Rx part of the NIC.
2705 * Return value: '0' on success and an appropriate (-)ve integer as
2706 * defined in errno.h file on failure.
2707 */
2708static int vxge_open(struct net_device *dev)
2709{
2710 enum vxge_hw_status status;
2711 struct vxgedev *vdev;
2712 struct __vxge_hw_device *hldev;
2713 struct vxge_vpath *vpath;
2714 int ret = 0;
2715 int i;
2716 u64 val64, function_mode;
2717
2718 vxge_debug_entryexit(VXGE_TRACE,
2719 "%s: %s:%d", dev->name, __func__, __LINE__);
2720
2721 vdev = netdev_priv(dev);
2722 hldev = pci_get_drvdata(vdev->pdev);
2723 function_mode = vdev->config.device_hw_info.function_mode;
2724
2725	/* Make sure the link is off by default every time the NIC is
2726	 * initialized */
2727 netif_carrier_off(dev);
2728
2729 /* Open VPATHs */
2730 status = vxge_open_vpaths(vdev);
2731 if (status != VXGE_HW_OK) {
2732 vxge_debug_init(VXGE_ERR,
2733 "%s: fatal: Vpath open failed", vdev->ndev->name);
2734 ret = -EPERM;
2735 goto out0;
2736 }
2737
2738 vdev->mtu = dev->mtu;
2739
2740 status = vxge_add_isr(vdev);
2741 if (status != VXGE_HW_OK) {
2742 vxge_debug_init(VXGE_ERR,
2743 "%s: fatal: ISR add failed", dev->name);
2744 ret = -EPERM;
2745 goto out1;
2746 }
2747
2748 if (vdev->config.intr_type != MSI_X) {
2749 netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
2750 vdev->config.napi_weight);
2751 napi_enable(&vdev->napi);
2752 for (i = 0; i < vdev->no_of_vpath; i++) {
2753 vpath = &vdev->vpaths[i];
2754 vpath->ring.napi_p = &vdev->napi;
2755 }
2756 } else {
2757 for (i = 0; i < vdev->no_of_vpath; i++) {
2758 vpath = &vdev->vpaths[i];
2759 netif_napi_add(dev, &vpath->ring.napi,
2760 vxge_poll_msix, vdev->config.napi_weight);
2761 napi_enable(&vpath->ring.napi);
2762 vpath->ring.napi_p = &vpath->ring.napi;
2763 }
2764 }
2765
2766 /* configure RTH */
2767 if (vdev->config.rth_steering) {
2768 status = vxge_rth_configure(vdev);
2769 if (status != VXGE_HW_OK) {
2770 vxge_debug_init(VXGE_ERR,
2771 "%s: fatal: RTH configuration failed",
2772 dev->name);
2773 ret = -EPERM;
2774 goto out2;
2775 }
2776 }
2777 printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name,
2778 hldev->config.rth_en ? "enabled" : "disabled");
2779
2780 for (i = 0; i < vdev->no_of_vpath; i++) {
2781 vpath = &vdev->vpaths[i];
2782
2783 /* set initial mtu before enabling the device */
2784 status = vxge_hw_vpath_mtu_set(vpath->handle, vdev->mtu);
2785 if (status != VXGE_HW_OK) {
2786 vxge_debug_init(VXGE_ERR,
2787 "%s: fatal: can not set new MTU", dev->name);
2788 ret = -EPERM;
2789 goto out2;
2790 }
2791 }
2792
2793 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_TRACE, VXGE_COMPONENT_LL, vdev);
2794 vxge_debug_init(vdev->level_trace,
2795 "%s: MTU is %d", vdev->ndev->name, vdev->mtu);
2796 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev);
2797
2798 /* Restore the DA, VID table and also multicast and promiscuous mode
2799 * states
2800 */
2801 if (vdev->all_multi_flg) {
2802 for (i = 0; i < vdev->no_of_vpath; i++) {
2803 vpath = &vdev->vpaths[i];
2804 vxge_restore_vpath_mac_addr(vpath);
2805 vxge_restore_vpath_vid_table(vpath);
2806
2807 status = vxge_hw_vpath_mcast_enable(vpath->handle);
2808 if (status != VXGE_HW_OK)
2809 vxge_debug_init(VXGE_ERR,
2810 "%s:%d Enabling multicast failed",
2811 __func__, __LINE__);
2812 }
2813 }
2814
2815	/* Enable the vpaths to sniff all unicast/multicast traffic that is not
2816	 * addressed to them. Promiscuous mode is allowed for the PF only.
2817 */
2818
2819 val64 = 0;
2820 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
2821 val64 |= VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(i);
2822
2823 vxge_hw_mgmt_reg_write(vdev->devh,
2824 vxge_hw_mgmt_reg_type_mrpcim,
2825 0,
2826 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2827 rxmac_authorize_all_addr),
2828 val64);
2829
2830 vxge_hw_mgmt_reg_write(vdev->devh,
2831 vxge_hw_mgmt_reg_type_mrpcim,
2832 0,
2833 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2834 rxmac_authorize_all_vid),
2835 val64);
2836
2837 vxge_set_multicast(dev);
2838
2839 /* Enabling Bcast and mcast for all vpath */
2840 for (i = 0; i < vdev->no_of_vpath; i++) {
2841 vpath = &vdev->vpaths[i];
2842 status = vxge_hw_vpath_bcast_enable(vpath->handle);
2843 if (status != VXGE_HW_OK)
2844 vxge_debug_init(VXGE_ERR,
2845 "%s : Can not enable bcast for vpath "
2846 "id %d", dev->name, i);
2847 if (vdev->config.addr_learn_en) {
2848 status = vxge_hw_vpath_mcast_enable(vpath->handle);
2849 if (status != VXGE_HW_OK)
2850 vxge_debug_init(VXGE_ERR,
2851 "%s : Can not enable mcast for vpath "
2852 "id %d", dev->name, i);
2853 }
2854 }
2855
2856 vxge_hw_device_setpause_data(vdev->devh, 0,
2857 vdev->config.tx_pause_enable,
2858 vdev->config.rx_pause_enable);
2859
2860 if (vdev->vp_reset_timer.function == NULL)
2861 vxge_os_timer(vdev->vp_reset_timer,
2862 vxge_poll_vp_reset, vdev, (HZ/2));
2863
2864 /* There is no need to check for RxD leak and RxD lookup on Titan1A */
2865 if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
2866 vxge_os_timer(vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev,
2867 HZ / 2);
2868
2869 set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2870
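	/* Make the CARD_UP bit visible before interrupts are enabled below */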
2871 smp_wmb();
2872
2873 if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
2874 netif_carrier_on(vdev->ndev);
2875 netdev_notice(vdev->ndev, "Link Up\n");
2876 vdev->stats.link_up++;
2877 }
2878
2879 vxge_hw_device_intr_enable(vdev->devh);
2880
2881 smp_wmb();
2882
2883 for (i = 0; i < vdev->no_of_vpath; i++) {
2884 vpath = &vdev->vpaths[i];
2885
2886 vxge_hw_vpath_enable(vpath->handle);
2887 smp_wmb();
2888 vxge_hw_vpath_rx_doorbell_init(vpath->handle);
2889 }
2890
2891 netif_tx_start_all_queues(vdev->ndev);
2892
2893 /* configure CI */
2894 vxge_config_ci_for_tti_rti(vdev);
2895
2896 goto out0;
2897
2898out2:
2899 vxge_rem_isr(vdev);
2900
2901 /* Disable napi */
2902 if (vdev->config.intr_type != MSI_X)
2903 napi_disable(&vdev->napi);
2904 else {
2905 for (i = 0; i < vdev->no_of_vpath; i++)
2906 napi_disable(&vdev->vpaths[i].ring.napi);
2907 }
2908
2909out1:
2910 vxge_close_vpaths(vdev, 0);
2911out0:
2912 vxge_debug_entryexit(VXGE_TRACE,
2913 "%s: %s:%d Exiting...",
2914 dev->name, __func__, __LINE__);
2915 return ret;
2916}
2917
2918/* Loop through the mac address list and delete all the entries */
2919static void vxge_free_mac_add_list(struct vxge_vpath *vpath)
2920{
2921
2922 struct list_head *entry, *next;
2923 if (list_empty(&vpath->mac_addr_list))
2924 return;
2925
2926 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
2927 list_del(entry);
2928 kfree((struct vxge_mac_addrs *)entry);
2929 }
2930}
2931
2932static void vxge_napi_del_all(struct vxgedev *vdev)
2933{
2934 int i;
2935 if (vdev->config.intr_type != MSI_X)
2936 netif_napi_del(&vdev->napi);
2937 else {
2938 for (i = 0; i < vdev->no_of_vpath; i++)
2939 netif_napi_del(&vdev->vpaths[i].ring.napi);
2940 }
2941}
2942
2943static int do_vxge_close(struct net_device *dev, int do_io)
2944{
2945 enum vxge_hw_status status;
2946 struct vxgedev *vdev;
2947 struct __vxge_hw_device *hldev;
2948 int i;
2949 u64 val64, vpath_vector;
2950 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
2951 dev->name, __func__, __LINE__);
2952
2953 vdev = netdev_priv(dev);
2954 hldev = pci_get_drvdata(vdev->pdev);
2955
2956 if (unlikely(!is_vxge_card_up(vdev)))
2957 return 0;
2958
2959 /* If vxge_handle_crit_err task is executing,
2960 * wait till it completes. */
2961 while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
2962 msleep(50);
2963
2964 if (do_io) {
2965 /* Put the vpath back in normal mode */
2966 vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
2967 status = vxge_hw_mgmt_reg_read(vdev->devh,
2968 vxge_hw_mgmt_reg_type_mrpcim,
2969 0,
2970 (ulong)offsetof(
2971 struct vxge_hw_mrpcim_reg,
2972 rts_mgr_cbasin_cfg),
2973 &val64);
2974 if (status == VXGE_HW_OK) {
2975 val64 &= ~vpath_vector;
2976 status = vxge_hw_mgmt_reg_write(vdev->devh,
2977 vxge_hw_mgmt_reg_type_mrpcim,
2978 0,
2979 (ulong)offsetof(
2980 struct vxge_hw_mrpcim_reg,
2981 rts_mgr_cbasin_cfg),
2982 val64);
2983 }
2984
2985 /* Remove the function 0 from promiscuous mode */
2986 vxge_hw_mgmt_reg_write(vdev->devh,
2987 vxge_hw_mgmt_reg_type_mrpcim,
2988 0,
2989 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2990 rxmac_authorize_all_addr),
2991 0);
2992
2993 vxge_hw_mgmt_reg_write(vdev->devh,
2994 vxge_hw_mgmt_reg_type_mrpcim,
2995 0,
2996 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2997 rxmac_authorize_all_vid),
2998 0);
2999
3000 smp_wmb();
3001 }
3002
3003 if (vdev->titan1)
3004 del_timer_sync(&vdev->vp_lockup_timer);
3005
3006 del_timer_sync(&vdev->vp_reset_timer);
3007
3008 if (do_io)
3009 vxge_hw_device_wait_receive_idle(hldev);
3010
3011 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3012
3013 /* Disable napi */
3014 if (vdev->config.intr_type != MSI_X)
3015 napi_disable(&vdev->napi);
3016 else {
3017 for (i = 0; i < vdev->no_of_vpath; i++)
3018 napi_disable(&vdev->vpaths[i].ring.napi);
3019 }
3020
3021 netif_carrier_off(vdev->ndev);
3022 netdev_notice(vdev->ndev, "Link Down\n");
3023 netif_tx_stop_all_queues(vdev->ndev);
3024
3025 /* Note that at this point xmit() is stopped by upper layer */
3026 if (do_io)
3027 vxge_hw_device_intr_disable(vdev->devh);
3028
3029 vxge_rem_isr(vdev);
3030
3031 vxge_napi_del_all(vdev);
3032
3033 if (do_io)
3034 vxge_reset_all_vpaths(vdev);
3035
3036 vxge_close_vpaths(vdev, 0);
3037
3038 vxge_debug_entryexit(VXGE_TRACE,
3039 "%s: %s:%d Exiting...", dev->name, __func__, __LINE__);
3040
3041 clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
3042
3043 return 0;
3044}
3045
3046/**
3047 * vxge_close
3048 * @dev: device pointer.
3049 *
3050 * This is the stop entry point of the driver. It needs to undo exactly
3051 * whatever was done by the open entry point, thus it's usually referred to
3052 * as the close function. Among other things this function mainly stops the
3053 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3054 * Return value: '0' on success and an appropriate (-)ve integer as
3055 * defined in errno.h file on failure.
3056 */
3057static int vxge_close(struct net_device *dev)
3058{
3059 do_vxge_close(dev, 1);
3060 return 0;
3061}
3062
3063/**
3064 * vxge_change_mtu
3065 * @dev: net device pointer.
3066 * @new_mtu: the new MTU size for the device.
3067 *
3068 * A driver entry point to change MTU size for the device. Before changing
3069 * the MTU the device must be stopped.
3070 */
3071static int vxge_change_mtu(struct net_device *dev, int new_mtu)
3072{
3073 struct vxgedev *vdev = netdev_priv(dev);
3074
3075 vxge_debug_entryexit(vdev->level_trace,
3076 "%s:%d", __func__, __LINE__);
3077 if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > VXGE_HW_MAX_MTU)) {
3078 vxge_debug_init(vdev->level_err,
3079 "%s: mtu size is invalid", dev->name);
3080 return -EPERM;
3081 }
3082
3083 /* check if device is down already */
3084 if (unlikely(!is_vxge_card_up(vdev))) {
3085 /* just store new value, will use later on open() */
3086 dev->mtu = new_mtu;
3087 vxge_debug_init(vdev->level_err,
3088 "%s", "device is down on MTU change");
3089 return 0;
3090 }
3091
3092 vxge_debug_init(vdev->level_trace,
3093 "trying to apply new MTU %d", new_mtu);
3094
3095 if (vxge_close(dev))
3096 return -EIO;
3097
3098 dev->mtu = new_mtu;
3099 vdev->mtu = new_mtu;
3100
3101 if (vxge_open(dev))
3102 return -EIO;
3103
3104 vxge_debug_init(vdev->level_trace,
3105 "%s: MTU changed to %d", vdev->ndev->name, new_mtu);
3106
3107 vxge_debug_entryexit(vdev->level_trace,
3108 "%s:%d Exiting...", __func__, __LINE__);
3109
3110 return 0;
3111}
3112
3113/**
3114 * vxge_get_stats64
3115 * @dev: pointer to the device structure
3116 * @net_stats: pointer to struct rtnl_link_stats64
3117 *
3118 */
3119static struct rtnl_link_stats64 *
3120vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
3121{
3122 struct vxgedev *vdev = netdev_priv(dev);
3123 int k;
3124
3125 /* net_stats already zeroed by caller */
3126 for (k = 0; k < vdev->no_of_vpath; k++) {
3127 struct vxge_ring_stats *rxstats = &vdev->vpaths[k].ring.stats;
3128 struct vxge_fifo_stats *txstats = &vdev->vpaths[k].fifo.stats;
3129 unsigned int start;
3130 u64 packets, bytes, multicast;
3131
3132 do {
3133 start = u64_stats_fetch_begin(&rxstats->syncp);
3134
3135 packets = rxstats->rx_frms;
3136 multicast = rxstats->rx_mcast;
3137 bytes = rxstats->rx_bytes;
3138 } while (u64_stats_fetch_retry(&rxstats->syncp, start));
3139
3140 net_stats->rx_packets += packets;
3141 net_stats->rx_bytes += bytes;
3142 net_stats->multicast += multicast;
3143
3144 net_stats->rx_errors += rxstats->rx_errors;
3145 net_stats->rx_dropped += rxstats->rx_dropped;
3146
3147 do {
3148 start = u64_stats_fetch_begin(&txstats->syncp);
3149
3150 packets = txstats->tx_frms;
3151 bytes = txstats->tx_bytes;
3152 } while (u64_stats_fetch_retry(&txstats->syncp, start));
3153
3154 net_stats->tx_packets += packets;
3155 net_stats->tx_bytes += bytes;
3156 net_stats->tx_errors += txstats->tx_errors;
3157 }
3158
3159 return net_stats;
3160}
3161
3162static enum vxge_hw_status vxge_timestamp_config(struct __vxge_hw_device *devh)
3163{
3164 enum vxge_hw_status status;
3165 u64 val64;
3166
3167 /* Timestamp is passed to the driver via the FCS, therefore we
3168 * must disable the FCS stripping by the adapter. Since this is
3169 * required for the driver to load (due to a hardware bug),
3170 * there is no need to do anything special here.
3171 */
3172 val64 = VXGE_HW_XMAC_TIMESTAMP_EN |
3173 VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) |
3174 VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0);
3175
3176 status = vxge_hw_mgmt_reg_write(devh,
3177 vxge_hw_mgmt_reg_type_mrpcim,
3178 0,
3179 offsetof(struct vxge_hw_mrpcim_reg,
3180 xmac_timestamp),
3181 val64);
3182 vxge_hw_device_flush_io(devh);
3183 devh->config.hwts_en = VXGE_HW_HWTS_ENABLE;
3184 return status;
3185}
3186
3187static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
3188{
3189 struct hwtstamp_config config;
3190 int i;
3191
3192 if (copy_from_user(&config, data, sizeof(config)))
3193 return -EFAULT;
3194
3195 /* reserved for future extensions */
3196 if (config.flags)
3197 return -EINVAL;
3198
3199 /* Transmit HW Timestamp not supported */
3200 switch (config.tx_type) {
3201 case HWTSTAMP_TX_OFF:
3202 break;
3203 case HWTSTAMP_TX_ON:
3204 default:
3205 return -ERANGE;
3206 }
3207
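	/* Any PTP filter request is promoted to HWTSTAMP_FILTER_ALL; the
	 * driver timestamps either all received frames or none.
	 */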
3208 switch (config.rx_filter) {
3209 case HWTSTAMP_FILTER_NONE:
3210 vdev->rx_hwts = 0;
3211 config.rx_filter = HWTSTAMP_FILTER_NONE;
3212 break;
3213
3214 case HWTSTAMP_FILTER_ALL:
3215 case HWTSTAMP_FILTER_SOME:
3216 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3217 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3218 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3219 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3220 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3221 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3222 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
3223 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
3224 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
3225 case HWTSTAMP_FILTER_PTP_V2_EVENT:
3226 case HWTSTAMP_FILTER_PTP_V2_SYNC:
3227 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
3228 if (vdev->devh->config.hwts_en != VXGE_HW_HWTS_ENABLE)
3229 return -EFAULT;
3230
3231 vdev->rx_hwts = 1;
3232 config.rx_filter = HWTSTAMP_FILTER_ALL;
3233 break;
3234
3235 default:
3236 return -ERANGE;
3237 }
3238
3239 for (i = 0; i < vdev->no_of_vpath; i++)
3240 vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts;
3241
3242 if (copy_to_user(data, &config, sizeof(config)))
3243 return -EFAULT;
3244
3245 return 0;
3246}
3247
3248/**
3249 * vxge_ioctl
3250 * @dev: Device pointer.
3251 * @rq: An ioctl-specific structure that can contain a pointer to
3252 * a proprietary structure used to pass information to the driver.
3253 * @cmd: This is used to distinguish between the different commands that
3254 * can be passed to the IOCTL functions.
3255 *
3256 * Entry point for the Ioctl.
3257 */
3258static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3259{
3260 struct vxgedev *vdev = netdev_priv(dev);
3261 int ret;
3262
3263 switch (cmd) {
3264 case SIOCSHWTSTAMP:
3265 ret = vxge_hwtstamp_ioctl(vdev, rq->ifr_data);
3266 if (ret)
3267 return ret;
3268 break;
3269 default:
3270 return -EOPNOTSUPP;
3271 }
3272
3273 return 0;
3274}
3275
3276/**
3277 * vxge_tx_watchdog
3278 * @dev: pointer to net device structure
3279 *
3280 * Watchdog for transmit side.
3281 * This function is triggered if the Tx Queue is stopped
3282 * for a pre-defined amount of time when the Interface is still up.
3283 */
3284static void vxge_tx_watchdog(struct net_device *dev)
3285{
3286 struct vxgedev *vdev;
3287
3288 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3289
3290 vdev = netdev_priv(dev);
3291
3292 vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;
3293
3294 schedule_work(&vdev->reset_task);
3295 vxge_debug_entryexit(VXGE_TRACE,
3296 "%s:%d Exiting...", __func__, __LINE__);
3297}
3298
3299/**
3300 * vxge_vlan_rx_add_vid
3301 * @dev: net device pointer.
3302 * @vid: vid
3303 *
3304 * Add the vlan id to the device's vlan id table
3305 */
3306static void
3307vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
3308{
3309 struct vxgedev *vdev = netdev_priv(dev);
3310 struct vxge_vpath *vpath;
3311 int vp_id;
3312
3313	/* Add this vlan id to the vid table of each open vpath */
3314 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
3315 vpath = &vdev->vpaths[vp_id];
3316 if (!vpath->is_open)
3317 continue;
3318 vxge_hw_vpath_vid_add(vpath->handle, vid);
3319 }
3320 set_bit(vid, vdev->active_vlans);
3321}
3322
3323/**
3324 * vxge_vlan_rx_kill_vid
3325 * @dev: net device pointer.
3326 * @vid: vid
3327 *
3328 * Remove the vlan id from the device's vlan id table
3329 */
3330static void
3331vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
3332{
3333 struct vxgedev *vdev = netdev_priv(dev);
3334 struct vxge_vpath *vpath;
3335 int vp_id;
3336
3337 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3338
3339 /* Delete this vlan from the vid table */
3340 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
3341 vpath = &vdev->vpaths[vp_id];
3342 if (!vpath->is_open)
3343 continue;
3344 vxge_hw_vpath_vid_delete(vpath->handle, vid);
3345 }
3346 vxge_debug_entryexit(VXGE_TRACE,
3347 "%s:%d Exiting...", __func__, __LINE__);
3348 clear_bit(vid, vdev->active_vlans);
3349}
3350
3351static const struct net_device_ops vxge_netdev_ops = {
3352 .ndo_open = vxge_open,
3353 .ndo_stop = vxge_close,
3354 .ndo_get_stats64 = vxge_get_stats64,
3355 .ndo_start_xmit = vxge_xmit,
3356 .ndo_validate_addr = eth_validate_addr,
3357 .ndo_set_multicast_list = vxge_set_multicast,
3358 .ndo_do_ioctl = vxge_ioctl,
3359 .ndo_set_mac_address = vxge_set_mac_addr,
3360 .ndo_change_mtu = vxge_change_mtu,
3361 .ndo_fix_features = vxge_fix_features,
3362 .ndo_set_features = vxge_set_features,
3363 .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid,
3364 .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid,
3365 .ndo_tx_timeout = vxge_tx_watchdog,
3366#ifdef CONFIG_NET_POLL_CONTROLLER
3367 .ndo_poll_controller = vxge_netpoll,
3368#endif
3369};
3370
3371static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3372 struct vxge_config *config,
3373 int high_dma, int no_of_vpath,
3374 struct vxgedev **vdev_out)
3375{
3376 struct net_device *ndev;
3377 enum vxge_hw_status status = VXGE_HW_OK;
3378 struct vxgedev *vdev;
3379 int ret = 0, no_of_queue = 1;
3380 u64 stat;
3381
3382 *vdev_out = NULL;
3383 if (config->tx_steering_type)
3384 no_of_queue = no_of_vpath;
3385
3386 ndev = alloc_etherdev_mq(sizeof(struct vxgedev),
3387 no_of_queue);
3388 if (ndev == NULL) {
3389 vxge_debug_init(
3390 vxge_hw_device_trace_level_get(hldev),
3391 "%s : device allocation failed", __func__);
3392 ret = -ENODEV;
3393 goto _out0;
3394 }
3395
3396 vxge_debug_entryexit(
3397 vxge_hw_device_trace_level_get(hldev),
3398 "%s: %s:%d Entering...",
3399 ndev->name, __func__, __LINE__);
3400
3401 vdev = netdev_priv(ndev);
3402 memset(vdev, 0, sizeof(struct vxgedev));
3403
3404 vdev->ndev = ndev;
3405 vdev->devh = hldev;
3406 vdev->pdev = hldev->pdev;
3407 memcpy(&vdev->config, config, sizeof(struct vxge_config));
3408 vdev->rx_hwts = 0;
3409 vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION);
3410
3411 SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
3412
3413 ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG |
3414 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3415 NETIF_F_TSO | NETIF_F_TSO6 |
3416 NETIF_F_HW_VLAN_TX;
3417 if (vdev->config.rth_steering != NO_STEERING)
3418 ndev->hw_features |= NETIF_F_RXHASH;
3419
3420 ndev->features |= ndev->hw_features |
3421 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3422
3423 /* Driver entry points */
3424 ndev->irq = vdev->pdev->irq;
3425 ndev->base_addr = (unsigned long) hldev->bar0;
3426
3427 ndev->netdev_ops = &vxge_netdev_ops;
3428
3429 ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
3430 INIT_WORK(&vdev->reset_task, vxge_reset);
3431
3432 vxge_initialize_ethtool_ops(ndev);
3433
3434 /* Allocate memory for vpath */
3435 vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
3436 no_of_vpath, GFP_KERNEL);
3437 if (!vdev->vpaths) {
3438 vxge_debug_init(VXGE_ERR,
3439 "%s: vpath memory allocation failed",
3440 vdev->ndev->name);
3441 ret = -ENOMEM;
3442 goto _out1;
3443 }
3444
3445 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3446 "%s : checksuming enabled", __func__);
3447
3448 if (high_dma) {
3449 ndev->features |= NETIF_F_HIGHDMA;
3450 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3451 "%s : using High DMA", __func__);
3452 }
3453
3454 ret = register_netdev(ndev);
3455 if (ret) {
3456 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3457 "%s: %s : device registration failed!",
3458 ndev->name, __func__);
3459 goto _out2;
3460 }
3461
3462 /* Set the factory defined MAC address initially */
3463 ndev->addr_len = ETH_ALEN;
3464
3465	/* Set the link state to off at this point; when the link change
3466	 * interrupt comes, the state will be updated automatically to
3467	 * the right state.
3468 */
3469 netif_carrier_off(ndev);
3470
3471 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3472 "%s: Ethernet device registered",
3473 ndev->name);
3474
3475 hldev->ndev = ndev;
3476 *vdev_out = vdev;
3477
3478	/* Reset the device stats */
3479 status = vxge_hw_mrpcim_stats_access(
3480 hldev,
3481 VXGE_HW_STATS_OP_CLEAR_ALL_STATS,
3482 0,
3483 0,
3484 &stat);
3485
3486 if (status == VXGE_HW_ERR_PRIVILAGED_OPEARATION)
3487 vxge_debug_init(
3488 vxge_hw_device_trace_level_get(hldev),
3489 "%s: device stats clear returns"
3490 "VXGE_HW_ERR_PRIVILAGED_OPEARATION", ndev->name);
3491
3492 vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev),
3493 "%s: %s:%d Exiting...",
3494 ndev->name, __func__, __LINE__);
3495
3496 return ret;
3497_out2:
3498 kfree(vdev->vpaths);
3499_out1:
3500 free_netdev(ndev);
3501_out0:
3502 return ret;
3503}
3504
3505/*
3506 * vxge_device_unregister
3507 *
3508 * This function will unregister and free the network device
3509 */
3510static void vxge_device_unregister(struct __vxge_hw_device *hldev)
3511{
3512 struct vxgedev *vdev;
3513 struct net_device *dev;
3514 char buf[IFNAMSIZ];
3515
3516 dev = hldev->ndev;
3517 vdev = netdev_priv(dev);
3518
3519 vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name,
3520 __func__, __LINE__);
3521
3522 strncpy(buf, dev->name, IFNAMSIZ);
3523
3524 flush_work_sync(&vdev->reset_task);
3525
3526	/* unregister_netdev() will call stop() if the device is up */
3527 unregister_netdev(dev);
3528
3529 kfree(vdev->vpaths);
3530
3531 /* we are safe to free it now */
3532 free_netdev(dev);
3533
3534 vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
3535 buf);
3536 vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf,
3537 __func__, __LINE__);
3538}
3539
3540/*
3541 * vxge_callback_crit_err
3542 *
3543 * This function is called by the alarm handler in interrupt context.
3544 * Driver must analyze it based on the event type.
3545 */
3546static void
3547vxge_callback_crit_err(struct __vxge_hw_device *hldev,
3548 enum vxge_hw_event type, u64 vp_id)
3549{
3550 struct net_device *dev = hldev->ndev;
3551 struct vxgedev *vdev = netdev_priv(dev);
3552 struct vxge_vpath *vpath = NULL;
3553 int vpath_idx;
3554
3555 vxge_debug_entryexit(vdev->level_trace,
3556 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
3557
3558 /* Note: This event type should be used for device wide
3559 * indications only - Serious errors, Slot freeze and critical errors
3560 */
3561 vdev->cric_err_event = type;
3562
3563 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
3564 vpath = &vdev->vpaths[vpath_idx];
3565 if (vpath->device_id == vp_id)
3566 break;
3567 }
3568
3569 if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) {
3570 if (type == VXGE_HW_EVENT_SLOT_FREEZE) {
3571 vxge_debug_init(VXGE_ERR,
3572 "%s: Slot is frozen", vdev->ndev->name);
3573 } else if (type == VXGE_HW_EVENT_SERR) {
3574 vxge_debug_init(VXGE_ERR,
3575 "%s: Encountered Serious Error",
3576 vdev->ndev->name);
3577 } else if (type == VXGE_HW_EVENT_CRITICAL_ERR)
3578 vxge_debug_init(VXGE_ERR,
3579 "%s: Encountered Critical Error",
3580 vdev->ndev->name);
3581 }
3582
3583 if ((type == VXGE_HW_EVENT_SERR) ||
3584 (type == VXGE_HW_EVENT_SLOT_FREEZE)) {
3585 if (unlikely(vdev->exec_mode))
3586 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3587 } else if (type == VXGE_HW_EVENT_CRITICAL_ERR) {
3588 vxge_hw_device_mask_all(hldev);
3589 if (unlikely(vdev->exec_mode))
3590 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3591 } else if ((type == VXGE_HW_EVENT_FIFO_ERR) ||
3592 (type == VXGE_HW_EVENT_VPATH_ERR)) {
3593
3594 if (unlikely(vdev->exec_mode))
3595 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3596 else {
3597 /* check if this vpath is already set for reset */
3598 if (!test_and_set_bit(vpath_idx, &vdev->vp_reset)) {
3599
3600 /* disable interrupts for this vpath */
3601 vxge_vpath_intr_disable(vdev, vpath_idx);
3602
3603 /* stop the queue for this vpath */
3604 netif_tx_stop_queue(vpath->fifo.txq);
3605 }
3606 }
3607 }
3608
3609 vxge_debug_entryexit(vdev->level_trace,
3610 "%s: %s:%d Exiting...",
3611 vdev->ndev->name, __func__, __LINE__);
3612}
3613
3614static void verify_bandwidth(void)
3615{
3616 int i, band_width, total = 0, equal_priority = 0;
3617
3618 /* 1. If user enters 0 for some fifo, give equal priority to all */
3619 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3620 if (bw_percentage[i] == 0) {
3621 equal_priority = 1;
3622 break;
3623 }
3624 }
3625
3626 if (!equal_priority) {
3627 /* 2. If sum exceeds 100, give equal priority to all */
3628 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3629 if (bw_percentage[i] == 0xFF)
3630 break;
3631
3632 total += bw_percentage[i];
3633 if (total > VXGE_HW_VPATH_BANDWIDTH_MAX) {
3634 equal_priority = 1;
3635 break;
3636 }
3637 }
3638 }
3639
3640 if (!equal_priority) {
3641 /* Is all the bandwidth consumed? */
3642 if (total < VXGE_HW_VPATH_BANDWIDTH_MAX) {
3643 if (i < VXGE_HW_MAX_VIRTUAL_PATHS) {
3644				/* Split the rest of the bandwidth equally among the remaining VPs */
3645 band_width =
3646 (VXGE_HW_VPATH_BANDWIDTH_MAX - total) /
3647 (VXGE_HW_MAX_VIRTUAL_PATHS - i);
3648 if (band_width < 2) /* min of 2% */
3649 equal_priority = 1;
3650 else {
3651 for (; i < VXGE_HW_MAX_VIRTUAL_PATHS;
3652 i++)
3653 bw_percentage[i] =
3654 band_width;
3655 }
3656 }
3657 } else if (i < VXGE_HW_MAX_VIRTUAL_PATHS)
3658 equal_priority = 1;
3659 }
3660
3661 if (equal_priority) {
3662 vxge_debug_init(VXGE_ERR,
3663 "%s: Assigning equal bandwidth to all the vpaths",
3664 VXGE_DRIVER_NAME);
3665 bw_percentage[0] = VXGE_HW_VPATH_BANDWIDTH_MAX /
3666 VXGE_HW_MAX_VIRTUAL_PATHS;
3667 for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3668 bw_percentage[i] = bw_percentage[0];
3669 }
3670}
3671
3672/*
3673 * Vpath configuration
3674 */
3675static int __devinit vxge_config_vpaths(
3676 struct vxge_hw_device_config *device_config,
3677 u64 vpath_mask, struct vxge_config *config_param)
3678{
3679 int i, no_of_vpaths = 0, default_no_vpath = 0, temp;
3680 u32 txdl_size, txdl_per_memblock;
3681
3682 temp = driver_config->vpath_per_dev;
3683 if ((driver_config->vpath_per_dev == VXGE_USE_DEFAULT) &&
3684 (max_config_dev == VXGE_MAX_CONFIG_DEV)) {
3685		/* No more CPUs. Return the vpath count as zero. */
3686 if (driver_config->g_no_cpus == -1)
3687 return 0;
3688
3689 if (!driver_config->g_no_cpus)
3690 driver_config->g_no_cpus = num_online_cpus();
3691
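		/* Use roughly half of the online CPUs; each vpath consumes one
		 * Tx and one Rx interrupt vector.
		 */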
3692 driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1;
3693 if (!driver_config->vpath_per_dev)
3694 driver_config->vpath_per_dev = 1;
3695
3696 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3697 if (!vxge_bVALn(vpath_mask, i, 1))
3698 continue;
3699 else
3700 default_no_vpath++;
3701 if (default_no_vpath < driver_config->vpath_per_dev)
3702 driver_config->vpath_per_dev = default_no_vpath;
3703
3704 driver_config->g_no_cpus = driver_config->g_no_cpus -
3705 (driver_config->vpath_per_dev * 2);
3706 if (driver_config->g_no_cpus <= 0)
3707 driver_config->g_no_cpus = -1;
3708 }
3709
3710 if (driver_config->vpath_per_dev == 1) {
3711 vxge_debug_ll_config(VXGE_TRACE,
3712 "%s: Disable tx and rx steering, "
3713 "as single vpath is configured", VXGE_DRIVER_NAME);
3714 config_param->rth_steering = NO_STEERING;
3715 config_param->tx_steering_type = NO_STEERING;
3716 device_config->rth_en = 0;
3717 }
3718
3719 /* configure bandwidth */
3720 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3721 device_config->vp_config[i].min_bandwidth = bw_percentage[i];
3722
3723 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3724 device_config->vp_config[i].vp_id = i;
3725 device_config->vp_config[i].mtu = VXGE_HW_DEFAULT_MTU;
3726 if (no_of_vpaths < driver_config->vpath_per_dev) {
3727 if (!vxge_bVALn(vpath_mask, i, 1)) {
3728 vxge_debug_ll_config(VXGE_TRACE,
3729 "%s: vpath: %d is not available",
3730 VXGE_DRIVER_NAME, i);
3731 continue;
3732 } else {
3733 vxge_debug_ll_config(VXGE_TRACE,
3734 "%s: vpath: %d available",
3735 VXGE_DRIVER_NAME, i);
3736 no_of_vpaths++;
3737 }
3738 } else {
3739 vxge_debug_ll_config(VXGE_TRACE,
3740 "%s: vpath: %d is not configured, "
3741 "max_config_vpath exceeded",
3742 VXGE_DRIVER_NAME, i);
3743 break;
3744 }
3745
3746 /* Configure Tx fifo's */
3747 device_config->vp_config[i].fifo.enable =
3748 VXGE_HW_FIFO_ENABLE;
3749 device_config->vp_config[i].fifo.max_frags =
3750 MAX_SKB_FRAGS + 1;
3751 device_config->vp_config[i].fifo.memblock_size =
3752 VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE;
3753
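		/* Size the fifo so the default number of TxD lists fits in
		 * whole memblocks.
		 */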
3754 txdl_size = device_config->vp_config[i].fifo.max_frags *
3755 sizeof(struct vxge_hw_fifo_txd);
3756 txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size;
3757
3758 device_config->vp_config[i].fifo.fifo_blocks =
3759 ((VXGE_DEF_FIFO_LENGTH - 1) / txdl_per_memblock) + 1;
3760
3761 device_config->vp_config[i].fifo.intr =
3762 VXGE_HW_FIFO_QUEUE_INTR_DISABLE;
3763
3764 /* Configure tti properties */
3765 device_config->vp_config[i].tti.intr_enable =
3766 VXGE_HW_TIM_INTR_ENABLE;
3767
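		/* Timer values are given in usec; the device timer tick is
		 * assumed to be ~272 ns, hence the scaling below.
		 */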
3768 device_config->vp_config[i].tti.btimer_val =
3769 (VXGE_TTI_BTIMER_VAL * 1000) / 272;
3770
3771 device_config->vp_config[i].tti.timer_ac_en =
3772 VXGE_HW_TIM_TIMER_AC_ENABLE;
3773
3774 /* For msi-x with napi (each vector has a handler of its own) -
3775 * Set CI to OFF for all vpaths
3776 */
3777 device_config->vp_config[i].tti.timer_ci_en =
3778 VXGE_HW_TIM_TIMER_CI_DISABLE;
3779
3780 device_config->vp_config[i].tti.timer_ri_en =
3781 VXGE_HW_TIM_TIMER_RI_DISABLE;
3782
3783 device_config->vp_config[i].tti.util_sel =
3784 VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL;
3785
3786 device_config->vp_config[i].tti.ltimer_val =
3787 (VXGE_TTI_LTIMER_VAL * 1000) / 272;
3788
3789 device_config->vp_config[i].tti.rtimer_val =
3790 (VXGE_TTI_RTIMER_VAL * 1000) / 272;
3791
3792 device_config->vp_config[i].tti.urange_a = TTI_TX_URANGE_A;
3793 device_config->vp_config[i].tti.urange_b = TTI_TX_URANGE_B;
3794 device_config->vp_config[i].tti.urange_c = TTI_TX_URANGE_C;
3795 device_config->vp_config[i].tti.uec_a = TTI_TX_UFC_A;
3796 device_config->vp_config[i].tti.uec_b = TTI_TX_UFC_B;
3797 device_config->vp_config[i].tti.uec_c = TTI_TX_UFC_C;
3798 device_config->vp_config[i].tti.uec_d = TTI_TX_UFC_D;
3799
3800 /* Configure Rx rings */
3801 device_config->vp_config[i].ring.enable =
3802 VXGE_HW_RING_ENABLE;
3803
3804 device_config->vp_config[i].ring.ring_blocks =
3805 VXGE_HW_DEF_RING_BLOCKS;
3806
3807 device_config->vp_config[i].ring.buffer_mode =
3808 VXGE_HW_RING_RXD_BUFFER_MODE_1;
3809
3810 device_config->vp_config[i].ring.rxds_limit =
3811 VXGE_HW_DEF_RING_RXDS_LIMIT;
3812
3813 device_config->vp_config[i].ring.scatter_mode =
3814 VXGE_HW_RING_SCATTER_MODE_A;
3815
3816 /* Configure rti properties */
3817 device_config->vp_config[i].rti.intr_enable =
3818 VXGE_HW_TIM_INTR_ENABLE;
3819
3820 device_config->vp_config[i].rti.btimer_val =
3821 (VXGE_RTI_BTIMER_VAL * 1000)/272;
3822
3823 device_config->vp_config[i].rti.timer_ac_en =
3824 VXGE_HW_TIM_TIMER_AC_ENABLE;
3825
3826 device_config->vp_config[i].rti.timer_ci_en =
3827 VXGE_HW_TIM_TIMER_CI_DISABLE;
3828
3829 device_config->vp_config[i].rti.timer_ri_en =
3830 VXGE_HW_TIM_TIMER_RI_DISABLE;
3831
3832 device_config->vp_config[i].rti.util_sel =
3833 VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL;
3834
3835 device_config->vp_config[i].rti.urange_a =
3836 RTI_RX_URANGE_A;
3837 device_config->vp_config[i].rti.urange_b =
3838 RTI_RX_URANGE_B;
3839 device_config->vp_config[i].rti.urange_c =
3840 RTI_RX_URANGE_C;
3841 device_config->vp_config[i].rti.uec_a = RTI_RX_UFC_A;
3842 device_config->vp_config[i].rti.uec_b = RTI_RX_UFC_B;
3843 device_config->vp_config[i].rti.uec_c = RTI_RX_UFC_C;
3844 device_config->vp_config[i].rti.uec_d = RTI_RX_UFC_D;
3845
3846 device_config->vp_config[i].rti.rtimer_val =
3847 (VXGE_RTI_RTIMER_VAL * 1000) / 272;
3848
3849 device_config->vp_config[i].rti.ltimer_val =
3850 (VXGE_RTI_LTIMER_VAL * 1000) / 272;
3851
3852 device_config->vp_config[i].rpa_strip_vlan_tag =
3853 vlan_tag_strip;
3854 }
3855
3856 driver_config->vpath_per_dev = temp;
3857 return no_of_vpaths;
3858}
3859
3860 /* initialize device configurations */
3861static void __devinit vxge_device_config_init(
3862 struct vxge_hw_device_config *device_config,
3863 int *intr_type)
3864{
3865 /* Used for CQRQ/SRQ. */
3866 device_config->dma_blockpool_initial =
3867 VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
3868
3869 device_config->dma_blockpool_max =
3870 VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
3871
3872 if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT)
3873 max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT;
3874
3875#ifndef CONFIG_PCI_MSI
3876 vxge_debug_init(VXGE_ERR,
3877 "%s: This Kernel does not support "
3878 "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
3879 *intr_type = INTA;
3880#endif
3881
3882	/* Configure whether MSI-X or an IRQ line (INTA) is used. */
3883 switch (*intr_type) {
3884 case INTA:
3885 device_config->intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
3886 break;
3887
3888 case MSI_X:
3889 device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT;
3890 break;
3891 }
3892
3893 /* Timer period between device poll */
3894 device_config->device_poll_millis = VXGE_TIMER_DELAY;
3895
3896 /* Configure mac based steering. */
3897 device_config->rts_mac_en = addr_learn_en;
3898
3899 /* Configure Vpaths */
3900 device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_MULTI_IT;
3901
3902 vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
3903 __func__);
3904 vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
3905 device_config->intr_mode);
3906 vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
3907 device_config->device_poll_millis);
3908 vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
3909 device_config->rth_en);
3910 vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
3911 device_config->rth_it_type);
3912}
3913
3914static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
3915{
3916 int i;
3917
3918 vxge_debug_init(VXGE_TRACE,
3919 "%s: %d Vpath(s) opened",
3920 vdev->ndev->name, vdev->no_of_vpath);
3921
3922 switch (vdev->config.intr_type) {
3923 case INTA:
3924 vxge_debug_init(VXGE_TRACE,
3925 "%s: Interrupt type INTA", vdev->ndev->name);
3926 break;
3927
3928 case MSI_X:
3929 vxge_debug_init(VXGE_TRACE,
3930 "%s: Interrupt type MSI-X", vdev->ndev->name);
3931 break;
3932 }
3933
3934 if (vdev->config.rth_steering) {
3935 vxge_debug_init(VXGE_TRACE,
3936 "%s: RTH steering enabled for TCP_IPV4",
3937 vdev->ndev->name);
3938 } else {
3939 vxge_debug_init(VXGE_TRACE,
3940 "%s: RTH steering disabled", vdev->ndev->name);
3941 }
3942
3943 switch (vdev->config.tx_steering_type) {
3944 case NO_STEERING:
3945 vxge_debug_init(VXGE_TRACE,
3946 "%s: Tx steering disabled", vdev->ndev->name);
3947 break;
3948 case TX_PRIORITY_STEERING:
3949 vxge_debug_init(VXGE_TRACE,
3950 "%s: Unsupported tx steering option",
3951 vdev->ndev->name);
3952 vxge_debug_init(VXGE_TRACE,
3953 "%s: Tx steering disabled", vdev->ndev->name);
3954 vdev->config.tx_steering_type = 0;
3955 break;
3956 case TX_VLAN_STEERING:
3957 vxge_debug_init(VXGE_TRACE,
3958 "%s: Unsupported tx steering option",
3959 vdev->ndev->name);
3960 vxge_debug_init(VXGE_TRACE,
3961 "%s: Tx steering disabled", vdev->ndev->name);
3962 vdev->config.tx_steering_type = 0;
3963 break;
3964 case TX_MULTIQ_STEERING:
3965 vxge_debug_init(VXGE_TRACE,
3966 "%s: Tx multiqueue steering enabled",
3967 vdev->ndev->name);
3968 break;
3969 case TX_PORT_STEERING:
3970 vxge_debug_init(VXGE_TRACE,
3971 "%s: Tx port steering enabled",
3972 vdev->ndev->name);
3973 break;
3974 default:
3975 vxge_debug_init(VXGE_ERR,
3976 "%s: Unsupported tx steering type",
3977 vdev->ndev->name);
3978 vxge_debug_init(VXGE_TRACE,
3979 "%s: Tx steering disabled", vdev->ndev->name);
3980 vdev->config.tx_steering_type = 0;
3981 }
3982
3983 if (vdev->config.addr_learn_en)
3984 vxge_debug_init(VXGE_TRACE,
3985 "%s: MAC Address learning enabled", vdev->ndev->name);
3986
3987 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3988 if (!vxge_bVALn(vpath_mask, i, 1))
3989 continue;
3990 vxge_debug_ll_config(VXGE_TRACE,
3991 "%s: MTU size - %d", vdev->ndev->name,
3992 ((struct __vxge_hw_device *)(vdev->devh))->
3993 config.vp_config[i].mtu);
3994 vxge_debug_init(VXGE_TRACE,
3995 "%s: VLAN tag stripping %s", vdev->ndev->name,
3996 ((struct __vxge_hw_device *)(vdev->devh))->
3997 config.vp_config[i].rpa_strip_vlan_tag
3998 ? "Enabled" : "Disabled");
3999 vxge_debug_ll_config(VXGE_TRACE,
4000 "%s: Max frags : %d", vdev->ndev->name,
4001 ((struct __vxge_hw_device *)(vdev->devh))->
4002 config.vp_config[i].fifo.max_frags);
4003 break;
4004 }
4005}
4006
4007#ifdef CONFIG_PM
4008/**
4009 * vxge_pm_suspend - vxge power management suspend entry point
4010 *
4011 */
4012static int vxge_pm_suspend(struct pci_dev *pdev, pm_message_t state)
4013{
4014 return -ENOSYS;
4015}
4016/**
4017 * vxge_pm_resume - vxge power management resume entry point
4018 *
4019 */
4020static int vxge_pm_resume(struct pci_dev *pdev)
4021{
4022 return -ENOSYS;
4023}
4024
4025#endif
4026
4027/**
4028 * vxge_io_error_detected - called when PCI error is detected
4029 * @pdev: Pointer to PCI device
4030 * @state: The current pci connection state
4031 *
4032 * This function is called after a PCI bus error affecting
4033 * this device has been detected.
4034 */
4035static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
4036 pci_channel_state_t state)
4037{
4038 struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
4039 struct net_device *netdev = hldev->ndev;
4040
4041 netif_device_detach(netdev);
4042
4043 if (state == pci_channel_io_perm_failure)
4044 return PCI_ERS_RESULT_DISCONNECT;
4045
4046 if (netif_running(netdev)) {
4047 /* Bring down the card, while avoiding PCI I/O */
4048 do_vxge_close(netdev, 0);
4049 }
4050
4051 pci_disable_device(pdev);
4052
4053 return PCI_ERS_RESULT_NEED_RESET;
4054}
4055
4056/**
4057 * vxge_io_slot_reset - called after the pci bus has been reset.
4058 * @pdev: Pointer to PCI device
4059 *
4060 * Restart the card from scratch, as if from a cold-boot.
4061 * At this point, the card has experienced a hard reset,
4062 * followed by fixups by BIOS, and has its config space
4063 * set up identically to what it was at cold boot.
4064 */
4065static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
4066{
4067 struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
4068 struct net_device *netdev = hldev->ndev;
4069
4070 struct vxgedev *vdev = netdev_priv(netdev);
4071
4072 if (pci_enable_device(pdev)) {
4073 netdev_err(netdev, "Cannot re-enable device after reset\n");
4074 return PCI_ERS_RESULT_DISCONNECT;
4075 }
4076
4077 pci_set_master(pdev);
4078 do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
4079
4080 return PCI_ERS_RESULT_RECOVERED;
4081}
4082
4083/**
4084 * vxge_io_resume - called when traffic can start flowing again.
4085 * @pdev: Pointer to PCI device
4086 *
4087 * This callback is called when the error recovery driver tells
4088 * us that it's OK to resume normal operation.
4089 */
4090static void vxge_io_resume(struct pci_dev *pdev)
4091{
4092 struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
4093 struct net_device *netdev = hldev->ndev;
4094
4095 if (netif_running(netdev)) {
4096 if (vxge_open(netdev)) {
4097 netdev_err(netdev,
4098 "Can't bring device back up after reset\n");
4099 return;
4100 }
4101 }
4102
4103 netif_device_attach(netdev);
4104}
4105
4106static inline u32 vxge_get_num_vfs(u64 function_mode)
4107{
4108 u32 num_functions = 0;
4109
4110 switch (function_mode) {
4111 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
4112 case VXGE_HW_FUNCTION_MODE_SRIOV_8:
4113 num_functions = 8;
4114 break;
4115 case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
4116 num_functions = 1;
4117 break;
4118 case VXGE_HW_FUNCTION_MODE_SRIOV:
4119 case VXGE_HW_FUNCTION_MODE_MRIOV:
4120 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17:
4121 num_functions = 17;
4122 break;
4123 case VXGE_HW_FUNCTION_MODE_SRIOV_4:
4124 num_functions = 4;
4125 break;
4126 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2:
4127 num_functions = 2;
4128 break;
4129 case VXGE_HW_FUNCTION_MODE_MRIOV_8:
4130 num_functions = 8; /* TODO */
4131 break;
4132 }
4133 return num_functions;
4134}
4135
4136int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override)
4137{
4138 struct __vxge_hw_device *hldev = vdev->devh;
4139 u32 maj, min, bld, cmaj, cmin, cbld;
4140 enum vxge_hw_status status;
4141 const struct firmware *fw;
4142 int ret;
4143
4144 ret = request_firmware(&fw, fw_name, &vdev->pdev->dev);
4145 if (ret) {
4146 vxge_debug_init(VXGE_ERR, "%s: Firmware file '%s' not found",
4147 VXGE_DRIVER_NAME, fw_name);
4148 goto out;
4149 }
4150
4151 /* Load the new firmware onto the adapter */
4152 status = vxge_update_fw_image(hldev, fw->data, fw->size);
4153 if (status != VXGE_HW_OK) {
4154 vxge_debug_init(VXGE_ERR,
4155 "%s: FW image download to adapter failed '%s'.",
4156 VXGE_DRIVER_NAME, fw_name);
4157 ret = -EIO;
4158 goto out;
4159 }
4160
4161 /* Read the version of the new firmware */
4162 status = vxge_hw_upgrade_read_version(hldev, &maj, &min, &bld);
4163 if (status != VXGE_HW_OK) {
4164 vxge_debug_init(VXGE_ERR,
4165 "%s: Upgrade read version failed '%s'.",
4166 VXGE_DRIVER_NAME, fw_name);
4167 ret = -EIO;
4168 goto out;
4169 }
4170
4171 cmaj = vdev->config.device_hw_info.fw_version.major;
4172 cmin = vdev->config.device_hw_info.fw_version.minor;
4173 cbld = vdev->config.device_hw_info.fw_version.build;
4174 /* It's possible the version in /lib/firmware is not the latest version.
4175 * If so, we could get into a loop of trying to upgrade to the latest
4176 * and flashing the older version.
4177 */
4178 if (VXGE_FW_VER(maj, min, bld) == VXGE_FW_VER(cmaj, cmin, cbld) &&
4179 !override) {
4180 ret = -EINVAL;
4181 goto out;
4182 }
4183
4184 printk(KERN_NOTICE "Upgrade to firmware version %d.%d.%d commencing\n",
4185 maj, min, bld);
4186
4187 /* Flash the adapter with the new firmware */
4188 status = vxge_hw_flash_fw(hldev);
4189 if (status != VXGE_HW_OK) {
4190 vxge_debug_init(VXGE_ERR, "%s: Upgrade commit failed '%s'.",
4191 VXGE_DRIVER_NAME, fw_name);
4192 ret = -EIO;
4193 goto out;
4194 }
4195
4196 printk(KERN_NOTICE "Upgrade of firmware successful! Adapter must be "
4197 "hard reset before using, thus requiring a system reboot or a "
4198 "hotplug event.\n");
4199
4200out:
4201 release_firmware(fw);
4202 return ret;
4203}
4204
4205static int vxge_probe_fw_update(struct vxgedev *vdev)
4206{
4207 u32 maj, min, bld;
4208 int ret, gpxe = 0;
4209 char *fw_name;
4210
4211 maj = vdev->config.device_hw_info.fw_version.major;
4212 min = vdev->config.device_hw_info.fw_version.minor;
4213 bld = vdev->config.device_hw_info.fw_version.build;
4214
4215 if (VXGE_FW_VER(maj, min, bld) == VXGE_CERT_FW_VER)
4216 return 0;
4217
4218 /* Ignore the build number when determining if the current firmware is
4219 * "too new" to load the driver
4220 */
4221 if (VXGE_FW_VER(maj, min, 0) > VXGE_CERT_FW_VER) {
4222 vxge_debug_init(VXGE_ERR, "%s: Firmware newer than last known "
4223 "version, unable to load driver\n",
4224 VXGE_DRIVER_NAME);
4225 return -EINVAL;
4226 }
4227
4228 /* Firmware 1.4.4 and older cannot be upgraded, and is too ancient to
4229 * work with this driver.
4230 */
4231 if (VXGE_FW_VER(maj, min, bld) <= VXGE_FW_DEAD_VER) {
4232 vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d cannot be "
4233 "upgraded\n", VXGE_DRIVER_NAME, maj, min, bld);
4234 return -EINVAL;
4235 }
4236
4237 /* If file not specified, determine gPXE or not */
4238 if (VXGE_FW_VER(maj, min, bld) >= VXGE_EPROM_FW_VER) {
4239 int i;
4240 for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++)
4241 if (vdev->devh->eprom_versions[i]) {
4242 gpxe = 1;
4243 break;
4244 }
4245 }
4246 if (gpxe)
4247 fw_name = "vxge/X3fw-pxe.ncf";
4248 else
4249 fw_name = "vxge/X3fw.ncf";
4250
4251 ret = vxge_fw_upgrade(vdev, fw_name, 0);
4252 /* -EINVAL and -ENOENT are not fatal errors for flashing firmware on
4253 * probe, so ignore them
4254 */
4255 if (ret != -EINVAL && ret != -ENOENT)
4256 return -EIO;
4257 else
4258 ret = 0;
4259
4260 if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) >
4261 VXGE_FW_VER(maj, min, 0)) {
4262 vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to"
4263 " be used with this driver.\n"
4264 "Please get the latest version from "
4265 "ftp://ftp.s2io.com/pub/X3100-Drivers/FIRMWARE",
4266 VXGE_DRIVER_NAME, maj, min, bld);
4267 return -EINVAL;
4268 }
4269
4270 return ret;
4271}
4272
4273static int __devinit is_sriov_initialized(struct pci_dev *pdev)
4274{
4275 int pos;
4276 u16 ctrl;
4277
4278 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4279 if (pos) {
4280 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &ctrl);
4281 if (ctrl & PCI_SRIOV_CTRL_VFE)
4282 return 1;
4283 }
4284 return 0;
4285}
4286
4287/**
4288 * vxge_probe
4289 * @pdev : structure containing the PCI related information of the device.
4290 * @pre: List of PCI devices supported by the driver listed in vxge_id_table.
4291 * Description:
4292 * This function is called when a new PCI device gets detected and initializes
4293 * it.
4294 * Return value:
4295 * returns 0 on success and negative on failure.
4296 *
4297 */
4298static int __devinit
4299vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4300{
4301 struct __vxge_hw_device *hldev;
4302 enum vxge_hw_status status;
4303 int ret;
4304 int high_dma = 0;
4305 u64 vpath_mask = 0;
4306 struct vxgedev *vdev;
4307 struct vxge_config *ll_config = NULL;
4308 struct vxge_hw_device_config *device_config = NULL;
4309 struct vxge_hw_device_attr attr;
4310 int i, j, no_of_vpath = 0, max_vpath_supported = 0;
4311 u8 *macaddr;
4312 struct vxge_mac_addrs *entry;
4313 static int bus = -1, device = -1;
4314 u32 host_type;
4315 u8 new_device = 0;
4316 enum vxge_hw_status is_privileged;
4317 u32 function_mode;
4318 u32 num_vfs = 0;
4319
4320 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
4321 attr.pdev = pdev;
4322
4323 /* In SRIOV-17 mode, functions of the same adapter
4324 * can be deployed on different buses
4325 */
4326 if (((bus != pdev->bus->number) || (device != PCI_SLOT(pdev->devfn))) &&
4327 !pdev->is_virtfn)
4328 new_device = 1;
4329
4330 bus = pdev->bus->number;
4331 device = PCI_SLOT(pdev->devfn);
4332
4333 if (new_device) {
4334 if (driver_config->config_dev_cnt &&
4335 (driver_config->config_dev_cnt !=
4336 driver_config->total_dev_cnt))
4337 vxge_debug_init(VXGE_ERR,
4338 "%s: Configured %d of %d devices",
4339 VXGE_DRIVER_NAME,
4340 driver_config->config_dev_cnt,
4341 driver_config->total_dev_cnt);
4342 driver_config->config_dev_cnt = 0;
4343 driver_config->total_dev_cnt = 0;
4344 }
4345
4346	/* Make the CPU-based number-of-vpaths calculation
4347	 * applicable to individual functions as well.
4348 */
4349 driver_config->g_no_cpus = 0;
4350 driver_config->vpath_per_dev = max_config_vpath;
4351
4352 driver_config->total_dev_cnt++;
4353 if (++driver_config->config_dev_cnt > max_config_dev) {
4354 ret = 0;
4355 goto _exit0;
4356 }
4357
4358 device_config = kzalloc(sizeof(struct vxge_hw_device_config),
4359 GFP_KERNEL);
4360 if (!device_config) {
4361 ret = -ENOMEM;
4362 vxge_debug_init(VXGE_ERR,
4363 "device_config : malloc failed %s %d",
4364 __FILE__, __LINE__);
4365 goto _exit0;
4366 }
4367
4368 ll_config = kzalloc(sizeof(struct vxge_config), GFP_KERNEL);
4369 if (!ll_config) {
4370 ret = -ENOMEM;
4371 vxge_debug_init(VXGE_ERR,
4372 "device_config : malloc failed %s %d",
4373 __FILE__, __LINE__);
4374 goto _exit0;
4375 }
4376 ll_config->tx_steering_type = TX_MULTIQ_STEERING;
4377 ll_config->intr_type = MSI_X;
4378 ll_config->napi_weight = NEW_NAPI_WEIGHT;
4379 ll_config->rth_steering = RTH_STEERING;
4380
4381 /* get the default configuration parameters */
4382 vxge_hw_device_config_default_get(device_config);
4383
4384 /* initialize configuration parameters */
4385 vxge_device_config_init(device_config, &ll_config->intr_type);
4386
4387 ret = pci_enable_device(pdev);
4388 if (ret) {
4389 vxge_debug_init(VXGE_ERR,
4390 "%s : can not enable PCI device", __func__);
4391 goto _exit0;
4392 }
4393
4394 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4395 vxge_debug_ll_config(VXGE_TRACE,
4396 "%s : using 64bit DMA", __func__);
4397
4398 high_dma = 1;
4399
4400 if (pci_set_consistent_dma_mask(pdev,
4401 DMA_BIT_MASK(64))) {
4402 vxge_debug_init(VXGE_ERR,
4403 "%s : unable to obtain 64bit DMA for "
4404 "consistent allocations", __func__);
4405 ret = -ENOMEM;
4406 goto _exit1;
4407 }
4408 } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
4409 vxge_debug_ll_config(VXGE_TRACE,
4410 "%s : using 32bit DMA", __func__);
4411 } else {
4412 ret = -ENOMEM;
4413 goto _exit1;
4414 }
4415
4416 ret = pci_request_region(pdev, 0, VXGE_DRIVER_NAME);
4417 if (ret) {
4418 vxge_debug_init(VXGE_ERR,
4419 "%s : request regions failed", __func__);
4420 goto _exit1;
4421 }
4422
4423 pci_set_master(pdev);
4424
4425 attr.bar0 = pci_ioremap_bar(pdev, 0);
4426 if (!attr.bar0) {
4427 vxge_debug_init(VXGE_ERR,
4428 "%s : cannot remap io memory bar0", __func__);
4429 ret = -ENODEV;
4430 goto _exit2;
4431 }
4432 vxge_debug_ll_config(VXGE_TRACE,
4433 "pci ioremap bar0: %p:0x%llx",
4434 attr.bar0,
4435 (unsigned long long)pci_resource_start(pdev, 0));
4436
4437 status = vxge_hw_device_hw_info_get(attr.bar0,
4438 &ll_config->device_hw_info);
4439 if (status != VXGE_HW_OK) {
4440 vxge_debug_init(VXGE_ERR,
4441 "%s: Reading of hardware info failed."
4442 "Please try upgrading the firmware.", VXGE_DRIVER_NAME);
4443 ret = -EINVAL;
4444 goto _exit3;
4445 }
4446
4447 vpath_mask = ll_config->device_hw_info.vpath_mask;
4448 if (vpath_mask == 0) {
4449 vxge_debug_ll_config(VXGE_TRACE,
4450 "%s: No vpaths available in device", VXGE_DRIVER_NAME);
4451 ret = -EINVAL;
4452 goto _exit3;
4453 }
4454
4455 vxge_debug_ll_config(VXGE_TRACE,
4456 "%s:%d Vpath mask = %llx", __func__, __LINE__,
4457 (unsigned long long)vpath_mask);
4458
4459 function_mode = ll_config->device_hw_info.function_mode;
4460 host_type = ll_config->device_hw_info.host_type;
4461 is_privileged = __vxge_hw_device_is_privilaged(host_type,
4462 ll_config->device_hw_info.func_id);
4463
4464 /* Check how many vpaths are available */
4465 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
4466 if (!((vpath_mask) & vxge_mBIT(i)))
4467 continue;
4468 max_vpath_supported++;
4469 }
4470
4471 if (new_device)
4472 num_vfs = vxge_get_num_vfs(function_mode) - 1;
4473
4474 /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
4475 if (is_sriov(function_mode) && !is_sriov_initialized(pdev) &&
4476 (ll_config->intr_type != INTA)) {
4477 ret = pci_enable_sriov(pdev, num_vfs);
4478 if (ret)
4479 vxge_debug_ll_config(VXGE_ERR,
4480 "Failed in enabling SRIOV mode: %d\n", ret);
4481 /* No need to fail out, as an error here is non-fatal */
4482 }
4483
4484 /*
4485 * Configure vpaths and get driver configured number of vpaths
4486 * which is less than or equal to the maximum vpaths per function.
4487 */
4488 no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, ll_config);
4489 if (!no_of_vpath) {
4490 vxge_debug_ll_config(VXGE_ERR,
4491 "%s: No more vpaths to configure", VXGE_DRIVER_NAME);
4492 ret = 0;
4493 goto _exit3;
4494 }
4495
4496 /* Setting driver callbacks */
4497 attr.uld_callbacks.link_up = vxge_callback_link_up;
4498 attr.uld_callbacks.link_down = vxge_callback_link_down;
4499 attr.uld_callbacks.crit_err = vxge_callback_crit_err;
4500
4501 status = vxge_hw_device_initialize(&hldev, &attr, device_config);
4502 if (status != VXGE_HW_OK) {
4503 vxge_debug_init(VXGE_ERR,
4504 "Failed to initialize device (%d)", status);
4505 ret = -EINVAL;
4506 goto _exit3;
4507 }
4508
4509 if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major,
4510 ll_config->device_hw_info.fw_version.minor,
4511 ll_config->device_hw_info.fw_version.build) >=
4512 VXGE_EPROM_FW_VER) {
4513 struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES];
4514
4515 status = vxge_hw_vpath_eprom_img_ver_get(hldev, img);
4516 if (status != VXGE_HW_OK) {
4517 vxge_debug_init(VXGE_ERR, "%s: Reading of EPROM failed",
4518 VXGE_DRIVER_NAME);
4519 /* This is a non-fatal error, continue */
4520 }
4521
4522 for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
4523 hldev->eprom_versions[i] = img[i].version;
4524 if (!img[i].is_valid)
4525 break;
4526 vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
4527 "%d.%d.%d.%d", VXGE_DRIVER_NAME, i,
4528 VXGE_EPROM_IMG_MAJOR(img[i].version),
4529 VXGE_EPROM_IMG_MINOR(img[i].version),
4530 VXGE_EPROM_IMG_FIX(img[i].version),
4531 VXGE_EPROM_IMG_BUILD(img[i].version));
4532 }
4533 }
4534
4535	/* If FCS stripping is not disabled in the MAC, fail the driver load */
4536 status = vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask);
4537 if (status != VXGE_HW_OK) {
4538 vxge_debug_init(VXGE_ERR, "%s: FCS stripping is enabled in MAC"
4539 " failing driver load", VXGE_DRIVER_NAME);
4540 ret = -EINVAL;
4541 goto _exit4;
4542 }
4543
4544 /* Always enable HWTS. This will always cause the FCS to be invalid,
4545 * due to the fact that HWTS is using the FCS as the location of the
4546 * timestamp. The HW FCS checking will still correctly determine if
4547 * there is a valid checksum, and the FCS is being removed by the driver
4548	 * anyway. So no functionality is being lost. Since it is always
4549 * enabled, we now simply use the ioctl call to set whether or not the
4550 * driver should be paying attention to the HWTS.
4551 */
4552 if (is_privileged == VXGE_HW_OK) {
4553 status = vxge_timestamp_config(hldev);
4554 if (status != VXGE_HW_OK) {
4555 vxge_debug_init(VXGE_ERR, "%s: HWTS enable failed",
4556 VXGE_DRIVER_NAME);
4557 ret = -EFAULT;
4558 goto _exit4;
4559 }
4560 }
4561
4562 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
4563
4564 /* set private device info */
4565 pci_set_drvdata(pdev, hldev);
4566
4567 ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
4568 ll_config->addr_learn_en = addr_learn_en;
4569 ll_config->rth_algorithm = RTH_ALG_JENKINS;
4570 ll_config->rth_hash_type_tcpipv4 = 1;
4571 ll_config->rth_hash_type_ipv4 = 0;
4572 ll_config->rth_hash_type_tcpipv6 = 0;
4573 ll_config->rth_hash_type_ipv6 = 0;
4574 ll_config->rth_hash_type_tcpipv6ex = 0;
4575 ll_config->rth_hash_type_ipv6ex = 0;
4576 ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
4577 ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4578 ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4579
4580 ret = vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
4581 &vdev);
4582 if (ret) {
4583 ret = -EINVAL;
4584 goto _exit4;
4585 }
4586
4587 ret = vxge_probe_fw_update(vdev);
4588 if (ret)
4589 goto _exit5;
4590
4591 vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
4592 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
4593 vxge_hw_device_trace_level_get(hldev));
4594
4595 /* set private HW device info */
4596 vdev->mtu = VXGE_HW_DEFAULT_MTU;
4597 vdev->bar0 = attr.bar0;
4598 vdev->max_vpath_supported = max_vpath_supported;
4599 vdev->no_of_vpath = no_of_vpath;
4600
4601 /* Virtual Path count */
4602 for (i = 0, j = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
4603 if (!vxge_bVALn(vpath_mask, i, 1))
4604 continue;
4605 if (j >= vdev->no_of_vpath)
4606 break;
4607
4608 vdev->vpaths[j].is_configured = 1;
4609 vdev->vpaths[j].device_id = i;
4610 vdev->vpaths[j].ring.driver_id = j;
4611 vdev->vpaths[j].vdev = vdev;
4612 vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;
4613 memcpy((u8 *)vdev->vpaths[j].macaddr,
4614 ll_config->device_hw_info.mac_addrs[i],
4615 ETH_ALEN);
4616
4617 /* Initialize the mac address list header */
4618 INIT_LIST_HEAD(&vdev->vpaths[j].mac_addr_list);
4619
4620 vdev->vpaths[j].mac_addr_cnt = 0;
4621 vdev->vpaths[j].mcast_addr_cnt = 0;
4622 j++;
4623 }
4624 vdev->exec_mode = VXGE_EXEC_MODE_DISABLE;
4625 vdev->max_config_port = max_config_port;
4626
4627 vdev->vlan_tag_strip = vlan_tag_strip;
4628
4629 /* map the hashing selector table to the configured vpaths */
4630 for (i = 0; i < vdev->no_of_vpath; i++)
4631 vdev->vpath_selector[i] = vpath_selector[i];
4632
4633 macaddr = (u8 *)vdev->vpaths[0].macaddr;
4634
4635 ll_config->device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
4636 ll_config->device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
4637 ll_config->device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';
4638
4639 vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s",
4640 vdev->ndev->name, ll_config->device_hw_info.serial_number);
4641
4642 vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s",
4643 vdev->ndev->name, ll_config->device_hw_info.part_number);
4644
4645 vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
4646 vdev->ndev->name, ll_config->device_hw_info.product_desc);
4647
4648 vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
4649 vdev->ndev->name, macaddr);
4650
4651 vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
4652 vdev->ndev->name, vxge_hw_device_link_width_get(hldev));
4653
4654 vxge_debug_init(VXGE_TRACE,
4655 "%s: Firmware version : %s Date : %s", vdev->ndev->name,
4656 ll_config->device_hw_info.fw_version.version,
4657 ll_config->device_hw_info.fw_date.date);
4658
4659 if (new_device) {
4660 switch (ll_config->device_hw_info.function_mode) {
4661 case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
4662 vxge_debug_init(VXGE_TRACE,
4663 "%s: Single Function Mode Enabled", vdev->ndev->name);
4664 break;
4665 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
4666 vxge_debug_init(VXGE_TRACE,
4667 "%s: Multi Function Mode Enabled", vdev->ndev->name);
4668 break;
4669 case VXGE_HW_FUNCTION_MODE_SRIOV:
4670 vxge_debug_init(VXGE_TRACE,
4671 "%s: Single Root IOV Mode Enabled", vdev->ndev->name);
4672 break;
4673 case VXGE_HW_FUNCTION_MODE_MRIOV:
4674 vxge_debug_init(VXGE_TRACE,
4675 "%s: Multi Root IOV Mode Enabled", vdev->ndev->name);
4676 break;
4677 }
4678 }
4679
4680 vxge_print_parm(vdev, vpath_mask);
4681
4682	/* Store the fw version for ethtool option */
4683 strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version);
4684 memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
4685 memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN);
4686
4687 /* Copy the station mac address to the list */
4688 for (i = 0; i < vdev->no_of_vpath; i++) {
4689 entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_KERNEL);
4690 if (NULL == entry) {
4691 vxge_debug_init(VXGE_ERR,
4692 "%s: mac_addr_list : memory allocation failed",
4693 vdev->ndev->name);
4694 ret = -EPERM;
4695 goto _exit6;
4696 }
4697 macaddr = (u8 *)&entry->macaddr;
4698 memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
4699 list_add(&entry->item, &vdev->vpaths[i].mac_addr_list);
4700 vdev->vpaths[i].mac_addr_cnt = 1;
4701 }
4702
4703 kfree(device_config);
4704
4705 /*
4706 * INTA is shared in multi-function mode. This is unlike the INTA
4707 * implementation in MR mode, where each VH has its own INTA message.
4708 * - INTA is masked (disabled) as long as at least one function sets
4709 * its TITAN_MASK_ALL_INT.ALARM bit.
4710 * - INTA is unmasked (enabled) when all enabled functions have cleared
4711 * their own TITAN_MASK_ALL_INT.ALARM bit.
4712 * The TITAN_MASK_ALL_INT ALARM & TRAFFIC bits are cleared on power up.
4713 * Though this driver leaves the top level interrupts unmasked while
4714 * leaving the required module interrupt bits masked on exit, there
4715	 * could be a rogue driver around that does not follow this procedure
4716 * resulting in a failure to generate interrupts. The following code is
4717 * present to prevent such a failure.
4718 */
4719
4720 if (ll_config->device_hw_info.function_mode ==
4721 VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION)
4722 if (vdev->config.intr_type == INTA)
4723 vxge_hw_device_unmask_all(hldev);
4724
4725 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
4726 vdev->ndev->name, __func__, __LINE__);
4727
4728 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
4729 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
4730 vxge_hw_device_trace_level_get(hldev));
4731
4732 kfree(ll_config);
4733 return 0;
4734
4735_exit6:
4736 for (i = 0; i < vdev->no_of_vpath; i++)
4737 vxge_free_mac_add_list(&vdev->vpaths[i]);
4738_exit5:
4739 vxge_device_unregister(hldev);
4740_exit4:
4741 pci_set_drvdata(pdev, NULL);
4742 vxge_hw_device_terminate(hldev);
4743 pci_disable_sriov(pdev);
4744_exit3:
4745 iounmap(attr.bar0);
4746_exit2:
4747 pci_release_region(pdev, 0);
4748_exit1:
4749 pci_disable_device(pdev);
4750_exit0:
4751 kfree(ll_config);
4752 kfree(device_config);
4753 driver_config->config_dev_cnt--;
4754 driver_config->total_dev_cnt--;
4755 return ret;
4756}
4757
4758/**
4759 * vxge_remove - Free the PCI device
4760 * @pdev: structure containing the PCI related information of the device.
4761 * Description: This function is called by the PCI subsystem to release a
4762 * PCI device and free up all resources held by the device.
4763 */
4764static void __devexit vxge_remove(struct pci_dev *pdev)
4765{
4766 struct __vxge_hw_device *hldev;
4767 struct vxgedev *vdev;
4768 int i;
4769
4770 hldev = pci_get_drvdata(pdev);
4771 if (hldev == NULL)
4772 return;
4773
4774 vdev = netdev_priv(hldev->ndev);
4775
4776 vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
4777 vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
4778 __func__);
4779
4780 for (i = 0; i < vdev->no_of_vpath; i++)
4781 vxge_free_mac_add_list(&vdev->vpaths[i]);
4782
4783 vxge_device_unregister(hldev);
4784 pci_set_drvdata(pdev, NULL);
4785 /* Do not call pci_disable_sriov here, as it will break child devices */
4786 vxge_hw_device_terminate(hldev);
4787 iounmap(vdev->bar0);
4788 pci_release_region(pdev, 0);
4789 pci_disable_device(pdev);
4790 driver_config->config_dev_cnt--;
4791 driver_config->total_dev_cnt--;
4792
4793 vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
4794 __func__, __LINE__);
4795 vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__,
4796 __LINE__);
4797}
4798
4799static struct pci_error_handlers vxge_err_handler = {
4800 .error_detected = vxge_io_error_detected,
4801 .slot_reset = vxge_io_slot_reset,
4802 .resume = vxge_io_resume,
4803};
4804
4805static struct pci_driver vxge_driver = {
4806 .name = VXGE_DRIVER_NAME,
4807 .id_table = vxge_id_table,
4808 .probe = vxge_probe,
4809 .remove = __devexit_p(vxge_remove),
4810#ifdef CONFIG_PM
4811 .suspend = vxge_pm_suspend,
4812 .resume = vxge_pm_resume,
4813#endif
4814 .err_handler = &vxge_err_handler,
4815};
4816
4817static int __init
4818vxge_starter(void)
4819{
4820 int ret = 0;
4821
4822 pr_info("Copyright(c) 2002-2010 Exar Corp.\n");
4823 pr_info("Driver version: %s\n", DRV_VERSION);
4824
4825 verify_bandwidth();
4826
4827 driver_config = kzalloc(sizeof(struct vxge_drv_config), GFP_KERNEL);
4828 if (!driver_config)
4829 return -ENOMEM;
4830
4831 ret = pci_register_driver(&vxge_driver);
4832 if (ret) {
4833 kfree(driver_config);
4834 goto err;
4835 }
4836
4837 if (driver_config->config_dev_cnt &&
4838 (driver_config->config_dev_cnt != driver_config->total_dev_cnt))
4839 vxge_debug_init(VXGE_ERR,
4840 "%s: Configured %d of %d devices",
4841 VXGE_DRIVER_NAME, driver_config->config_dev_cnt,
4842 driver_config->total_dev_cnt);
4843err:
4844 return ret;
4845}
4846
4847static void __exit
4848vxge_closer(void)
4849{
4850 pci_unregister_driver(&vxge_driver);
4851 kfree(driver_config);
4852}
4853module_init(vxge_starter);
4854module_exit(vxge_closer);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h
new file mode 100644
index 000000000000..f52a42d1dbb7
--- /dev/null
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h
@@ -0,0 +1,519 @@
1/******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
9 *
10 * vxge-main.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/
14#ifndef VXGE_MAIN_H
15#define VXGE_MAIN_H
16
17#include "vxge-traffic.h"
18#include "vxge-config.h"
19#include "vxge-version.h"
20#include <linux/list.h>
21#include <linux/bitops.h>
22#include <linux/if_vlan.h>
23
24#define VXGE_DRIVER_NAME "vxge"
25#define VXGE_DRIVER_VENDOR "Neterion, Inc"
26#define VXGE_DRIVER_FW_VERSION_MAJOR 1
27
28#define DRV_VERSION VXGE_VERSION_MAJOR"."VXGE_VERSION_MINOR"."\
29 VXGE_VERSION_FIX"."VXGE_VERSION_BUILD"-"\
30 VXGE_VERSION_FOR
31
32#define PCI_DEVICE_ID_TITAN_WIN 0x5733
33#define PCI_DEVICE_ID_TITAN_UNI 0x5833
34#define VXGE_HW_TITAN1_PCI_REVISION 1
35#define VXGE_HW_TITAN1A_PCI_REVISION 2
36
37#define VXGE_USE_DEFAULT 0xffffffff
38#define VXGE_HW_VPATH_MSIX_ACTIVE 4
39#define VXGE_ALARM_MSIX_ID 2
40#define VXGE_HW_RXSYNC_FREQ_CNT 4
41#define VXGE_LL_WATCH_DOG_TIMEOUT (15 * HZ)
42#define VXGE_LL_RX_COPY_THRESHOLD 256
43#define VXGE_DEF_FIFO_LENGTH 84
44
45#define NO_STEERING 0
46#define PORT_STEERING 0x1
47#define RTH_STEERING 0x2
48#define RX_TOS_STEERING 0x3
49#define RX_VLAN_STEERING 0x4
50#define RTH_BUCKET_SIZE 4
51
52#define TX_PRIORITY_STEERING 1
53#define TX_VLAN_STEERING 2
54#define TX_PORT_STEERING 3
55#define TX_MULTIQ_STEERING 4
56
57#define VXGE_HW_MAC_ADDR_LEARN_DEFAULT VXGE_HW_RTS_MAC_DISABLE
58
59#define VXGE_TTI_BTIMER_VAL 250000
60
61#define VXGE_TTI_LTIMER_VAL 1000
62#define VXGE_T1A_TTI_LTIMER_VAL 80
63#define VXGE_TTI_RTIMER_VAL 0
64#define VXGE_TTI_RTIMER_ADAPT_VAL 10
65#define VXGE_T1A_TTI_RTIMER_VAL 400
66#define VXGE_RTI_BTIMER_VAL 250
67#define VXGE_RTI_LTIMER_VAL 100
68#define VXGE_RTI_RTIMER_VAL 0
69#define VXGE_RTI_RTIMER_ADAPT_VAL 15
70#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
71#define VXGE_ISR_POLLING_CNT 8
72#define VXGE_MAX_CONFIG_DEV 0xFF
73#define VXGE_EXEC_MODE_DISABLE 0
74#define VXGE_EXEC_MODE_ENABLE 1
75#define VXGE_MAX_CONFIG_PORT 1
76#define VXGE_ALL_VID_DISABLE 0
77#define VXGE_ALL_VID_ENABLE 1
78#define VXGE_PAUSE_CTRL_DISABLE 0
79#define VXGE_PAUSE_CTRL_ENABLE 1
80
81#define TTI_TX_URANGE_A 5
82#define TTI_TX_URANGE_B 15
83#define TTI_TX_URANGE_C 40
84#define TTI_TX_UFC_A 5
85#define TTI_TX_UFC_B 40
86#define TTI_TX_UFC_C 60
87#define TTI_TX_UFC_D 100
88#define TTI_T1A_TX_UFC_A 30
89#define TTI_T1A_TX_UFC_B 80
90/* Slope = (max_mtu - min_mtu) / (max_ufc - min_ufc) */
91/* Slope ~= 93 */
92/* UFC_C is 60 at a 9k MTU and ~140 at a 1.5k MTU */
93#define TTI_T1A_TX_UFC_C(mtu) (60 + ((VXGE_HW_MAX_MTU - mtu) / 93))
94
95/* Slope ~= 37 */
96/* UFC_D is 100 at a 9k MTU and ~300 at a 1.5k MTU */
97#define TTI_T1A_TX_UFC_D(mtu) (100 + ((VXGE_HW_MAX_MTU - mtu) / 37))
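As a quick sanity check of the two slope macros above (an illustrative calculation only, substituting a 9000-byte value for VXGE_HW_MAX_MTU; the real constant lives in the HW headers and is not part of this hunk):

	/* Illustrative only -- not part of the driver source */
	TTI_T1A_TX_UFC_C(9000) == 60  + (9000 - 9000) / 93   /*  60 */
	TTI_T1A_TX_UFC_C(1500) == 60  + (9000 - 1500) / 93   /* ~= 140 */
	TTI_T1A_TX_UFC_D(1500) == 100 + (9000 - 1500) / 37   /* ~= 300 */

which matches the 9k and 1.5k endpoint values quoted in the comments above.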
98
99
100#define RTI_RX_URANGE_A 5
101#define RTI_RX_URANGE_B 15
102#define RTI_RX_URANGE_C 40
103#define RTI_T1A_RX_URANGE_A 1
104#define RTI_T1A_RX_URANGE_B 20
105#define RTI_T1A_RX_URANGE_C 50
106#define RTI_RX_UFC_A 1
107#define RTI_RX_UFC_B 5
108#define RTI_RX_UFC_C 10
109#define RTI_RX_UFC_D 15
110#define RTI_T1A_RX_UFC_B 20
111#define RTI_T1A_RX_UFC_C 50
112#define RTI_T1A_RX_UFC_D 60
113
114/*
115 * With the moderation parameters the interrupt rate is kept at about 3k per
116 * second for most, but not all, traffic. These are the maximum interrupt
117 * counts allowed per function with INTA, or per vector with MSI-X, in a
118 * 10 millisecond time period. Enabled only for Titan 1A.
119 */
120#define VXGE_T1A_MAX_INTERRUPT_COUNT 100
121#define VXGE_T1A_MAX_TX_INTERRUPT_COUNT 200
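For scale, 100 events in a 10 ms window caps the rate at roughly 100 / 0.010 s = 10,000 interrupts per second (20,000 for the Tx counter), well above the ~3k/s figure mentioned in the comment above.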
122
123/* Milli secs timer period */
124#define VXGE_TIMER_DELAY 10000
125
126#define VXGE_LL_MAX_FRAME_SIZE(dev) ((dev)->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE)
127
128#define is_sriov(function_mode) \
129 ((function_mode == VXGE_HW_FUNCTION_MODE_SRIOV) || \
130 (function_mode == VXGE_HW_FUNCTION_MODE_SRIOV_8) || \
131 (function_mode == VXGE_HW_FUNCTION_MODE_SRIOV_4))
132
133enum vxge_reset_event {
134 /* reset events */
135 VXGE_LL_VPATH_RESET = 0,
136 VXGE_LL_DEVICE_RESET = 1,
137 VXGE_LL_FULL_RESET = 2,
138 VXGE_LL_START_RESET = 3,
139 VXGE_LL_COMPL_RESET = 4
140};
141/* These flags represent the device's temporary state */
142enum vxge_device_state_t {
143__VXGE_STATE_RESET_CARD = 0,
144__VXGE_STATE_CARD_UP
145};
146
147enum vxge_mac_addr_state {
148 /* mac address states */
149 VXGE_LL_MAC_ADDR_IN_LIST = 0,
150 VXGE_LL_MAC_ADDR_IN_DA_TABLE = 1
151};
152
153struct vxge_drv_config {
154 int config_dev_cnt;
155 int total_dev_cnt;
156 int g_no_cpus;
157 unsigned int vpath_per_dev;
158};
159
160struct macInfo {
161 unsigned char macaddr[ETH_ALEN];
162 unsigned char macmask[ETH_ALEN];
163 unsigned int vpath_no;
164 enum vxge_mac_addr_state state;
165};
166
167struct vxge_config {
168 int tx_pause_enable;
169 int rx_pause_enable;
170
171#define NEW_NAPI_WEIGHT 64
172 int napi_weight;
173 int intr_type;
174#define INTA 0
175#define MSI 1
176#define MSI_X 2
177
178 int addr_learn_en;
179
180 u32 rth_steering:2,
181 rth_algorithm:2,
182 rth_hash_type_tcpipv4:1,
183 rth_hash_type_ipv4:1,
184 rth_hash_type_tcpipv6:1,
185 rth_hash_type_ipv6:1,
186 rth_hash_type_tcpipv6ex:1,
187 rth_hash_type_ipv6ex:1,
188 rth_bkt_sz:8;
189 int rth_jhash_golden_ratio;
190 int tx_steering_type;
191 int fifo_indicate_max_pkts;
192 struct vxge_hw_device_hw_info device_hw_info;
193};
194
195struct vxge_msix_entry {
196	/* Mimicking the kernel's msix_entry struct. */
197 u16 vector;
198 u16 entry;
199 u16 in_use;
200 void *arg;
201};
202
203/* Software Statistics */
204
205struct vxge_sw_stats {
206
207 /* Virtual Path */
208 unsigned long vpaths_open;
209 unsigned long vpath_open_fail;
210
211 /* Misc. */
212 unsigned long link_up;
213 unsigned long link_down;
214};
215
216struct vxge_mac_addrs {
217 struct list_head item;
218 u64 macaddr;
219 u64 macmask;
220 enum vxge_mac_addr_state state;
221};
222
223struct vxgedev;
224
225struct vxge_fifo_stats {
226 struct u64_stats_sync syncp;
227 u64 tx_frms;
228 u64 tx_bytes;
229
230 unsigned long tx_errors;
231 unsigned long txd_not_free;
232 unsigned long txd_out_of_desc;
233 unsigned long pci_map_fail;
234};
235
236struct vxge_fifo {
237 struct net_device *ndev;
238 struct pci_dev *pdev;
239 struct __vxge_hw_fifo *handle;
240 struct netdev_queue *txq;
241
242 int tx_steering_type;
243 int indicate_max_pkts;
244
245 /* Adaptive interrupt moderation parameters used in T1A */
246 unsigned long interrupt_count;
247 unsigned long jiffies;
248
249 u32 tx_vector_no;
250 /* Tx stats */
251 struct vxge_fifo_stats stats;
252} ____cacheline_aligned;
253
254struct vxge_ring_stats {
255 struct u64_stats_sync syncp;
256 u64 rx_frms;
257 u64 rx_mcast;
258 u64 rx_bytes;
259
260 unsigned long rx_errors;
261 unsigned long rx_dropped;
262 unsigned long prev_rx_frms;
263 unsigned long pci_map_fail;
264 unsigned long skb_alloc_fail;
265};
266
267struct vxge_ring {
268 struct net_device *ndev;
269 struct pci_dev *pdev;
270 struct __vxge_hw_ring *handle;
271 /* The vpath id maintained in the driver -
272 * 0 to 'maximum_vpaths_in_function - 1'
273 */
274 int driver_id;
275
276 /* Adaptive interrupt moderation parameters used in T1A */
277 unsigned long interrupt_count;
278 unsigned long jiffies;
279
280 /* copy of the flag indicating whether rx_hwts is to be used */
281 u32 rx_hwts:1;
282
283 int pkts_processed;
284 int budget;
285
286 struct napi_struct napi;
287 struct napi_struct *napi_p;
288
289#define VXGE_MAX_MAC_ADDR_COUNT 30
290
291 int vlan_tag_strip;
292 u32 rx_vector_no;
293 enum vxge_hw_status last_status;
294
295 /* Rx stats */
296 struct vxge_ring_stats stats;
297} ____cacheline_aligned;
298
299struct vxge_vpath {
300 struct vxge_fifo fifo;
301 struct vxge_ring ring;
302
303 struct __vxge_hw_vpath_handle *handle;
304
305 /* Actual vpath id for this vpath in the device - 0 to 16 */
306 int device_id;
307 int max_mac_addr_cnt;
308 int is_configured;
309 int is_open;
310 struct vxgedev *vdev;
311 u8 macaddr[ETH_ALEN];
312 u8 macmask[ETH_ALEN];
313
314#define VXGE_MAX_LEARN_MAC_ADDR_CNT 2048
315 /* mac addresses currently programmed into NIC */
316 u16 mac_addr_cnt;
317 u16 mcast_addr_cnt;
318 struct list_head mac_addr_list;
319
320 u32 level_err;
321 u32 level_trace;
322};
323#define VXGE_COPY_DEBUG_INFO_TO_LL(vdev, err, trace) { \
324 for (i = 0; i < vdev->no_of_vpath; i++) { \
325 vdev->vpaths[i].level_err = err; \
326 vdev->vpaths[i].level_trace = trace; \
327 } \
328 vdev->level_err = err; \
329 vdev->level_trace = trace; \
330}
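Note that this macro is not hygienic: it relies on a local loop counter i already existing in the calling function (as it does in vxge_probe above). A minimal sketch of a call site, with the surrounding locals assumed for illustration rather than copied from the driver:

	int i;                                   /* consumed by the macro's loop */
	struct vxgedev *vdev = netdev_priv(ndev);

	/* propagate the current HW error/trace levels into every vpath */
	VXGE_COPY_DEBUG_INFO_TO_LL(vdev,
		vxge_hw_device_error_level_get(vdev->devh),
		vxge_hw_device_trace_level_get(vdev->devh));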
331
332struct vxgedev {
333 struct net_device *ndev;
334 struct pci_dev *pdev;
335 struct __vxge_hw_device *devh;
336 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
337 int vlan_tag_strip;
338 struct vxge_config config;
339 unsigned long state;
340
341 /* Indicates which vpath to reset */
342 unsigned long vp_reset;
343
344 /* Timer used for polling vpath resets */
345 struct timer_list vp_reset_timer;
346
347 /* Timer used for polling vpath lockup */
348 struct timer_list vp_lockup_timer;
349
350 /*
351 * Flags to track whether device is in All Multicast
352 * or in promiscuous mode.
353 */
354 u16 all_multi_flg;
355
356 /* A flag indicating whether rx_hwts is to be used or not. */
357 u32 rx_hwts:1,
358 titan1:1;
359
360 struct vxge_msix_entry *vxge_entries;
361 struct msix_entry *entries;
362 /*
363 * 4 for each vpath * 17;
364 * total is 68
365 */
366#define VXGE_MAX_REQUESTED_MSIX 68
367#define VXGE_INTR_STRLEN 80
368 char desc[VXGE_MAX_REQUESTED_MSIX][VXGE_INTR_STRLEN];
369
370 enum vxge_hw_event cric_err_event;
371
372 int max_vpath_supported;
373 int no_of_vpath;
374
375 struct napi_struct napi;
376	/* A debug option: when enabled and an error condition occurs,
377	 * the driver will take the following steps:
378	 * - mask all interrupts
379	 * - not clear the source of the alarm
380	 * - gracefully stop all I/O
381	 * A diagnostic dump of registers and stats at this point
382	 * reveals very useful information.
383 */
384 int exec_mode;
385 int max_config_port;
386 struct vxge_vpath *vpaths;
387
388 struct __vxge_hw_vpath_handle *vp_handles[VXGE_HW_MAX_VIRTUAL_PATHS];
389 void __iomem *bar0;
390 struct vxge_sw_stats stats;
391 int mtu;
392 /* Below variables are used for vpath selection to transmit a packet */
393 u8 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS];
394 u64 vpaths_deployed;
395
396 u32 intr_cnt;
397 u32 level_err;
398 u32 level_trace;
399 char fw_version[VXGE_HW_FW_STRLEN];
400 struct work_struct reset_task;
401};
402
403struct vxge_rx_priv {
404 struct sk_buff *skb;
405 unsigned char *skb_data;
406 dma_addr_t data_dma;
407 dma_addr_t data_size;
408};
409
410struct vxge_tx_priv {
411 struct sk_buff *skb;
412 dma_addr_t dma_buffers[MAX_SKB_FRAGS+1];
413};
414
415#define VXGE_MODULE_PARAM_INT(p, val) \
416 static int p = val; \
417 module_param(p, int, 0)
418
419#define vxge_os_timer(timer, handle, arg, exp) do { \
420 init_timer(&timer); \
421 timer.function = handle; \
422 timer.data = (unsigned long) arg; \
423 mod_timer(&timer, (jiffies + exp)); \
424 } while (0);
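Both helper macros above are consumed elsewhere in vxge-main.c; a hedged usage sketch follows (the parameter name and timer handler are written here for illustration, mirroring how the probe path above references max_config_vpath and vp_reset_timer):

	/* declare an integer module parameter with a default value */
	VXGE_MODULE_PARAM_INT(max_config_vpath, VXGE_USE_DEFAULT);

	/* inside the open path: arm the vpath-reset poll timer for HZ/2, where
	 * vxge_poll_vp_reset(unsigned long data) is assumed to be the handler */
	vxge_os_timer(vdev->vp_reset_timer, vxge_poll_vp_reset, vdev, HZ / 2);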
425
426void vxge_initialize_ethtool_ops(struct net_device *ndev);
427enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
428int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
429
430/**
431 * #define VXGE_DEBUG_INIT: debug for initialization functions
432 * #define VXGE_DEBUG_TX : debug transmit related functions
433 * #define VXGE_DEBUG_RX : debug receive related functions
434 * #define VXGE_DEBUG_MEM : debug memory module
435 * #define VXGE_DEBUG_LOCK: debug locks
436 * #define VXGE_DEBUG_SEM : debug semaphore
437 * #define VXGE_DEBUG_ENTRYEXIT: debug functions by adding entry exit statements
438*/
439#define VXGE_DEBUG_INIT 0x00000001
440#define VXGE_DEBUG_TX 0x00000002
441#define VXGE_DEBUG_RX 0x00000004
442#define VXGE_DEBUG_MEM 0x00000008
443#define VXGE_DEBUG_LOCK 0x00000010
444#define VXGE_DEBUG_SEM 0x00000020
445#define VXGE_DEBUG_ENTRYEXIT 0x00000040
446#define VXGE_DEBUG_INTR 0x00000080
447#define VXGE_DEBUG_LL_CONFIG 0x00000100
448
449/* Debug tracing for VXGE driver */
450#ifndef VXGE_DEBUG_MASK
451#define VXGE_DEBUG_MASK 0x0
452#endif
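Each vxge_debug_* wrapper below compiles away entirely unless its category bit is present in VXGE_DEBUG_MASK, so tracing is selected at build time. A sketch of overriding the default mask (values taken from the defines above; how the build actually sets the mask is outside this hunk):

	/* Build-time override (illustrative): compile with
	 *     -DVXGE_DEBUG_MASK=0x101
	 * i.e. VXGE_DEBUG_INIT (0x1) | VXGE_DEBUG_LL_CONFIG (0x100), so that
	 * only INIT and LL_CONFIG tracing is compiled in; every other
	 * vxge_debug_* call still expands to nothing.
	 */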
453
454#if (VXGE_DEBUG_LL_CONFIG & VXGE_DEBUG_MASK)
455#define vxge_debug_ll_config(level, fmt, ...) \
456 vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, __VA_ARGS__)
457#else
458#define vxge_debug_ll_config(level, fmt, ...)
459#endif
460
461#if (VXGE_DEBUG_INIT & VXGE_DEBUG_MASK)
462#define vxge_debug_init(level, fmt, ...) \
463 vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, __VA_ARGS__)
464#else
465#define vxge_debug_init(level, fmt, ...)
466#endif
467
468#if (VXGE_DEBUG_TX & VXGE_DEBUG_MASK)
469#define vxge_debug_tx(level, fmt, ...) \
470 vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, __VA_ARGS__)
471#else
472#define vxge_debug_tx(level, fmt, ...)
473#endif
474
475#if (VXGE_DEBUG_RX & VXGE_DEBUG_MASK)
476#define vxge_debug_rx(level, fmt, ...) \
477 vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, __VA_ARGS__)
478#else
479#define vxge_debug_rx(level, fmt, ...)
480#endif
481
482#if (VXGE_DEBUG_MEM & VXGE_DEBUG_MASK)
483#define vxge_debug_mem(level, fmt, ...) \
484 vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, __VA_ARGS__)
485#else
486#define vxge_debug_mem(level, fmt, ...)
487#endif
488
489#if (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK)
490#define vxge_debug_entryexit(level, fmt, ...) \
491 vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, __VA_ARGS__)
492#else
493#define vxge_debug_entryexit(level, fmt, ...)
494#endif
495
496#if (VXGE_DEBUG_INTR & VXGE_DEBUG_MASK)
497#define vxge_debug_intr(level, fmt, ...) \
498 vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, __VA_ARGS__)
499#else
500#define vxge_debug_intr(level, fmt, ...)
501#endif
502
503#define VXGE_DEVICE_DEBUG_LEVEL_SET(level, mask, vdev) {\
504 vxge_hw_device_debug_set((struct __vxge_hw_device *)vdev->devh, \
505 level, mask);\
506 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, \
507 vxge_hw_device_error_level_get((struct __vxge_hw_device *) \
508 vdev->devh), \
509 vxge_hw_device_trace_level_get((struct __vxge_hw_device *) \
510 vdev->devh));\
511}
512
513#ifdef NETIF_F_GSO
514#define vxge_tcp_mss(skb) (skb_shinfo(skb)->gso_size)
515#define vxge_udp_mss(skb) (skb_shinfo(skb)->gso_size)
516#define vxge_offload_type(skb) (skb_shinfo(skb)->gso_type)
517#endif
518
519#endif
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-reg.h b/drivers/net/ethernet/neterion/vxge/vxge-reg.h
new file mode 100644
index 000000000000..3e658b175947
--- /dev/null
+++ b/drivers/net/ethernet/neterion/vxge/vxge-reg.h
@@ -0,0 +1,4636 @@
1/******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
9 *
10 * vxge-reg.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O Virtualized
11 * Server Adapter.
12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/
14#ifndef VXGE_REG_H
15#define VXGE_REG_H
16
17/*
18 * vxge_mBIT(loc) - set bit at offset
19 */
20#define vxge_mBIT(loc) (0x8000000000000000ULL >> (loc))
21
22/*
23 * vxge_vBIT(val, loc, sz) - set bits at offset
24 */
25#define vxge_vBIT(val, loc, sz) (((u64)(val)) << (64-(loc)-(sz)))
26#define vxge_vBIT32(val, loc, sz) (((u32)(val)) << (32-(loc)-(sz)))
27
28/*
29 * vxge_bVALn(bits, loc, n) - Get the value of n bits at location
30 */
31#define vxge_bVALn(bits, loc, n) \
32 ((((u64)bits) >> (64-(loc+n))) & ((0x1ULL << n) - 1))
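These three helpers use the Titan register convention in which bit 0 is the most significant bit of the 64-bit word, the opposite of the usual CPU numbering, which is easy to trip over. A few illustrative expansions (not part of the header):

	vxge_mBIT(3)                             == 0x1000000000000000ULL
	vxge_vBIT(0xA5, 0, 8)                    == 0xA500000000000000ULL
	vxge_bVALn(0xA500000000000000ULL, 0, 8)  == 0xA5

so, for example, VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID() just below simply pulls the top 16 bits of the register back down into an ordinary integer.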
33
34#define VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(bits) \
35 vxge_bVALn(bits, 0, 16)
36#define VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(bits) \
37 vxge_bVALn(bits, 48, 8)
38#define VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(bits) \
39 vxge_bVALn(bits, 56, 8)
40
41#define VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(bits) \
42 vxge_bVALn(bits, 3, 5)
43#define VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(bits) \
44 vxge_bVALn(bits, 5, 3)
45#define VXGE_HW_PF_SW_RESET_COMMAND 0xA5
46
47#define VXGE_HW_TITAN_PCICFGMGMT_REG_SPACES 17
48#define VXGE_HW_TITAN_SRPCIM_REG_SPACES 17
49#define VXGE_HW_TITAN_VPMGMT_REG_SPACES 17
50#define VXGE_HW_TITAN_VPATH_REG_SPACES 17
51
52#define VXGE_HW_FW_API_GET_EPROM_REV 31
53
54#define VXGE_EPROM_IMG_MAJOR(val) (u32) vxge_bVALn(val, 48, 4)
55#define VXGE_EPROM_IMG_MINOR(val) (u32) vxge_bVALn(val, 52, 4)
56#define VXGE_EPROM_IMG_FIX(val) (u32) vxge_bVALn(val, 56, 4)
57#define VXGE_EPROM_IMG_BUILD(val) (u32) vxge_bVALn(val, 60, 4)
58
59#define VXGE_HW_GET_EPROM_IMAGE_INDEX(val) vxge_bVALn(val, 16, 8)
60#define VXGE_HW_GET_EPROM_IMAGE_VALID(val) vxge_bVALn(val, 31, 1)
61#define VXGE_HW_GET_EPROM_IMAGE_TYPE(val) vxge_bVALn(val, 40, 8)
62#define VXGE_HW_GET_EPROM_IMAGE_REV(val) vxge_bVALn(val, 48, 16)
63#define VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(val) vxge_vBIT(val, 16, 8)
64
65#define VXGE_HW_FW_API_GET_FUNC_MODE 29
66#define VXGE_HW_GET_FUNC_MODE_VAL(val) (val & 0xFF)
67
68#define VXGE_HW_FW_UPGRADE_MEMO 13
69#define VXGE_HW_FW_UPGRADE_ACTION 16
70#define VXGE_HW_FW_UPGRADE_OFFSET_START 2
71#define VXGE_HW_FW_UPGRADE_OFFSET_SEND 3
72#define VXGE_HW_FW_UPGRADE_OFFSET_COMMIT 4
73#define VXGE_HW_FW_UPGRADE_OFFSET_READ 5
74
75#define VXGE_HW_FW_UPGRADE_BLK_SIZE 16
76#define VXGE_HW_UPGRADE_GET_RET_ERR_CODE(val) (val & 0xff)
77#define VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(val) ((val >> 8) & 0xff)
78
79#define VXGE_HW_ASIC_MODE_RESERVED 0
80#define VXGE_HW_ASIC_MODE_NO_IOV 1
81#define VXGE_HW_ASIC_MODE_SR_IOV 2
82#define VXGE_HW_ASIC_MODE_MR_IOV 3
83
84#define VXGE_HW_TXMAC_GEN_CFG1_TMAC_PERMA_STOP_EN vxge_mBIT(3)
85#define VXGE_HW_TXMAC_GEN_CFG1_BLOCK_BCAST_TO_WIRE vxge_mBIT(19)
86#define VXGE_HW_TXMAC_GEN_CFG1_BLOCK_BCAST_TO_SWITCH vxge_mBIT(23)
87#define VXGE_HW_TXMAC_GEN_CFG1_HOST_APPEND_FCS vxge_mBIT(31)
88
89#define VXGE_HW_VPATH_IS_FIRST_GET_VPATH_IS_FIRST(bits) vxge_bVALn(bits, 3, 1)
90
91#define VXGE_HW_TIM_VPATH_ASSIGNMENT_GET_BMAP_ROOT(bits) \
92 vxge_bVALn(bits, 0, 32)
93
94#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN(bits) \
95 vxge_bVALn(bits, 50, 14)
96
97#define VXGE_HW_XMAC_VSPORT_CHOICES_VP_GET_VSPORT_VECTOR(bits) \
98 vxge_bVALn(bits, 0, 17)
99
100#define VXGE_HW_XMAC_VPATH_TO_VSPORT_VPMGMT_CLONE_GET_VSPORT_NUMBER(bits) \
101 vxge_bVALn(bits, 3, 5)
102
103#define VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(bits) \
104 vxge_bVALn(bits, 17, 15)
105
106#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_LEGACY_MODE 0
107#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY 1
108#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_MULTI_OP_MODE 2
109
110#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_MODE_MESSAGES_ONLY 0
111#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_MODE_MULTI_OP_MODE 1
112
113#define VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val) \
114 (val&~VXGE_HW_TOC_KDFC_INITIAL_BIR(7))
115#define VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val) \
116 vxge_bVALn(val, 61, 3)
117#define VXGE_HW_TOC_GET_USDC_INITIAL_OFFSET(val) \
118 (val&~VXGE_HW_TOC_USDC_INITIAL_BIR(7))
119#define VXGE_HW_TOC_GET_USDC_INITIAL_BIR(val) \
120 vxge_bVALn(val, 61, 3)
121
122#define VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(bits) bits
123#define VXGE_HW_TOC_KDFC_FIFO_STRIDE_GET_TOC_KDFC_FIFO_STRIDE(bits) bits
124
125#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_GET_KDFC_RCTR0(bits) \
126 vxge_bVALn(bits, 1, 15)
127#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_GET_KDFC_RCTR1(bits) \
128 vxge_bVALn(bits, 17, 15)
129#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_GET_KDFC_RCTR2(bits) \
130 vxge_bVALn(bits, 33, 15)
131
132#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_VAPTH_NUM(val) vxge_vBIT(val, 42, 5)
133#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_FIFO_NUM(val) vxge_vBIT(val, 47, 2)
134#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_FIFO_OFFSET(val) \
135 vxge_vBIT(val, 49, 15)
136
137#define VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER 0
138#define VXGE_HW_PRC_CFG4_RING_MODE_THREE_BUFFER 1
139#define VXGE_HW_PRC_CFG4_RING_MODE_FIVE_BUFFER 2
140
141#define VXGE_HW_PRC_CFG7_SCATTER_MODE_A 0
142#define VXGE_HW_PRC_CFG7_SCATTER_MODE_B 2
143#define VXGE_HW_PRC_CFG7_SCATTER_MODE_C 1
144
145#define VXGE_HW_RTS_MGR_STEER_CTRL_WE_READ 0
146#define VXGE_HW_RTS_MGR_STEER_CTRL_WE_WRITE 1
147
148#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_DA 0
149#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_VID 1
150#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_ETYPE 2
151#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_PN 3
152#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RANGE_PN 4
153#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG 5
154#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6
155#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG 7
156#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK 8
157#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY 9
158#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_QOS 10
159#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_DS 11
160#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12
161#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_FW_VERSION 13
162
163#define VXGE_HW_RTS_MGR_STEER_DATA0_GET_DA_MAC_ADDR(bits) \
164 vxge_bVALn(bits, 0, 48)
165#define VXGE_HW_RTS_MGR_STEER_DATA0_DA_MAC_ADDR(val) vxge_vBIT(val, 0, 48)
166
167#define VXGE_HW_RTS_MGR_STEER_DATA1_GET_DA_MAC_ADDR_MASK(bits) \
168 vxge_bVALn(bits, 0, 48)
169#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_MASK(val) vxge_vBIT(val, 0, 48)
170#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_ADD_PRIVILEGED_MODE \
171 vxge_mBIT(54)
172#define VXGE_HW_RTS_MGR_STEER_DATA1_GET_DA_MAC_ADDR_ADD_VPATH(bits) \
173 vxge_bVALn(bits, 55, 5)
174#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_ADD_VPATH(val) \
175 vxge_vBIT(val, 55, 5)
176#define VXGE_HW_RTS_MGR_STEER_DATA1_GET_DA_MAC_ADDR_ADD_MODE(bits) \
177 vxge_bVALn(bits, 62, 2)
178#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_MODE(val) vxge_vBIT(val, 62, 2)
179
180#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY 0
181#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY 1
182#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY 2
183#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY 3
184#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY 0
185#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY 1
186#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY 3
187#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL 4
188#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ALL_CLEAR 172
189
190#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA 0
191#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID 1
192#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_ETYPE 2
193#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_PN 3
194#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG 5
195#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6
196#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG 7
197#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK 8
198#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY 9
199#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_QOS 10
200#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DS 11
201#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12
202#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO 13
203
204#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(bits) \
205 vxge_bVALn(bits, 0, 48)
206#define VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(val) vxge_vBIT(val, 0, 48)
207
208#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(bits) vxge_bVALn(bits, 0, 12)
209#define VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(val) vxge_vBIT(val, 0, 12)
210
211#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_ETYPE(bits) vxge_bVALn(bits, 0, 11)
212#define VXGE_HW_RTS_ACCESS_STEER_DATA0_ETYPE(val) vxge_vBIT(val, 0, 16)
213
214#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_PN_SRC_DEST_SEL(bits) \
215 vxge_bVALn(bits, 3, 1)
216#define VXGE_HW_RTS_ACCESS_STEER_DATA0_PN_SRC_DEST_SEL vxge_mBIT(3)
217#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_PN_TCP_UDP_SEL(bits) \
218 vxge_bVALn(bits, 7, 1)
219#define VXGE_HW_RTS_ACCESS_STEER_DATA0_PN_TCP_UDP_SEL vxge_mBIT(7)
220#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_PN_PORT_NUM(bits) \
221 vxge_bVALn(bits, 8, 16)
222#define VXGE_HW_RTS_ACCESS_STEER_DATA0_PN_PORT_NUM(val) vxge_vBIT(val, 8, 16)
223
224#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_EN(bits) \
225 vxge_bVALn(bits, 3, 1)
226#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN vxge_mBIT(3)
227#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_BUCKET_SIZE(bits) \
228 vxge_bVALn(bits, 4, 4)
229#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(val) \
230 vxge_vBIT(val, 4, 4)
231#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ALG_SEL(bits) \
232 vxge_bVALn(bits, 10, 2)
233#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(val) \
234 vxge_vBIT(val, 10, 2)
235#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL_JENKINS 0
236#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL_MS_RSS 1
237#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL_CRC32C 2
238#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_TCP_IPV4_EN(bits) \
239 vxge_bVALn(bits, 15, 1)
240#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN vxge_mBIT(15)
241#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_IPV4_EN(bits) \
242 vxge_bVALn(bits, 19, 1)
243#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN vxge_mBIT(19)
244#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_TCP_IPV6_EN(bits) \
245 vxge_bVALn(bits, 23, 1)
246#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN vxge_mBIT(23)
247#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_IPV6_EN(bits) \
248 vxge_bVALn(bits, 27, 1)
249#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN vxge_mBIT(27)
250#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_TCP_IPV6_EX_EN(bits) \
251 vxge_bVALn(bits, 31, 1)
252#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN vxge_mBIT(31)
253#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_IPV6_EX_EN(bits) \
254 vxge_bVALn(bits, 35, 1)
255#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN vxge_mBIT(35)
256#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(bits) \
257 vxge_bVALn(bits, 39, 1)
258#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE vxge_mBIT(39)
259#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_REPL_ENTRY_EN(bits) \
260 vxge_bVALn(bits, 43, 1)
261#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_REPL_ENTRY_EN vxge_mBIT(43)
262
263#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_SOLO_IT_ENTRY_EN(bits) \
264 vxge_bVALn(bits, 3, 1)
265#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN vxge_mBIT(3)
266#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_SOLO_IT_BUCKET_DATA(bits) \
267 vxge_bVALn(bits, 9, 7)
268#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(val) \
269 vxge_vBIT(val, 9, 7)
270
271#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM0_BUCKET_NUM(bits) \
272 vxge_bVALn(bits, 0, 8)
273#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(val) \
274 vxge_vBIT(val, 0, 8)
275#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM0_ENTRY_EN(bits) \
276 vxge_bVALn(bits, 8, 1)
277#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN vxge_mBIT(8)
278#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM0_BUCKET_DATA(bits) \
279 vxge_bVALn(bits, 9, 7)
280#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(val) \
281 vxge_vBIT(val, 9, 7)
282#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM1_BUCKET_NUM(bits) \
283 vxge_bVALn(bits, 16, 8)
284#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(val) \
285 vxge_vBIT(val, 16, 8)
286#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM1_ENTRY_EN(bits) \
287 vxge_bVALn(bits, 24, 1)
288#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN vxge_mBIT(24)
289#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM1_BUCKET_DATA(bits) \
290 vxge_bVALn(bits, 25, 7)
291#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(val) \
292 vxge_vBIT(val, 25, 7)
293#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM0_BUCKET_NUM(bits) \
294 vxge_bVALn(bits, 0, 8)
295#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(val) \
296 vxge_vBIT(val, 0, 8)
297#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM0_ENTRY_EN(bits) \
298 vxge_bVALn(bits, 8, 1)
299#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN vxge_mBIT(8)
300#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM0_BUCKET_DATA(bits) \
301 vxge_bVALn(bits, 9, 7)
302#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(val) \
303 vxge_vBIT(val, 9, 7)
304#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM1_BUCKET_NUM(bits) \
305 vxge_bVALn(bits, 16, 8)
306#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(val) \
307 vxge_vBIT(val, 16, 8)
308#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM1_ENTRY_EN(bits) \
309 vxge_bVALn(bits, 24, 1)
310#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN vxge_mBIT(24)
311#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM1_BUCKET_DATA(bits) \
312 vxge_bVALn(bits, 25, 7)
313#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(val) \
314 vxge_vBIT(val, 25, 7)
315
316#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_JHASH_CFG_GOLDEN_RATIO(bits) \
317 vxge_bVALn(bits, 0, 32)
318#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_JHASH_CFG_GOLDEN_RATIO(val) \
319 vxge_vBIT(val, 0, 32)
320#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_JHASH_CFG_INIT_VALUE(bits) \
321 vxge_bVALn(bits, 32, 32)
322#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_JHASH_CFG_INIT_VALUE(val) \
323 vxge_vBIT(val, 32, 32)
324
325#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV6_SA_MASK(bits) \
326 vxge_bVALn(bits, 0, 16)
327#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV6_SA_MASK(val) \
328 vxge_vBIT(val, 0, 16)
329#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV6_DA_MASK(bits) \
330 vxge_bVALn(bits, 16, 16)
331#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV6_DA_MASK(val) \
332 vxge_vBIT(val, 16, 16)
333#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV4_SA_MASK(bits) \
334 vxge_bVALn(bits, 32, 4)
335#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV4_SA_MASK(val) \
336 vxge_vBIT(val, 32, 4)
337#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV4_DA_MASK(bits) \
338 vxge_bVALn(bits, 36, 4)
339#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV4_DA_MASK(val) \
340 vxge_vBIT(val, 36, 4)
341#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_L4SP_MASK(bits) \
342 vxge_bVALn(bits, 40, 2)
343#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_L4SP_MASK(val) \
344 vxge_vBIT(val, 40, 2)
345#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_L4DP_MASK(bits) \
346 vxge_bVALn(bits, 42, 2)
347#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_L4DP_MASK(val) \
348 vxge_vBIT(val, 42, 2)
349
350#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_KEY_KEY(bits) \
351 vxge_bVALn(bits, 0, 64)
352#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_KEY_KEY(val) vxge_vBIT(val, 0, 64)
353
354#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_QOS_ENTRY_EN(bits) \
355 vxge_bVALn(bits, 3, 1)
356#define VXGE_HW_RTS_ACCESS_STEER_DATA0_QOS_ENTRY_EN vxge_mBIT(3)
357
358#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DS_ENTRY_EN(bits) \
359 vxge_bVALn(bits, 3, 1)
360#define VXGE_HW_RTS_ACCESS_STEER_DATA0_DS_ENTRY_EN vxge_mBIT(3)
361
362#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(bits) \
363 vxge_bVALn(bits, 0, 48)
364#define VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(val) \
365 vxge_vBIT(val, 0, 48)
366#define VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(val) \
367 vxge_vBIT(val, 62, 2)
368
369#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM4_BUCKET_NUM(bits) \
370 vxge_bVALn(bits, 0, 8)
371#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM4_BUCKET_NUM(val) \
372 vxge_vBIT(val, 0, 8)
373#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM4_ENTRY_EN(bits) \
374 vxge_bVALn(bits, 8, 1)
375#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM4_ENTRY_EN vxge_mBIT(8)
376#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM4_BUCKET_DATA(bits) \
377 vxge_bVALn(bits, 9, 7)
378#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM4_BUCKET_DATA(val) \
379 vxge_vBIT(val, 9, 7)
380#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM5_BUCKET_NUM(bits) \
381 vxge_bVALn(bits, 16, 8)
382#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM5_BUCKET_NUM(val) \
383 vxge_vBIT(val, 16, 8)
384#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM5_ENTRY_EN(bits) \
385 vxge_bVALn(bits, 24, 1)
386#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM5_ENTRY_EN vxge_mBIT(24)
387#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM5_BUCKET_DATA(bits) \
388 vxge_bVALn(bits, 25, 7)
389#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM5_BUCKET_DATA(val) \
390 vxge_vBIT(val, 25, 7)
391#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM6_BUCKET_NUM(bits) \
392 vxge_bVALn(bits, 32, 8)
393#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM6_BUCKET_NUM(val) \
394 vxge_vBIT(val, 32, 8)
395#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM6_ENTRY_EN(bits) \
396 vxge_bVALn(bits, 40, 1)
397#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM6_ENTRY_EN vxge_mBIT(40)
398#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM6_BUCKET_DATA(bits) \
399 vxge_bVALn(bits, 41, 7)
400#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM6_BUCKET_DATA(val) \
401 vxge_vBIT(val, 41, 7)
402#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM7_BUCKET_NUM(bits) \
403 vxge_bVALn(bits, 48, 8)
404#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM7_BUCKET_NUM(val) \
405 vxge_vBIT(val, 48, 8)
406#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM7_ENTRY_EN(bits) \
407 vxge_bVALn(bits, 56, 1)
408#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM7_ENTRY_EN vxge_mBIT(56)
409#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM7_BUCKET_DATA(bits) \
410 vxge_bVALn(bits, 57, 7)
411#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM7_BUCKET_DATA(val) \
412 vxge_vBIT(val, 57, 7)
413
414#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER 0
415#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER 1
416#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_VERSION 2
417#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE 3
418#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0 4
419#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_1 5
420#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_2 6
421#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3 7
422
423#define VXGE_HW_RTS_ACCESS_STEER_DATA0_LED_CONTROL_ON 1
424#define VXGE_HW_RTS_ACCESS_STEER_DATA0_LED_CONTROL_OFF 0
425
426#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(bits) \
427 vxge_bVALn(bits, 0, 8)
428#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_DAY(val) vxge_vBIT(val, 0, 8)
429#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(bits) \
430 vxge_bVALn(bits, 8, 8)
431#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_MONTH(val) vxge_vBIT(val, 8, 8)
432#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(bits) \
433 vxge_bVALn(bits, 16, 16)
434#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_YEAR(val) \
435 vxge_vBIT(val, 16, 16)
436
437#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(bits) \
438 vxge_bVALn(bits, 32, 8)
439#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_MAJOR(val) vxge_vBIT(val, 32, 8)
440#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(bits) \
441 vxge_bVALn(bits, 40, 8)
442#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_MINOR(val) vxge_vBIT(val, 40, 8)
443#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(bits) \
444 vxge_bVALn(bits, 48, 16)
445#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_BUILD(val) vxge_vBIT(val, 48, 16)
446
447#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(bits) \
448 vxge_bVALn(bits, 0, 8)
449#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_DAY(val) vxge_vBIT(val, 0, 8)
450#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(bits) \
451 vxge_bVALn(bits, 8, 8)
452#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_MONTH(val) vxge_vBIT(val, 8, 8)
453#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(bits) \
454 vxge_bVALn(bits, 16, 16)
455#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_YEAR(val) \
456 vxge_vBIT(val, 16, 16)
457
458#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(bits) \
459 vxge_bVALn(bits, 32, 8)
460#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_MAJOR(val) vxge_vBIT(val, 32, 8)
461#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(bits) \
462 vxge_bVALn(bits, 40, 8)
463#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_MINOR(val) vxge_vBIT(val, 40, 8)
464#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(bits) \
465 vxge_bVALn(bits, 48, 16)
466#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_BUILD(val) vxge_vBIT(val, 48, 16)
467#define VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(bits) vxge_bVALn(bits, 0, 8)
468
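The firmware and flash date/version fields above are packed into the 64-bit RTS_ACCESS_STEER_DATA0/DATA1 words and are pulled apart with the GET_* accessors. A minimal sketch of such a decode follows; it assumes only the macros defined in this header plus the standard kernel pr_info() helper, and the function and variable names are illustrative rather than part of this patch.

static void example_show_fw_version(u64 data0)
{
	/* Illustrative only: field extraction via the accessors above. */
	u32 day   = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0);
	u32 month = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0);
	u32 year  = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0);
	u32 major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
	u32 minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
	u32 build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);

	pr_info("vxge firmware %u.%u.%u, dated %u/%u/%u\n",
		major, minor, build, day, month, year);
}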
469#define VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_GET_PPIF_SRPCIM_TO_VPATH_ALARM(bits)\
470 vxge_bVALn(bits, 0, 18)
471
472#define VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(bits) \
473 vxge_bVALn(bits, 48, 16)
474#define VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(bits) \
475 vxge_bVALn(bits, 32, 32)
476#define VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(bits) vxge_bVALn(bits, 48, 16)
477#define VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(bits) \
478 vxge_bVALn(bits, 0, 32)
479#define VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(bits) \
480 vxge_bVALn(bits, 0, 32)
481#define VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(bits) \
482 vxge_bVALn(bits, 0, 32)
483#define VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(bits) (bits)
484#define VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(bits) (bits)
485#define VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(bits) \
486 vxge_bVALn(bits, 32, 32)
487#define VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(bits) \
488 vxge_bVALn(bits, 32, 32)
489#define VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(bits) \
490 vxge_bVALn(bits, 0, 32)
491#define VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(bits) \
492 vxge_bVALn(bits, 32, 32)
493#define VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(bits) \
494 vxge_bVALn(bits, 0, 32)
495#define VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(bits) \
496 vxge_bVALn(bits, 32, 32)
497#define VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(bits) \
498 vxge_bVALn(bits, 0, 32)
499#define VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(bits) \
500 vxge_bVALn(bits, 32, 32)
501#define VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(bits\
502) vxge_bVALn(bits, 48, 16)
503#define VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(bits) vxge_bVALn(bits, 0, 16)
504#define VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(bits) \
505 vxge_bVALn(bits, 16, 16)
506#define VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(bits) \
507 vxge_bVALn(bits, 32, 16)
508#define VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(bits) vxge_bVALn(bits, 0, 16)
509#define VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(bits) \
510 vxge_bVALn(bits, 16, 16)
511#define VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(bits) \
512 vxge_bVALn(bits, 32, 16)
513
514#define VXGE_HW_MRPCIM_DEBUG_STATS0_GET_INI_WR_DROP(bits) \
515 vxge_bVALn(bits, 0, 32)
516#define VXGE_HW_MRPCIM_DEBUG_STATS0_GET_INI_RD_DROP(bits) \
517 vxge_bVALn(bits, 32, 32)
518#define VXGE_HW_MRPCIM_DEBUG_STATS1_GET_VPLANE_WRCRDTARB_PH_CRDT_DEPLETED(bits\
519) vxge_bVALn(bits, 32, 32)
520#define VXGE_HW_MRPCIM_DEBUG_STATS2_GET_VPLANE_WRCRDTARB_PD_CRDT_DEPLETED(bits\
521) vxge_bVALn(bits, 32, 32)
522#define \
523VXGE_HW_MRPCIM_DEBUG_STATS3_GET_VPLANE_RDCRDTARB_NPH_CRDT_DEPLETED(bits) \
524 vxge_bVALn(bits, 32, 32)
525#define VXGE_HW_MRPCIM_DEBUG_STATS4_GET_INI_WR_VPIN_DROP(bits) \
526 vxge_bVALn(bits, 0, 32)
527#define VXGE_HW_MRPCIM_DEBUG_STATS4_GET_INI_RD_VPIN_DROP(bits) \
528 vxge_bVALn(bits, 32, 32)
529#define VXGE_HW_GENSTATS_COUNT01_GET_GENSTATS_COUNT1(bits) \
530 vxge_bVALn(bits, 0, 32)
531#define VXGE_HW_GENSTATS_COUNT01_GET_GENSTATS_COUNT0(bits) \
532 vxge_bVALn(bits, 32, 32)
533#define VXGE_HW_GENSTATS_COUNT23_GET_GENSTATS_COUNT3(bits) \
534 vxge_bVALn(bits, 0, 32)
535#define VXGE_HW_GENSTATS_COUNT23_GET_GENSTATS_COUNT2(bits) \
536 vxge_bVALn(bits, 32, 32)
537#define VXGE_HW_GENSTATS_COUNT4_GET_GENSTATS_COUNT4(bits) \
538 vxge_bVALn(bits, 32, 32)
539#define VXGE_HW_GENSTATS_COUNT5_GET_GENSTATS_COUNT5(bits) \
540 vxge_bVALn(bits, 32, 32)
541
542#define VXGE_HW_DEBUG_STATS0_GET_RSTDROP_MSG(bits) vxge_bVALn(bits, 0, 32)
543#define VXGE_HW_DEBUG_STATS0_GET_RSTDROP_CPL(bits) vxge_bVALn(bits, 32, 32)
544#define VXGE_HW_DEBUG_STATS1_GET_RSTDROP_CLIENT0(bits) vxge_bVALn(bits, 0, 32)
545#define VXGE_HW_DEBUG_STATS1_GET_RSTDROP_CLIENT1(bits) vxge_bVALn(bits, 32, 32)
546#define VXGE_HW_DEBUG_STATS2_GET_RSTDROP_CLIENT2(bits) vxge_bVALn(bits, 0, 32)
547#define VXGE_HW_DEBUG_STATS3_GET_VPLANE_DEPL_PH(bits) vxge_bVALn(bits, 0, 16)
548#define VXGE_HW_DEBUG_STATS3_GET_VPLANE_DEPL_NPH(bits) vxge_bVALn(bits, 16, 16)
549#define VXGE_HW_DEBUG_STATS3_GET_VPLANE_DEPL_CPLH(bits) vxge_bVALn(bits, 32, 16)
550#define VXGE_HW_DEBUG_STATS4_GET_VPLANE_DEPL_PD(bits) vxge_bVALn(bits, 0, 16)
551#define VXGE_HW_DEBUG_STATS4_GET_VPLANE_DEPL_NPD(bits) vxge_bVALn(bits, 16, 16)
552#define VXGE_HW_DEBUG_STATS4_GET_VPLANE_DEPL_CPLD(bits) vxge_bVALn(bits, 32, 16)
553
554#define VXGE_HW_DBG_STATS_TPA_TX_PATH_GET_TX_PERMITTED_FRMS(bits) \
555 vxge_bVALn(bits, 32, 32)
556
557#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_GET_PORT0_TX_ANY_FRMS(bits) \
558 vxge_bVALn(bits, 0, 8)
559#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_GET_PORT1_TX_ANY_FRMS(bits) \
560 vxge_bVALn(bits, 8, 8)
561#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_GET_PORT2_TX_ANY_FRMS(bits) \
562 vxge_bVALn(bits, 16, 8)
563
564#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_GET_PORT0_RX_ANY_FRMS(bits) \
565 vxge_bVALn(bits, 0, 8)
566#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_GET_PORT1_RX_ANY_FRMS(bits) \
567 vxge_bVALn(bits, 8, 8)
568#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_GET_PORT2_RX_ANY_FRMS(bits) \
569 vxge_bVALn(bits, 16, 8)
570
571#define VXGE_HW_CONFIG_PRIV_H
572
573#define VXGE_HW_SWAPPER_INITIAL_VALUE 0x0123456789abcdefULL
574#define VXGE_HW_SWAPPER_BYTE_SWAPPED 0xefcdab8967452301ULL
575#define VXGE_HW_SWAPPER_BIT_FLIPPED 0x80c4a2e691d5b3f7ULL
576#define VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED 0xf7b3d591e6a2c480ULL
577
578#define VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE 0xFFFFFFFFFFFFFFFFULL
579#define VXGE_HW_SWAPPER_READ_BYTE_SWAP_DISABLE 0x0000000000000000ULL
580
581#define VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE 0xFFFFFFFFFFFFFFFFULL
582#define VXGE_HW_SWAPPER_READ_BIT_FLAP_DISABLE 0x0000000000000000ULL
583
584#define VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE 0xFFFFFFFFFFFFFFFFULL
585#define VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_DISABLE 0x0000000000000000ULL
586
587#define VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE 0xFFFFFFFFFFFFFFFFULL
588#define VXGE_HW_SWAPPER_WRITE_BIT_FLAP_DISABLE 0x0000000000000000ULL
589
590/*
591 * The registers are memory mapped and use native big-endian byte order.
592 * Little-endian hosts are handled by enabling hardware byte-swapping for
593 * register and DMA operations.
594 */
595struct vxge_hw_legacy_reg {
596
597 u8 unused00010[0x00010];
598
599/*0x00010*/ u64 toc_swapper_fb;
600#define VXGE_HW_TOC_SWAPPER_FB_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
601/*0x00018*/ u64 pifm_rd_swap_en;
602#define VXGE_HW_PIFM_RD_SWAP_EN_PIFM_RD_SWAP_EN(val) vxge_vBIT(val, 0, 64)
603/*0x00020*/ u64 pifm_rd_flip_en;
604#define VXGE_HW_PIFM_RD_FLIP_EN_PIFM_RD_FLIP_EN(val) vxge_vBIT(val, 0, 64)
605/*0x00028*/ u64 pifm_wr_swap_en;
606#define VXGE_HW_PIFM_WR_SWAP_EN_PIFM_WR_SWAP_EN(val) vxge_vBIT(val, 0, 64)
607/*0x00030*/ u64 pifm_wr_flip_en;
608#define VXGE_HW_PIFM_WR_FLIP_EN_PIFM_WR_FLIP_EN(val) vxge_vBIT(val, 0, 64)
609/*0x00038*/ u64 toc_first_pointer;
610#define VXGE_HW_TOC_FIRST_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
611/*0x00040*/ u64 host_access_en;
612#define VXGE_HW_HOST_ACCESS_EN_HOST_ACCESS_EN(val) vxge_vBIT(val, 0, 64)
613
614} __packed;
615
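The legacy register block above carries the byte-swap and bit-flip controls that the swapper constants are written into. As a rough illustration (not part of this patch), a little-endian host could probe toc_swapper_fb and, if the readback comes back byte-swapped, enable hardware swapping for both directions; readq()/writeq() are the usual kernel MMIO accessors, and the function name is hypothetical.

static void example_enable_swapper(struct vxge_hw_legacy_reg __iomem *legacy_reg)
{
	u64 val = readq(&legacy_reg->toc_swapper_fb);

	if (val == VXGE_HW_SWAPPER_INITIAL_VALUE)
		return;	/* host already sees native byte order */

	if (val == VXGE_HW_SWAPPER_BYTE_SWAPPED) {
		/* Enable byte swapping for register reads and writes. */
		writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
		       &legacy_reg->pifm_rd_swap_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
		       &legacy_reg->pifm_wr_swap_en);
	}
}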
616struct vxge_hw_toc_reg {
617
618 u8 unused00050[0x00050];
619
620/*0x00050*/ u64 toc_common_pointer;
621#define VXGE_HW_TOC_COMMON_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
622/*0x00058*/ u64 toc_memrepair_pointer;
623#define VXGE_HW_TOC_MEMREPAIR_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
624/*0x00060*/ u64 toc_pcicfgmgmt_pointer[17];
625#define VXGE_HW_TOC_PCICFGMGMT_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
626 u8 unused001e0[0x001e0-0x000e8];
627
628/*0x001e0*/ u64 toc_mrpcim_pointer;
629#define VXGE_HW_TOC_MRPCIM_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
630/*0x001e8*/ u64 toc_srpcim_pointer[17];
631#define VXGE_HW_TOC_SRPCIM_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
632 u8 unused00278[0x00278-0x00270];
633
634/*0x00278*/ u64 toc_vpmgmt_pointer[17];
635#define VXGE_HW_TOC_VPMGMT_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
636 u8 unused00390[0x00390-0x00300];
637
638/*0x00390*/ u64 toc_vpath_pointer[17];
639#define VXGE_HW_TOC_VPATH_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
640 u8 unused004a0[0x004a0-0x00418];
641
642/*0x004a0*/ u64 toc_kdfc;
643#define VXGE_HW_TOC_KDFC_INITIAL_OFFSET(val) vxge_vBIT(val, 0, 61)
644#define VXGE_HW_TOC_KDFC_INITIAL_BIR(val) vxge_vBIT(val, 61, 3)
645/*0x004a8*/ u64 toc_usdc;
646#define VXGE_HW_TOC_USDC_INITIAL_OFFSET(val) vxge_vBIT(val, 0, 61)
647#define VXGE_HW_TOC_USDC_INITIAL_BIR(val) vxge_vBIT(val, 61, 3)
648/*0x004b0*/ u64 toc_kdfc_vpath_stride;
649#define VXGE_HW_TOC_KDFC_VPATH_STRIDE_INITIAL_TOC_KDFC_VPATH_STRIDE(val) \
650 vxge_vBIT(val, 0, 64)
651/*0x004b8*/ u64 toc_kdfc_fifo_stride;
652#define VXGE_HW_TOC_KDFC_FIFO_STRIDE_INITIAL_TOC_KDFC_FIFO_STRIDE(val) \
653 vxge_vBIT(val, 0, 64)
654
655} __packed;
656
657struct vxge_hw_common_reg {
658
659 u8 unused00a00[0x00a00];
660
661/*0x00a00*/ u64 prc_status1;
662#define VXGE_HW_PRC_STATUS1_PRC_VP_QUIESCENT(n) vxge_mBIT(n)
663/*0x00a08*/ u64 rxdcm_reset_in_progress;
664#define VXGE_HW_RXDCM_RESET_IN_PROGRESS_PRC_VP(n) vxge_mBIT(n)
665/*0x00a10*/ u64 replicq_flush_in_progress;
666#define VXGE_HW_REPLICQ_FLUSH_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n)
667/*0x00a18*/ u64 rxpe_cmds_reset_in_progress;
668#define VXGE_HW_RXPE_CMDS_RESET_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n)
669/*0x00a20*/ u64 mxp_cmds_reset_in_progress;
670#define VXGE_HW_MXP_CMDS_RESET_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n)
671/*0x00a28*/ u64 noffload_reset_in_progress;
672#define VXGE_HW_NOFFLOAD_RESET_IN_PROGRESS_PRC_VP(n) vxge_mBIT(n)
673/*0x00a30*/ u64 rd_req_in_progress;
674#define VXGE_HW_RD_REQ_IN_PROGRESS_VP(n) vxge_mBIT(n)
675/*0x00a38*/ u64 rd_req_outstanding;
676#define VXGE_HW_RD_REQ_OUTSTANDING_VP(n) vxge_mBIT(n)
677/*0x00a40*/ u64 kdfc_reset_in_progress;
678#define VXGE_HW_KDFC_RESET_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n)
679 u8 unused00b00[0x00b00-0x00a48];
680
681/*0x00b00*/ u64 one_cfg_vp;
682#define VXGE_HW_ONE_CFG_VP_RDY(n) vxge_mBIT(n)
683/*0x00b08*/ u64 one_common;
684#define VXGE_HW_ONE_COMMON_PET_VPATH_RESET_IN_PROGRESS(n) vxge_mBIT(n)
685 u8 unused00b80[0x00b80-0x00b10];
686
687/*0x00b80*/ u64 tim_int_en;
688#define VXGE_HW_TIM_INT_EN_TIM_VP(n) vxge_mBIT(n)
689/*0x00b88*/ u64 tim_set_int_en;
690#define VXGE_HW_TIM_SET_INT_EN_VP(n) vxge_mBIT(n)
691/*0x00b90*/ u64 tim_clr_int_en;
692#define VXGE_HW_TIM_CLR_INT_EN_VP(n) vxge_mBIT(n)
693/*0x00b98*/ u64 tim_mask_int_during_reset;
694#define VXGE_HW_TIM_MASK_INT_DURING_RESET_VPATH(n) vxge_mBIT(n)
695/*0x00ba0*/ u64 tim_reset_in_progress;
696#define VXGE_HW_TIM_RESET_IN_PROGRESS_TIM_VPATH(n) vxge_mBIT(n)
697/*0x00ba8*/ u64 tim_outstanding_bmap;
698#define VXGE_HW_TIM_OUTSTANDING_BMAP_TIM_VPATH(n) vxge_mBIT(n)
699 u8 unused00c00[0x00c00-0x00bb0];
700
701/*0x00c00*/ u64 msg_reset_in_progress;
702#define VXGE_HW_MSG_RESET_IN_PROGRESS_MSG_COMPOSITE(val) vxge_vBIT(val, 0, 17)
703/*0x00c08*/ u64 msg_mxp_mr_ready;
704#define VXGE_HW_MSG_MXP_MR_READY_MP_BOOTED(n) vxge_mBIT(n)
705/*0x00c10*/ u64 msg_uxp_mr_ready;
706#define VXGE_HW_MSG_UXP_MR_READY_UP_BOOTED(n) vxge_mBIT(n)
707/*0x00c18*/ u64 msg_dmq_noni_rtl_prefetch;
708#define VXGE_HW_MSG_DMQ_NONI_RTL_PREFETCH_BYPASS_ENABLE(n) vxge_mBIT(n)
709/*0x00c20*/ u64 msg_umq_rtl_bwr;
710#define VXGE_HW_MSG_UMQ_RTL_BWR_PREFETCH_DISABLE(n) vxge_mBIT(n)
711 u8 unused00d00[0x00d00-0x00c28];
712
713/*0x00d00*/ u64 cmn_rsthdlr_cfg0;
714#define VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(val) vxge_vBIT(val, 0, 17)
715/*0x00d08*/ u64 cmn_rsthdlr_cfg1;
716#define VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(val) vxge_vBIT(val, 0, 17)
717/*0x00d10*/ u64 cmn_rsthdlr_cfg2;
718#define VXGE_HW_CMN_RSTHDLR_CFG2_SW_RESET_FIFO0(val) vxge_vBIT(val, 0, 17)
719/*0x00d18*/ u64 cmn_rsthdlr_cfg3;
720#define VXGE_HW_CMN_RSTHDLR_CFG3_SW_RESET_FIFO1(val) vxge_vBIT(val, 0, 17)
721/*0x00d20*/ u64 cmn_rsthdlr_cfg4;
722#define VXGE_HW_CMN_RSTHDLR_CFG4_SW_RESET_FIFO2(val) vxge_vBIT(val, 0, 17)
723 u8 unused00d40[0x00d40-0x00d28];
724
725/*0x00d40*/ u64 cmn_rsthdlr_cfg8;
726#define VXGE_HW_CMN_RSTHDLR_CFG8_INCR_VPATH_INST_NUM(val) vxge_vBIT(val, 0, 17)
727/*0x00d48*/ u64 stats_cfg0;
728#define VXGE_HW_STATS_CFG0_STATS_ENABLE(val) vxge_vBIT(val, 0, 17)
729 u8 unused00da8[0x00da8-0x00d50];
730
731/*0x00da8*/ u64 clear_msix_mask_vect[4];
732#define VXGE_HW_CLEAR_MSIX_MASK_VECT_CLEAR_MSIX_MASK_VECT(val) \
733 vxge_vBIT(val, 0, 17)
734/*0x00dc8*/ u64 set_msix_mask_vect[4];
735#define VXGE_HW_SET_MSIX_MASK_VECT_SET_MSIX_MASK_VECT(val) vxge_vBIT(val, 0, 17)
736/*0x00de8*/ u64 clear_msix_mask_all_vect;
737#define VXGE_HW_CLEAR_MSIX_MASK_ALL_VECT_CLEAR_MSIX_MASK_ALL_VECT(val) \
738 vxge_vBIT(val, 0, 17)
739/*0x00df0*/ u64 set_msix_mask_all_vect;
740#define VXGE_HW_SET_MSIX_MASK_ALL_VECT_SET_MSIX_MASK_ALL_VECT(val) \
741 vxge_vBIT(val, 0, 17)
742/*0x00df8*/ u64 mask_vector[4];
743#define VXGE_HW_MASK_VECTOR_MASK_VECTOR(val) vxge_vBIT(val, 0, 17)
744/*0x00e18*/ u64 msix_pending_vector[4];
745#define VXGE_HW_MSIX_PENDING_VECTOR_MSIX_PENDING_VECTOR(val) \
746 vxge_vBIT(val, 0, 17)
747/*0x00e38*/ u64 clr_msix_one_shot_vec[4];
748#define VXGE_HW_CLR_MSIX_ONE_SHOT_VEC_CLR_MSIX_ONE_SHOT_VEC(val) \
749 vxge_vBIT(val, 0, 17)
750/*0x00e58*/ u64 titan_asic_id;
751#define VXGE_HW_TITAN_ASIC_ID_INITIAL_DEVICE_ID(val) vxge_vBIT(val, 0, 16)
752#define VXGE_HW_TITAN_ASIC_ID_INITIAL_MAJOR_REVISION(val) vxge_vBIT(val, 48, 8)
753#define VXGE_HW_TITAN_ASIC_ID_INITIAL_MINOR_REVISION(val) vxge_vBIT(val, 56, 8)
754/*0x00e60*/ u64 titan_general_int_status;
755#define VXGE_HW_TITAN_GENERAL_INT_STATUS_MRPCIM_ALARM_INT vxge_mBIT(0)
756#define VXGE_HW_TITAN_GENERAL_INT_STATUS_SRPCIM_ALARM_INT vxge_mBIT(1)
757#define VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT vxge_mBIT(2)
758#define VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(val) \
759 vxge_vBIT(val, 3, 17)
760 u8 unused00e70[0x00e70-0x00e68];
761
762/*0x00e70*/ u64 titan_mask_all_int;
763#define VXGE_HW_TITAN_MASK_ALL_INT_ALARM vxge_mBIT(7)
764#define VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC vxge_mBIT(15)
765 u8 unused00e80[0x00e80-0x00e78];
766
767/*0x00e80*/ u64 tim_int_status0;
768#define VXGE_HW_TIM_INT_STATUS0_TIM_INT_STATUS0(val) vxge_vBIT(val, 0, 64)
769/*0x00e88*/ u64 tim_int_mask0;
770#define VXGE_HW_TIM_INT_MASK0_TIM_INT_MASK0(val) vxge_vBIT(val, 0, 64)
771/*0x00e90*/ u64 tim_int_status1;
772#define VXGE_HW_TIM_INT_STATUS1_TIM_INT_STATUS1(val) vxge_vBIT(val, 0, 4)
773/*0x00e98*/ u64 tim_int_mask1;
774#define VXGE_HW_TIM_INT_MASK1_TIM_INT_MASK1(val) vxge_vBIT(val, 0, 4)
775/*0x00ea0*/ u64 rti_int_status;
776#define VXGE_HW_RTI_INT_STATUS_RTI_INT_STATUS(val) vxge_vBIT(val, 0, 17)
777/*0x00ea8*/ u64 rti_int_mask;
778#define VXGE_HW_RTI_INT_MASK_RTI_INT_MASK(val) vxge_vBIT(val, 0, 17)
779/*0x00eb0*/ u64 adapter_status;
780#define VXGE_HW_ADAPTER_STATUS_RTDMA_RTDMA_READY vxge_mBIT(0)
781#define VXGE_HW_ADAPTER_STATUS_WRDMA_WRDMA_READY vxge_mBIT(1)
782#define VXGE_HW_ADAPTER_STATUS_KDFC_KDFC_READY vxge_mBIT(2)
783#define VXGE_HW_ADAPTER_STATUS_TPA_TMAC_BUF_EMPTY vxge_mBIT(3)
784#define VXGE_HW_ADAPTER_STATUS_RDCTL_PIC_QUIESCENT vxge_mBIT(4)
785#define VXGE_HW_ADAPTER_STATUS_XGMAC_NETWORK_FAULT vxge_mBIT(5)
786#define VXGE_HW_ADAPTER_STATUS_ROCRC_OFFLOAD_QUIESCENT vxge_mBIT(6)
787#define VXGE_HW_ADAPTER_STATUS_G3IF_FB_G3IF_FB_GDDR3_READY vxge_mBIT(7)
788#define VXGE_HW_ADAPTER_STATUS_G3IF_CM_G3IF_CM_GDDR3_READY vxge_mBIT(8)
789#define VXGE_HW_ADAPTER_STATUS_RIC_RIC_RUNNING vxge_mBIT(9)
790#define VXGE_HW_ADAPTER_STATUS_CMG_C_PLL_IN_LOCK vxge_mBIT(10)
791#define VXGE_HW_ADAPTER_STATUS_XGMAC_X_PLL_IN_LOCK vxge_mBIT(11)
792#define VXGE_HW_ADAPTER_STATUS_FBIF_M_PLL_IN_LOCK vxge_mBIT(12)
793#define VXGE_HW_ADAPTER_STATUS_PCC_PCC_IDLE(val) vxge_vBIT(val, 24, 8)
794#define VXGE_HW_ADAPTER_STATUS_ROCRC_RC_PRC_QUIESCENT(val) vxge_vBIT(val, 44, 8)
795/*0x00eb8*/ u64 gen_ctrl;
796#define VXGE_HW_GEN_CTRL_SPI_MRPCIM_WR_DIS vxge_mBIT(0)
797#define VXGE_HW_GEN_CTRL_SPI_MRPCIM_RD_DIS vxge_mBIT(1)
798#define VXGE_HW_GEN_CTRL_SPI_SRPCIM_WR_DIS vxge_mBIT(2)
799#define VXGE_HW_GEN_CTRL_SPI_SRPCIM_RD_DIS vxge_mBIT(3)
800#define VXGE_HW_GEN_CTRL_SPI_DEBUG_DIS vxge_mBIT(4)
801#define VXGE_HW_GEN_CTRL_SPI_APP_LTSSM_TIMER_DIS vxge_mBIT(5)
802#define VXGE_HW_GEN_CTRL_SPI_NOT_USED(val) vxge_vBIT(val, 6, 4)
803 u8 unused00ed0[0x00ed0-0x00ec0];
804
805/*0x00ed0*/ u64 adapter_ready;
806#define VXGE_HW_ADAPTER_READY_ADAPTER_READY vxge_mBIT(63)
807/*0x00ed8*/ u64 outstanding_read;
808#define VXGE_HW_OUTSTANDING_READ_OUTSTANDING_READ(val) vxge_vBIT(val, 0, 17)
809/*0x00ee0*/ u64 vpath_rst_in_prog;
810#define VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(val) vxge_vBIT(val, 0, 17)
811/*0x00ee8*/ u64 vpath_reg_modified;
812#define VXGE_HW_VPATH_REG_MODIFIED_VPATH_REG_MODIFIED(val) vxge_vBIT(val, 0, 17)
813 u8 unused00fc0[0x00fc0-0x00ef0];
814
815/*0x00fc0*/ u64 cp_reset_in_progress;
816#define VXGE_HW_CP_RESET_IN_PROGRESS_CP_VPATH(n) vxge_mBIT(n)
817 u8 unused01080[0x01080-0x00fc8];
818
819/*0x01080*/ u64 xgmac_ready;
820#define VXGE_HW_XGMAC_READY_XMACJ_READY(val) vxge_vBIT(val, 0, 17)
821 u8 unused010c0[0x010c0-0x01088];
822
823/*0x010c0*/ u64 fbif_ready;
824#define VXGE_HW_FBIF_READY_FAU_READY(val) vxge_vBIT(val, 0, 17)
825 u8 unused01100[0x01100-0x010c8];
826
827/*0x01100*/ u64 vplane_assignments;
828#define VXGE_HW_VPLANE_ASSIGNMENTS_VPLANE_ASSIGNMENTS(val) vxge_vBIT(val, 3, 5)
829/*0x01108*/ u64 vpath_assignments;
830#define VXGE_HW_VPATH_ASSIGNMENTS_VPATH_ASSIGNMENTS(val) vxge_vBIT(val, 0, 17)
831/*0x01110*/ u64 resource_assignments;
832#define VXGE_HW_RESOURCE_ASSIGNMENTS_RESOURCE_ASSIGNMENTS(val) \
833 vxge_vBIT(val, 0, 17)
834/*0x01118*/ u64 host_type_assignments;
835#define VXGE_HW_HOST_TYPE_ASSIGNMENTS_HOST_TYPE_ASSIGNMENTS(val) \
836 vxge_vBIT(val, 5, 3)
837 u8 unused01128[0x01128-0x01120];
838
839/*0x01128*/ u64 max_resource_assignments;
840#define VXGE_HW_MAX_RESOURCE_ASSIGNMENTS_PCI_MAX_VPLANE(val) \
841 vxge_vBIT(val, 3, 5)
842#define VXGE_HW_MAX_RESOURCE_ASSIGNMENTS_PCI_MAX_VPATHS(val) \
843 vxge_vBIT(val, 11, 5)
844/*0x01130*/ u64 pf_vpath_assignments;
845#define VXGE_HW_PF_VPATH_ASSIGNMENTS_PF_VPATH_ASSIGNMENTS(val) \
846 vxge_vBIT(val, 0, 17)
847 u8 unused01200[0x01200-0x01138];
848
849/*0x01200*/ u64 rts_access_icmp;
850#define VXGE_HW_RTS_ACCESS_ICMP_EN(val) vxge_vBIT(val, 0, 17)
851/*0x01208*/ u64 rts_access_tcpsyn;
852#define VXGE_HW_RTS_ACCESS_TCPSYN_EN(val) vxge_vBIT(val, 0, 17)
853/*0x01210*/ u64 rts_access_zl4pyld;
854#define VXGE_HW_RTS_ACCESS_ZL4PYLD_EN(val) vxge_vBIT(val, 0, 17)
855/*0x01218*/ u64 rts_access_l4prtcl_tcp;
856#define VXGE_HW_RTS_ACCESS_L4PRTCL_TCP_EN(val) vxge_vBIT(val, 0, 17)
857/*0x01220*/ u64 rts_access_l4prtcl_udp;
858#define VXGE_HW_RTS_ACCESS_L4PRTCL_UDP_EN(val) vxge_vBIT(val, 0, 17)
859/*0x01228*/ u64 rts_access_l4prtcl_flex;
860#define VXGE_HW_RTS_ACCESS_L4PRTCL_FLEX_EN(val) vxge_vBIT(val, 0, 17)
861/*0x01230*/ u64 rts_access_ipfrag;
862#define VXGE_HW_RTS_ACCESS_IPFRAG_EN(val) vxge_vBIT(val, 0, 17)
863
864} __packed;
865
866struct vxge_hw_memrepair_reg {
867 u64 unused1;
868 u64 unused2;
869} __packed;
870
871struct vxge_hw_pcicfgmgmt_reg {
872
873/*0x00000*/ u64 resource_no;
874#define VXGE_HW_RESOURCE_NO_PFN_OR_VF BIT(3)
875/*0x00008*/ u64 bargrp_pf_or_vf_bar0_mask;
876#define VXGE_HW_BARGRP_PF_OR_VF_BAR0_MASK_BARGRP_PF_OR_VF_BAR0_MASK(val) \
877 vxge_vBIT(val, 2, 6)
878/*0x00010*/ u64 bargrp_pf_or_vf_bar1_mask;
879#define VXGE_HW_BARGRP_PF_OR_VF_BAR1_MASK_BARGRP_PF_OR_VF_BAR1_MASK(val) \
880 vxge_vBIT(val, 2, 6)
881/*0x00018*/ u64 bargrp_pf_or_vf_bar2_mask;
882#define VXGE_HW_BARGRP_PF_OR_VF_BAR2_MASK_BARGRP_PF_OR_VF_BAR2_MASK(val) \
883 vxge_vBIT(val, 2, 6)
884/*0x00020*/ u64 msixgrp_no;
885#define VXGE_HW_MSIXGRP_NO_TABLE_SIZE(val) vxge_vBIT(val, 5, 11)
886
887} __packed;
888
889struct vxge_hw_mrpcim_reg {
890/*0x00000*/ u64 g3fbct_int_status;
891#define VXGE_HW_G3FBCT_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
892/*0x00008*/ u64 g3fbct_int_mask;
893/*0x00010*/ u64 g3fbct_err_reg;
894#define VXGE_HW_G3FBCT_ERR_REG_G3IF_SM_ERR vxge_mBIT(4)
895#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_DECC vxge_mBIT(5)
896#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_U_DECC vxge_mBIT(6)
897#define VXGE_HW_G3FBCT_ERR_REG_G3IF_CTRL_FIFO_DECC vxge_mBIT(7)
898#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_SECC vxge_mBIT(29)
899#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_U_SECC vxge_mBIT(30)
900#define VXGE_HW_G3FBCT_ERR_REG_G3IF_CTRL_FIFO_SECC vxge_mBIT(31)
901/*0x00018*/ u64 g3fbct_err_mask;
902/*0x00020*/ u64 g3fbct_err_alarm;
903
904 u8 unused00a00[0x00a00-0x00028];
905
906/*0x00a00*/ u64 wrdma_int_status;
907#define VXGE_HW_WRDMA_INT_STATUS_RC_ALARM_RC_INT vxge_mBIT(0)
908#define VXGE_HW_WRDMA_INT_STATUS_RXDRM_SM_ERR_RXDRM_INT vxge_mBIT(1)
909#define VXGE_HW_WRDMA_INT_STATUS_RXDCM_SM_ERR_RXDCM_SM_INT vxge_mBIT(2)
910#define VXGE_HW_WRDMA_INT_STATUS_RXDWM_SM_ERR_RXDWM_INT vxge_mBIT(3)
911#define VXGE_HW_WRDMA_INT_STATUS_RDA_ERR_RDA_INT vxge_mBIT(6)
912#define VXGE_HW_WRDMA_INT_STATUS_RDA_ECC_DB_RDA_ECC_DB_INT vxge_mBIT(8)
913#define VXGE_HW_WRDMA_INT_STATUS_RDA_ECC_SG_RDA_ECC_SG_INT vxge_mBIT(9)
914#define VXGE_HW_WRDMA_INT_STATUS_FRF_ALARM_FRF_INT vxge_mBIT(12)
915#define VXGE_HW_WRDMA_INT_STATUS_ROCRC_ALARM_ROCRC_INT vxge_mBIT(13)
916#define VXGE_HW_WRDMA_INT_STATUS_WDE0_ALARM_WDE0_INT vxge_mBIT(14)
917#define VXGE_HW_WRDMA_INT_STATUS_WDE1_ALARM_WDE1_INT vxge_mBIT(15)
918#define VXGE_HW_WRDMA_INT_STATUS_WDE2_ALARM_WDE2_INT vxge_mBIT(16)
919#define VXGE_HW_WRDMA_INT_STATUS_WDE3_ALARM_WDE3_INT vxge_mBIT(17)
920/*0x00a08*/ u64 wrdma_int_mask;
921/*0x00a10*/ u64 rc_alarm_reg;
922#define VXGE_HW_RC_ALARM_REG_FTC_SM_ERR vxge_mBIT(0)
923#define VXGE_HW_RC_ALARM_REG_FTC_SM_PHASE_ERR vxge_mBIT(1)
924#define VXGE_HW_RC_ALARM_REG_BTDWM_SM_ERR vxge_mBIT(2)
925#define VXGE_HW_RC_ALARM_REG_BTC_SM_ERR vxge_mBIT(3)
926#define VXGE_HW_RC_ALARM_REG_BTDCM_SM_ERR vxge_mBIT(4)
927#define VXGE_HW_RC_ALARM_REG_BTDRM_SM_ERR vxge_mBIT(5)
928#define VXGE_HW_RC_ALARM_REG_RMM_RXD_RC_ECC_DB_ERR vxge_mBIT(6)
929#define VXGE_HW_RC_ALARM_REG_RMM_RXD_RC_ECC_SG_ERR vxge_mBIT(7)
930#define VXGE_HW_RC_ALARM_REG_RHS_RXD_RHS_ECC_DB_ERR vxge_mBIT(8)
931#define VXGE_HW_RC_ALARM_REG_RHS_RXD_RHS_ECC_SG_ERR vxge_mBIT(9)
932#define VXGE_HW_RC_ALARM_REG_RMM_SM_ERR vxge_mBIT(10)
933#define VXGE_HW_RC_ALARM_REG_BTC_VPATH_MISMATCH_ERR vxge_mBIT(12)
934/*0x00a18*/ u64 rc_alarm_mask;
935/*0x00a20*/ u64 rc_alarm_alarm;
936/*0x00a28*/ u64 rxdrm_sm_err_reg;
937#define VXGE_HW_RXDRM_SM_ERR_REG_PRC_VP(n) vxge_mBIT(n)
938/*0x00a30*/ u64 rxdrm_sm_err_mask;
939/*0x00a38*/ u64 rxdrm_sm_err_alarm;
940/*0x00a40*/ u64 rxdcm_sm_err_reg;
941#define VXGE_HW_RXDCM_SM_ERR_REG_PRC_VP(n) vxge_mBIT(n)
942/*0x00a48*/ u64 rxdcm_sm_err_mask;
943/*0x00a50*/ u64 rxdcm_sm_err_alarm;
944/*0x00a58*/ u64 rxdwm_sm_err_reg;
945#define VXGE_HW_RXDWM_SM_ERR_REG_PRC_VP(n) vxge_mBIT(n)
946/*0x00a60*/ u64 rxdwm_sm_err_mask;
947/*0x00a68*/ u64 rxdwm_sm_err_alarm;
948/*0x00a70*/ u64 rda_err_reg;
949#define VXGE_HW_RDA_ERR_REG_RDA_SM0_ERR_ALARM vxge_mBIT(0)
950#define VXGE_HW_RDA_ERR_REG_RDA_MISC_ERR vxge_mBIT(1)
951#define VXGE_HW_RDA_ERR_REG_RDA_PCIX_ERR vxge_mBIT(2)
952#define VXGE_HW_RDA_ERR_REG_RDA_RXD_ECC_DB_ERR vxge_mBIT(3)
953#define VXGE_HW_RDA_ERR_REG_RDA_FRM_ECC_DB_ERR vxge_mBIT(4)
954#define VXGE_HW_RDA_ERR_REG_RDA_UQM_ECC_DB_ERR vxge_mBIT(5)
955#define VXGE_HW_RDA_ERR_REG_RDA_IMM_ECC_DB_ERR vxge_mBIT(6)
956#define VXGE_HW_RDA_ERR_REG_RDA_TIM_ECC_DB_ERR vxge_mBIT(7)
957/*0x00a78*/ u64 rda_err_mask;
958/*0x00a80*/ u64 rda_err_alarm;
959/*0x00a88*/ u64 rda_ecc_db_reg;
960#define VXGE_HW_RDA_ECC_DB_REG_RDA_RXD_ERR(n) vxge_mBIT(n)
961/*0x00a90*/ u64 rda_ecc_db_mask;
962/*0x00a98*/ u64 rda_ecc_db_alarm;
963/*0x00aa0*/ u64 rda_ecc_sg_reg;
964#define VXGE_HW_RDA_ECC_SG_REG_RDA_RXD_ERR(n) vxge_mBIT(n)
965/*0x00aa8*/ u64 rda_ecc_sg_mask;
966/*0x00ab0*/ u64 rda_ecc_sg_alarm;
967/*0x00ab8*/ u64 rqa_err_reg;
968#define VXGE_HW_RQA_ERR_REG_RQA_SM_ERR_ALARM vxge_mBIT(0)
969/*0x00ac0*/ u64 rqa_err_mask;
970/*0x00ac8*/ u64 rqa_err_alarm;
971/*0x00ad0*/ u64 frf_alarm_reg;
972#define VXGE_HW_FRF_ALARM_REG_PRC_VP_FRF_SM_ERR(n) vxge_mBIT(n)
973/*0x00ad8*/ u64 frf_alarm_mask;
974/*0x00ae0*/ u64 frf_alarm_alarm;
975/*0x00ae8*/ u64 rocrc_alarm_reg;
976#define VXGE_HW_ROCRC_ALARM_REG_QCQ_QCC_BYP_ECC_DB vxge_mBIT(0)
977#define VXGE_HW_ROCRC_ALARM_REG_QCQ_QCC_BYP_ECC_SG vxge_mBIT(1)
978#define VXGE_HW_ROCRC_ALARM_REG_NOA_NMA_SM_ERR vxge_mBIT(2)
979#define VXGE_HW_ROCRC_ALARM_REG_NOA_IMMM_ECC_DB vxge_mBIT(3)
980#define VXGE_HW_ROCRC_ALARM_REG_NOA_IMMM_ECC_SG vxge_mBIT(4)
981#define VXGE_HW_ROCRC_ALARM_REG_UDQ_UMQM_ECC_DB vxge_mBIT(5)
982#define VXGE_HW_ROCRC_ALARM_REG_UDQ_UMQM_ECC_SG vxge_mBIT(6)
983#define VXGE_HW_ROCRC_ALARM_REG_NOA_RCBM_ECC_DB vxge_mBIT(11)
984#define VXGE_HW_ROCRC_ALARM_REG_NOA_RCBM_ECC_SG vxge_mBIT(12)
985#define VXGE_HW_ROCRC_ALARM_REG_QCQ_MULTI_EGB_RSVD_ERR vxge_mBIT(13)
986#define VXGE_HW_ROCRC_ALARM_REG_QCQ_MULTI_EGB_OWN_ERR vxge_mBIT(14)
987#define VXGE_HW_ROCRC_ALARM_REG_QCQ_MULTI_BYP_OWN_ERR vxge_mBIT(15)
988#define VXGE_HW_ROCRC_ALARM_REG_QCQ_OWN_NOT_ASSIGNED_ERR vxge_mBIT(16)
989#define VXGE_HW_ROCRC_ALARM_REG_QCQ_OWN_RSVD_SYNC_ERR vxge_mBIT(17)
990#define VXGE_HW_ROCRC_ALARM_REG_QCQ_LOST_EGB_ERR vxge_mBIT(18)
991#define VXGE_HW_ROCRC_ALARM_REG_RCQ_BYPQ0_OVERFLOW vxge_mBIT(19)
992#define VXGE_HW_ROCRC_ALARM_REG_RCQ_BYPQ1_OVERFLOW vxge_mBIT(20)
993#define VXGE_HW_ROCRC_ALARM_REG_RCQ_BYPQ2_OVERFLOW vxge_mBIT(21)
994#define VXGE_HW_ROCRC_ALARM_REG_NOA_WCT_CMD_FIFO_ERR vxge_mBIT(22)
995/*0x00af0*/ u64 rocrc_alarm_mask;
996/*0x00af8*/ u64 rocrc_alarm_alarm;
997/*0x00b00*/ u64 wde0_alarm_reg;
998#define VXGE_HW_WDE0_ALARM_REG_WDE0_DCC_SM_ERR vxge_mBIT(0)
999#define VXGE_HW_WDE0_ALARM_REG_WDE0_PRM_SM_ERR vxge_mBIT(1)
1000#define VXGE_HW_WDE0_ALARM_REG_WDE0_CP_SM_ERR vxge_mBIT(2)
1001#define VXGE_HW_WDE0_ALARM_REG_WDE0_CP_CMD_ERR vxge_mBIT(3)
1002#define VXGE_HW_WDE0_ALARM_REG_WDE0_PCR_SM_ERR vxge_mBIT(4)
1003/*0x00b08*/ u64 wde0_alarm_mask;
1004/*0x00b10*/ u64 wde0_alarm_alarm;
1005/*0x00b18*/ u64 wde1_alarm_reg;
1006#define VXGE_HW_WDE1_ALARM_REG_WDE1_DCC_SM_ERR vxge_mBIT(0)
1007#define VXGE_HW_WDE1_ALARM_REG_WDE1_PRM_SM_ERR vxge_mBIT(1)
1008#define VXGE_HW_WDE1_ALARM_REG_WDE1_CP_SM_ERR vxge_mBIT(2)
1009#define VXGE_HW_WDE1_ALARM_REG_WDE1_CP_CMD_ERR vxge_mBIT(3)
1010#define VXGE_HW_WDE1_ALARM_REG_WDE1_PCR_SM_ERR vxge_mBIT(4)
1011/*0x00b20*/ u64 wde1_alarm_mask;
1012/*0x00b28*/ u64 wde1_alarm_alarm;
1013/*0x00b30*/ u64 wde2_alarm_reg;
1014#define VXGE_HW_WDE2_ALARM_REG_WDE2_DCC_SM_ERR vxge_mBIT(0)
1015#define VXGE_HW_WDE2_ALARM_REG_WDE2_PRM_SM_ERR vxge_mBIT(1)
1016#define VXGE_HW_WDE2_ALARM_REG_WDE2_CP_SM_ERR vxge_mBIT(2)
1017#define VXGE_HW_WDE2_ALARM_REG_WDE2_CP_CMD_ERR vxge_mBIT(3)
1018#define VXGE_HW_WDE2_ALARM_REG_WDE2_PCR_SM_ERR vxge_mBIT(4)
1019/*0x00b38*/ u64 wde2_alarm_mask;
1020/*0x00b40*/ u64 wde2_alarm_alarm;
1021/*0x00b48*/ u64 wde3_alarm_reg;
1022#define VXGE_HW_WDE3_ALARM_REG_WDE3_DCC_SM_ERR vxge_mBIT(0)
1023#define VXGE_HW_WDE3_ALARM_REG_WDE3_PRM_SM_ERR vxge_mBIT(1)
1024#define VXGE_HW_WDE3_ALARM_REG_WDE3_CP_SM_ERR vxge_mBIT(2)
1025#define VXGE_HW_WDE3_ALARM_REG_WDE3_CP_CMD_ERR vxge_mBIT(3)
1026#define VXGE_HW_WDE3_ALARM_REG_WDE3_PCR_SM_ERR vxge_mBIT(4)
1027/*0x00b50*/ u64 wde3_alarm_mask;
1028/*0x00b58*/ u64 wde3_alarm_alarm;
1029
1030 u8 unused00be8[0x00be8-0x00b60];
1031
1032/*0x00be8*/ u64 rx_w_round_robin_0;
1033#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_0(val) vxge_vBIT(val, 3, 5)
1034#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_1(val) vxge_vBIT(val, 11, 5)
1035#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_2(val) vxge_vBIT(val, 19, 5)
1036#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_3(val) vxge_vBIT(val, 27, 5)
1037#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_4(val) vxge_vBIT(val, 35, 5)
1038#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_5(val) vxge_vBIT(val, 43, 5)
1039#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_6(val) vxge_vBIT(val, 51, 5)
1040#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_7(val) vxge_vBIT(val, 59, 5)
1041/*0x00bf0*/ u64 rx_w_round_robin_1;
1042#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_8(val) vxge_vBIT(val, 3, 5)
1043#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_9(val) vxge_vBIT(val, 11, 5)
1044#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_10(val) \
1045 vxge_vBIT(val, 19, 5)
1046#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_11(val) \
1047 vxge_vBIT(val, 27, 5)
1048#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_12(val) \
1049 vxge_vBIT(val, 35, 5)
1050#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_13(val) \
1051 vxge_vBIT(val, 43, 5)
1052#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_14(val) \
1053 vxge_vBIT(val, 51, 5)
1054#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_15(val) \
1055 vxge_vBIT(val, 59, 5)
1056/*0x00bf8*/ u64 rx_w_round_robin_2;
1057#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_16(val) vxge_vBIT(val, 3, 5)
1058#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_17(val) \
1059 vxge_vBIT(val, 11, 5)
1060#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_18(val) \
1061 vxge_vBIT(val, 19, 5)
1062#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_19(val) \
1063 vxge_vBIT(val, 27, 5)
1064#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_20(val) \
1065 vxge_vBIT(val, 35, 5)
1066#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_21(val) \
1067 vxge_vBIT(val, 43, 5)
1068#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_22(val) \
1069 vxge_vBIT(val, 51, 5)
1070#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_23(val) \
1071 vxge_vBIT(val, 59, 5)
1072/*0x00c00*/ u64 rx_w_round_robin_3;
1073#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_24(val) vxge_vBIT(val, 3, 5)
1074#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_25(val) \
1075 vxge_vBIT(val, 11, 5)
1076#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_26(val) \
1077 vxge_vBIT(val, 19, 5)
1078#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_27(val) \
1079 vxge_vBIT(val, 27, 5)
1080#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_28(val) \
1081 vxge_vBIT(val, 35, 5)
1082#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_29(val) \
1083 vxge_vBIT(val, 43, 5)
1084#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_30(val) \
1085 vxge_vBIT(val, 51, 5)
1086#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_31(val) \
1087 vxge_vBIT(val, 59, 5)
1088/*0x00c08*/ u64 rx_w_round_robin_4;
1089#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_32(val) vxge_vBIT(val, 3, 5)
1090#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_33(val) \
1091 vxge_vBIT(val, 11, 5)
1092#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_34(val) \
1093 vxge_vBIT(val, 19, 5)
1094#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_35(val) \
1095 vxge_vBIT(val, 27, 5)
1096#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_36(val) \
1097 vxge_vBIT(val, 35, 5)
1098#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_37(val) \
1099 vxge_vBIT(val, 43, 5)
1100#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_38(val) \
1101 vxge_vBIT(val, 51, 5)
1102#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_39(val) \
1103 vxge_vBIT(val, 59, 5)
1104/*0x00c10*/ u64 rx_w_round_robin_5;
1105#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_40(val) vxge_vBIT(val, 3, 5)
1106#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_41(val) \
1107 vxge_vBIT(val, 11, 5)
1108#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_42(val) \
1109 vxge_vBIT(val, 19, 5)
1110#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_43(val) \
1111 vxge_vBIT(val, 27, 5)
1112#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_44(val) \
1113 vxge_vBIT(val, 35, 5)
1114#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_45(val) \
1115 vxge_vBIT(val, 43, 5)
1116#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_46(val) \
1117 vxge_vBIT(val, 51, 5)
1118#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_47(val) \
1119 vxge_vBIT(val, 59, 5)
1120/*0x00c18*/ u64 rx_w_round_robin_6;
1121#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_48(val) vxge_vBIT(val, 3, 5)
1122#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_49(val) \
1123 vxge_vBIT(val, 11, 5)
1124#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_50(val) \
1125 vxge_vBIT(val, 19, 5)
1126#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_51(val) \
1127 vxge_vBIT(val, 27, 5)
1128#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_52(val) \
1129 vxge_vBIT(val, 35, 5)
1130#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_53(val) \
1131 vxge_vBIT(val, 43, 5)
1132#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_54(val) \
1133 vxge_vBIT(val, 51, 5)
1134#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_55(val) \
1135 vxge_vBIT(val, 59, 5)
1136/*0x00c20*/ u64 rx_w_round_robin_7;
1137#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_56(val) vxge_vBIT(val, 3, 5)
1138#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_57(val) \
1139 vxge_vBIT(val, 11, 5)
1140#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_58(val) \
1141 vxge_vBIT(val, 19, 5)
1142#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_59(val) \
1143 vxge_vBIT(val, 27, 5)
1144#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_60(val) \
1145 vxge_vBIT(val, 35, 5)
1146#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_61(val) \
1147 vxge_vBIT(val, 43, 5)
1148#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_62(val) \
1149 vxge_vBIT(val, 51, 5)
1150#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_63(val) \
1151 vxge_vBIT(val, 59, 5)
1152/*0x00c28*/ u64 rx_w_round_robin_8;
1153#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_64(val) vxge_vBIT(val, 3, 5)
1154#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_65(val) \
1155 vxge_vBIT(val, 11, 5)
1156#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_66(val) \
1157 vxge_vBIT(val, 19, 5)
1158#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_67(val) \
1159 vxge_vBIT(val, 27, 5)
1160#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_68(val) \
1161 vxge_vBIT(val, 35, 5)
1162#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_69(val) \
1163 vxge_vBIT(val, 43, 5)
1164#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_70(val) \
1165 vxge_vBIT(val, 51, 5)
1166#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_71(val) \
1167 vxge_vBIT(val, 59, 5)
1168/*0x00c30*/ u64 rx_w_round_robin_9;
1169#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_72(val) vxge_vBIT(val, 3, 5)
1170#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_73(val) \
1171 vxge_vBIT(val, 11, 5)
1172#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_74(val) \
1173 vxge_vBIT(val, 19, 5)
1174#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_75(val) \
1175 vxge_vBIT(val, 27, 5)
1176#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_76(val) \
1177 vxge_vBIT(val, 35, 5)
1178#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_77(val) \
1179 vxge_vBIT(val, 43, 5)
1180#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_78(val) \
1181 vxge_vBIT(val, 51, 5)
1182#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_79(val) \
1183 vxge_vBIT(val, 59, 5)
1184/*0x00c38*/ u64 rx_w_round_robin_10;
1185#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_80(val) \
1186 vxge_vBIT(val, 3, 5)
1187#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_81(val) \
1188 vxge_vBIT(val, 11, 5)
1189#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_82(val) \
1190 vxge_vBIT(val, 19, 5)
1191#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_83(val) \
1192 vxge_vBIT(val, 27, 5)
1193#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_84(val) \
1194 vxge_vBIT(val, 35, 5)
1195#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_85(val) \
1196 vxge_vBIT(val, 43, 5)
1197#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_86(val) \
1198 vxge_vBIT(val, 51, 5)
1199#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_87(val) \
1200 vxge_vBIT(val, 59, 5)
1201/*0x00c40*/ u64 rx_w_round_robin_11;
1202#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_88(val) \
1203 vxge_vBIT(val, 3, 5)
1204#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_89(val) \
1205 vxge_vBIT(val, 11, 5)
1206#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_90(val) \
1207 vxge_vBIT(val, 19, 5)
1208#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_91(val) \
1209 vxge_vBIT(val, 27, 5)
1210#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_92(val) \
1211 vxge_vBIT(val, 35, 5)
1212#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_93(val) \
1213 vxge_vBIT(val, 43, 5)
1214#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_94(val) \
1215 vxge_vBIT(val, 51, 5)
1216#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_95(val) \
1217 vxge_vBIT(val, 59, 5)
1218/*0x00c48*/ u64 rx_w_round_robin_12;
1219#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_96(val) \
1220 vxge_vBIT(val, 3, 5)
1221#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_97(val) \
1222 vxge_vBIT(val, 11, 5)
1223#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_98(val) \
1224 vxge_vBIT(val, 19, 5)
1225#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_99(val) \
1226 vxge_vBIT(val, 27, 5)
1227#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_100(val) \
1228 vxge_vBIT(val, 35, 5)
1229#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_101(val) \
1230 vxge_vBIT(val, 43, 5)
1231#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_102(val) \
1232 vxge_vBIT(val, 51, 5)
1233#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_103(val) \
1234 vxge_vBIT(val, 59, 5)
1235/*0x00c50*/ u64 rx_w_round_robin_13;
1236#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_104(val) \
1237 vxge_vBIT(val, 3, 5)
1238#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_105(val) \
1239 vxge_vBIT(val, 11, 5)
1240#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_106(val) \
1241 vxge_vBIT(val, 19, 5)
1242#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_107(val) \
1243 vxge_vBIT(val, 27, 5)
1244#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_108(val) \
1245 vxge_vBIT(val, 35, 5)
1246#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_109(val) \
1247 vxge_vBIT(val, 43, 5)
1248#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_110(val) \
1249 vxge_vBIT(val, 51, 5)
1250#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_111(val) \
1251 vxge_vBIT(val, 59, 5)
1252/*0x00c58*/ u64 rx_w_round_robin_14;
1253#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_112(val) \
1254 vxge_vBIT(val, 3, 5)
1255#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_113(val) \
1256 vxge_vBIT(val, 11, 5)
1257#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_114(val) \
1258 vxge_vBIT(val, 19, 5)
1259#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_115(val) \
1260 vxge_vBIT(val, 27, 5)
1261#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_116(val) \
1262 vxge_vBIT(val, 35, 5)
1263#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_117(val) \
1264 vxge_vBIT(val, 43, 5)
1265#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_118(val) \
1266 vxge_vBIT(val, 51, 5)
1267#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_119(val) \
1268 vxge_vBIT(val, 59, 5)
1269/*0x00c60*/ u64 rx_w_round_robin_15;
1270#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_120(val) \
1271 vxge_vBIT(val, 3, 5)
1272#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_121(val) \
1273 vxge_vBIT(val, 11, 5)
1274#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_122(val) \
1275 vxge_vBIT(val, 19, 5)
1276#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_123(val) \
1277 vxge_vBIT(val, 27, 5)
1278#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_124(val) \
1279 vxge_vBIT(val, 35, 5)
1280#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_125(val) \
1281 vxge_vBIT(val, 43, 5)
1282#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_126(val) \
1283 vxge_vBIT(val, 51, 5)
1284#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_127(val) \
1285 vxge_vBIT(val, 59, 5)
1286/*0x00c68*/ u64 rx_w_round_robin_16;
1287#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_128(val) \
1288 vxge_vBIT(val, 3, 5)
1289#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_129(val) \
1290 vxge_vBIT(val, 11, 5)
1291#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_130(val) \
1292 vxge_vBIT(val, 19, 5)
1293#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_131(val) \
1294 vxge_vBIT(val, 27, 5)
1295#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_132(val) \
1296 vxge_vBIT(val, 35, 5)
1297#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_133(val) \
1298 vxge_vBIT(val, 43, 5)
1299#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_134(val) \
1300 vxge_vBIT(val, 51, 5)
1301#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_135(val) \
1302 vxge_vBIT(val, 59, 5)
1303/*0x00c70*/ u64 rx_w_round_robin_17;
1304#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_136(val) \
1305 vxge_vBIT(val, 3, 5)
1306#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_137(val) \
1307 vxge_vBIT(val, 11, 5)
1308#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_138(val) \
1309 vxge_vBIT(val, 19, 5)
1310#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_139(val) \
1311 vxge_vBIT(val, 27, 5)
1312#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_140(val) \
1313 vxge_vBIT(val, 35, 5)
1314#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_141(val) \
1315 vxge_vBIT(val, 43, 5)
1316#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_142(val) \
1317 vxge_vBIT(val, 51, 5)
1318#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_143(val) \
1319 vxge_vBIT(val, 59, 5)
1320/*0x00c78*/ u64 rx_w_round_robin_18;
1321#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_144(val) \
1322 vxge_vBIT(val, 3, 5)
1323#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_145(val) \
1324 vxge_vBIT(val, 11, 5)
1325#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_146(val) \
1326 vxge_vBIT(val, 19, 5)
1327#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_147(val) \
1328 vxge_vBIT(val, 27, 5)
1329#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_148(val) \
1330 vxge_vBIT(val, 35, 5)
1331#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_149(val) \
1332 vxge_vBIT(val, 43, 5)
1333#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_150(val) \
1334 vxge_vBIT(val, 51, 5)
1335#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_151(val) \
1336 vxge_vBIT(val, 59, 5)
1337/*0x00c80*/ u64 rx_w_round_robin_19;
1338#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_152(val) \
1339 vxge_vBIT(val, 3, 5)
1340#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_153(val) \
1341 vxge_vBIT(val, 11, 5)
1342#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_154(val) \
1343 vxge_vBIT(val, 19, 5)
1344#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_155(val) \
1345 vxge_vBIT(val, 27, 5)
1346#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_156(val) \
1347 vxge_vBIT(val, 35, 5)
1348#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_157(val) \
1349 vxge_vBIT(val, 43, 5)
1350#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_158(val) \
1351 vxge_vBIT(val, 51, 5)
1352#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_159(val) \
1353 vxge_vBIT(val, 59, 5)
1354/*0x00c88*/ u64 rx_w_round_robin_20;
1355#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_160(val) \
1356 vxge_vBIT(val, 3, 5)
1357#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_161(val) \
1358 vxge_vBIT(val, 11, 5)
1359#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_162(val) \
1360 vxge_vBIT(val, 19, 5)
1361#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_163(val) \
1362 vxge_vBIT(val, 27, 5)
1363#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_164(val) \
1364 vxge_vBIT(val, 35, 5)
1365#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_165(val) \
1366 vxge_vBIT(val, 43, 5)
1367#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_166(val) \
1368 vxge_vBIT(val, 51, 5)
1369#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_167(val) \
1370 vxge_vBIT(val, 59, 5)
1371/*0x00c90*/ u64 rx_w_round_robin_21;
1372#define VXGE_HW_RX_W_ROUND_ROBIN_21_RX_W_PRIORITY_SS_168(val) \
1373 vxge_vBIT(val, 3, 5)
1374#define VXGE_HW_RX_W_ROUND_ROBIN_21_RX_W_PRIORITY_SS_169(val) \
1375 vxge_vBIT(val, 11, 5)
1376#define VXGE_HW_RX_W_ROUND_ROBIN_21_RX_W_PRIORITY_SS_170(val) \
1377 vxge_vBIT(val, 19, 5)
1378
1379#define VXGE_HW_WRR_RING_SERVICE_STATES 171
1380#define VXGE_HW_WRR_RING_COUNT 22
1381
1382/*0x00c98*/ u64 rx_queue_priority_0;
1383#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_0(val) vxge_vBIT(val, 3, 5)
1384#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_1(val) vxge_vBIT(val, 11, 5)
1385#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_2(val) vxge_vBIT(val, 19, 5)
1386#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_3(val) vxge_vBIT(val, 27, 5)
1387#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_4(val) vxge_vBIT(val, 35, 5)
1388#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_5(val) vxge_vBIT(val, 43, 5)
1389#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_6(val) vxge_vBIT(val, 51, 5)
1390#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_7(val) vxge_vBIT(val, 59, 5)
1391/*0x00ca0*/ u64 rx_queue_priority_1;
1392#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_8(val) vxge_vBIT(val, 3, 5)
1393#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_9(val) vxge_vBIT(val, 11, 5)
1394#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_10(val) vxge_vBIT(val, 19, 5)
1395#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_11(val) vxge_vBIT(val, 27, 5)
1396#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_12(val) vxge_vBIT(val, 35, 5)
1397#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_13(val) vxge_vBIT(val, 43, 5)
1398#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_14(val) vxge_vBIT(val, 51, 5)
1399#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_15(val) vxge_vBIT(val, 59, 5)
1400/*0x00ca8*/ u64 rx_queue_priority_2;
1401#define VXGE_HW_RX_QUEUE_PRIORITY_2_RX_Q_NUMBER_16(val) vxge_vBIT(val, 3, 5)
1402 u8 unused00cc8[0x00cc8-0x00cb0];
1403
1404/*0x00cc8*/ u64 replication_queue_priority;
1405#define VXGE_HW_REPLICATION_QUEUE_PRIORITY_REPLICATION_QUEUE_PRIORITY(val) \
1406 vxge_vBIT(val, 59, 5)
1407/*0x00cd0*/ u64 rx_queue_select;
1408#define VXGE_HW_RX_QUEUE_SELECT_NUMBER(n) vxge_mBIT(n)
1409#define VXGE_HW_RX_QUEUE_SELECT_ENABLE_CODE vxge_mBIT(15)
1410#define VXGE_HW_RX_QUEUE_SELECT_ENABLE_HIERARCHICAL_PRTY vxge_mBIT(23)
1411/*0x00cd8*/ u64 rqa_vpbp_ctrl;
1412#define VXGE_HW_RQA_VPBP_CTRL_WR_XON_DIS vxge_mBIT(15)
1413#define VXGE_HW_RQA_VPBP_CTRL_ROCRC_DIS vxge_mBIT(23)
1414#define VXGE_HW_RQA_VPBP_CTRL_TXPE_DIS vxge_mBIT(31)
1415/*0x00ce0*/ u64 rx_multi_cast_ctrl;
1416#define VXGE_HW_RX_MULTI_CAST_CTRL_TIME_OUT_DIS vxge_mBIT(0)
1417#define VXGE_HW_RX_MULTI_CAST_CTRL_FRM_DROP_DIS vxge_mBIT(1)
1418#define VXGE_HW_RX_MULTI_CAST_CTRL_NO_RXD_TIME_OUT_CNT(val) \
1419 vxge_vBIT(val, 2, 30)
1420#define VXGE_HW_RX_MULTI_CAST_CTRL_TIME_OUT_CNT(val) vxge_vBIT(val, 32, 32)
1421/*0x00ce8*/ u64 wde_prm_ctrl;
1422#define VXGE_HW_WDE_PRM_CTRL_SPAV_THRESHOLD(val) vxge_vBIT(val, 2, 10)
1423#define VXGE_HW_WDE_PRM_CTRL_SPLIT_THRESHOLD(val) vxge_vBIT(val, 18, 14)
1424#define VXGE_HW_WDE_PRM_CTRL_SPLIT_ON_1ST_ROW vxge_mBIT(32)
1425#define VXGE_HW_WDE_PRM_CTRL_SPLIT_ON_ROW_BNDRY vxge_mBIT(33)
1426#define VXGE_HW_WDE_PRM_CTRL_FB_ROW_SIZE(val) vxge_vBIT(val, 46, 2)
1427/*0x00cf0*/ u64 noa_ctrl;
1428#define VXGE_HW_NOA_CTRL_FRM_PRTY_QUOTA(val) vxge_vBIT(val, 3, 5)
1429#define VXGE_HW_NOA_CTRL_NON_FRM_PRTY_QUOTA(val) vxge_vBIT(val, 11, 5)
1430#define VXGE_HW_NOA_CTRL_IGNORE_KDFC_IF_STATUS vxge_mBIT(16)
1431#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE0(val) vxge_vBIT(val, 37, 4)
1432#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE1(val) vxge_vBIT(val, 45, 4)
1433#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE2(val) vxge_vBIT(val, 53, 4)
1434#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE3(val) vxge_vBIT(val, 60, 4)
1435/*0x00cf8*/ u64 phase_cfg;
1436#define VXGE_HW_PHASE_CFG_QCC_WR_PHASE_EN vxge_mBIT(0)
1437#define VXGE_HW_PHASE_CFG_QCC_RD_PHASE_EN vxge_mBIT(3)
1438#define VXGE_HW_PHASE_CFG_IMMM_WR_PHASE_EN vxge_mBIT(7)
1439#define VXGE_HW_PHASE_CFG_IMMM_RD_PHASE_EN vxge_mBIT(11)
1440#define VXGE_HW_PHASE_CFG_UMQM_WR_PHASE_EN vxge_mBIT(15)
1441#define VXGE_HW_PHASE_CFG_UMQM_RD_PHASE_EN vxge_mBIT(19)
1442#define VXGE_HW_PHASE_CFG_RCBM_WR_PHASE_EN vxge_mBIT(23)
1443#define VXGE_HW_PHASE_CFG_RCBM_RD_PHASE_EN vxge_mBIT(27)
1444#define VXGE_HW_PHASE_CFG_RXD_RC_WR_PHASE_EN vxge_mBIT(31)
1445#define VXGE_HW_PHASE_CFG_RXD_RC_RD_PHASE_EN vxge_mBIT(35)
1446#define VXGE_HW_PHASE_CFG_RXD_RHS_WR_PHASE_EN vxge_mBIT(39)
1447#define VXGE_HW_PHASE_CFG_RXD_RHS_RD_PHASE_EN vxge_mBIT(43)
1448/*0x00d00*/ u64 rcq_bypq_cfg;
1449#define VXGE_HW_RCQ_BYPQ_CFG_OVERFLOW_THRESHOLD(val) vxge_vBIT(val, 10, 22)
1450#define VXGE_HW_RCQ_BYPQ_CFG_BYP_ON_THRESHOLD(val) vxge_vBIT(val, 39, 9)
1451#define VXGE_HW_RCQ_BYPQ_CFG_BYP_OFF_THRESHOLD(val) vxge_vBIT(val, 55, 9)
1452 u8 unused00e00[0x00e00-0x00d08];
1453
1454/*0x00e00*/ u64 doorbell_int_status;
1455#define VXGE_HW_DOORBELL_INT_STATUS_KDFC_ERR_REG_TXDMA_KDFC_INT vxge_mBIT(7)
1456#define VXGE_HW_DOORBELL_INT_STATUS_USDC_ERR_REG_TXDMA_USDC_INT vxge_mBIT(15)
1457/*0x00e08*/ u64 doorbell_int_mask;
1458/*0x00e10*/ u64 kdfc_err_reg;
1459#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_ECC_SG_ERR vxge_mBIT(7)
1460#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_ECC_DB_ERR vxge_mBIT(15)
1461#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_SM_ERR_ALARM vxge_mBIT(23)
1462#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_MISC_ERR_1 vxge_mBIT(32)
1463#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_PCIX_ERR vxge_mBIT(39)
1464/*0x00e18*/ u64 kdfc_err_mask;
1465/*0x00e20*/ u64 kdfc_err_reg_alarm;
1466#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_ECC_SG_ERR vxge_mBIT(7)
1467#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_ECC_DB_ERR vxge_mBIT(15)
1468#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_SM_ERR_ALARM vxge_mBIT(23)
1469#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_MISC_ERR_1 vxge_mBIT(32)
1470#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_PCIX_ERR vxge_mBIT(39)
1471 u8 unused00e40[0x00e40-0x00e28];
1472/*0x00e40*/ u64 kdfc_vp_partition_0;
1473#define VXGE_HW_KDFC_VP_PARTITION_0_ENABLE vxge_mBIT(0)
1474#define VXGE_HW_KDFC_VP_PARTITION_0_NUMBER_0(val) vxge_vBIT(val, 5, 3)
1475#define VXGE_HW_KDFC_VP_PARTITION_0_LENGTH_0(val) vxge_vBIT(val, 17, 15)
1476#define VXGE_HW_KDFC_VP_PARTITION_0_NUMBER_1(val) vxge_vBIT(val, 37, 3)
1477#define VXGE_HW_KDFC_VP_PARTITION_0_LENGTH_1(val) vxge_vBIT(val, 49, 15)
1478/*0x00e48*/ u64 kdfc_vp_partition_1;
1479#define VXGE_HW_KDFC_VP_PARTITION_1_NUMBER_2(val) vxge_vBIT(val, 5, 3)
1480#define VXGE_HW_KDFC_VP_PARTITION_1_LENGTH_2(val) vxge_vBIT(val, 17, 15)
1481#define VXGE_HW_KDFC_VP_PARTITION_1_NUMBER_3(val) vxge_vBIT(val, 37, 3)
1482#define VXGE_HW_KDFC_VP_PARTITION_1_LENGTH_3(val) vxge_vBIT(val, 49, 15)
1483/*0x00e50*/ u64 kdfc_vp_partition_2;
1484#define VXGE_HW_KDFC_VP_PARTITION_2_NUMBER_4(val) vxge_vBIT(val, 5, 3)
1485#define VXGE_HW_KDFC_VP_PARTITION_2_LENGTH_4(val) vxge_vBIT(val, 17, 15)
1486#define VXGE_HW_KDFC_VP_PARTITION_2_NUMBER_5(val) vxge_vBIT(val, 37, 3)
1487#define VXGE_HW_KDFC_VP_PARTITION_2_LENGTH_5(val) vxge_vBIT(val, 49, 15)
1488/*0x00e58*/ u64 kdfc_vp_partition_3;
1489#define VXGE_HW_KDFC_VP_PARTITION_3_NUMBER_6(val) vxge_vBIT(val, 5, 3)
1490#define VXGE_HW_KDFC_VP_PARTITION_3_LENGTH_6(val) vxge_vBIT(val, 17, 15)
1491#define VXGE_HW_KDFC_VP_PARTITION_3_NUMBER_7(val) vxge_vBIT(val, 37, 3)
1492#define VXGE_HW_KDFC_VP_PARTITION_3_LENGTH_7(val) vxge_vBIT(val, 49, 15)
1493/*0x00e60*/ u64 kdfc_vp_partition_4;
1494#define VXGE_HW_KDFC_VP_PARTITION_4_LENGTH_8(val) vxge_vBIT(val, 17, 15)
1495#define VXGE_HW_KDFC_VP_PARTITION_4_LENGTH_9(val) vxge_vBIT(val, 49, 15)
1496/*0x00e68*/ u64 kdfc_vp_partition_5;
1497#define VXGE_HW_KDFC_VP_PARTITION_5_LENGTH_10(val) vxge_vBIT(val, 17, 15)
1498#define VXGE_HW_KDFC_VP_PARTITION_5_LENGTH_11(val) vxge_vBIT(val, 49, 15)
1499/*0x00e70*/ u64 kdfc_vp_partition_6;
1500#define VXGE_HW_KDFC_VP_PARTITION_6_LENGTH_12(val) vxge_vBIT(val, 17, 15)
1501#define VXGE_HW_KDFC_VP_PARTITION_6_LENGTH_13(val) vxge_vBIT(val, 49, 15)
1502/*0x00e78*/ u64 kdfc_vp_partition_7;
1503#define VXGE_HW_KDFC_VP_PARTITION_7_LENGTH_14(val) vxge_vBIT(val, 17, 15)
1504#define VXGE_HW_KDFC_VP_PARTITION_7_LENGTH_15(val) vxge_vBIT(val, 49, 15)
1505/*0x00e80*/ u64 kdfc_vp_partition_8;
1506#define VXGE_HW_KDFC_VP_PARTITION_8_LENGTH_16(val) vxge_vBIT(val, 17, 15)
1507/*0x00e88*/ u64 kdfc_w_round_robin_0;
1508#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_0(val) vxge_vBIT(val, 3, 5)
1509#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_1(val) vxge_vBIT(val, 11, 5)
1510#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_2(val) vxge_vBIT(val, 19, 5)
1511#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_3(val) vxge_vBIT(val, 27, 5)
1512#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_4(val) vxge_vBIT(val, 35, 5)
1513#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_5(val) vxge_vBIT(val, 43, 5)
1514#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_6(val) vxge_vBIT(val, 51, 5)
1515#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_7(val) vxge_vBIT(val, 59, 5)
1516
1517 u8 unused0f28[0x0f28-0x0e90];
1518
1519/*0x00f28*/ u64 kdfc_w_round_robin_20;
1520#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_0(val) vxge_vBIT(val, 3, 5)
1521#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_1(val) vxge_vBIT(val, 11, 5)
1522#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_2(val) vxge_vBIT(val, 19, 5)
1523#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_3(val) vxge_vBIT(val, 27, 5)
1524#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_4(val) vxge_vBIT(val, 35, 5)
1525#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_5(val) vxge_vBIT(val, 43, 5)
1526#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_6(val) vxge_vBIT(val, 51, 5)
1527#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_7(val) vxge_vBIT(val, 59, 5)
1528
1529#define VXGE_HW_WRR_FIFO_COUNT 20
1530
1531 u8 unused0fc8[0x0fc8-0x0f30];
1532
1533/*0x00fc8*/ u64 kdfc_w_round_robin_40;
1534#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_0(val) vxge_vBIT(val, 3, 5)
1535#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_1(val) vxge_vBIT(val, 11, 5)
1536#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_2(val) vxge_vBIT(val, 19, 5)
1537#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_3(val) vxge_vBIT(val, 27, 5)
1538#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_4(val) vxge_vBIT(val, 35, 5)
1539#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_5(val) vxge_vBIT(val, 43, 5)
1540#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_6(val) vxge_vBIT(val, 51, 5)
1541#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_7(val) vxge_vBIT(val, 59, 5)
1542
1543 u8 unused1068[0x01068-0x0fd0];
1544
1545/*0x01068*/ u64 kdfc_entry_type_sel_0;
1546#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_0(val) vxge_vBIT(val, 6, 2)
1547#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_1(val) vxge_vBIT(val, 14, 2)
1548#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_2(val) vxge_vBIT(val, 22, 2)
1549#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_3(val) vxge_vBIT(val, 30, 2)
1550#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_4(val) vxge_vBIT(val, 38, 2)
1551#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_5(val) vxge_vBIT(val, 46, 2)
1552#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_6(val) vxge_vBIT(val, 54, 2)
1553#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_7(val) vxge_vBIT(val, 62, 2)
1554/*0x01070*/ u64 kdfc_entry_type_sel_1;
1555#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_1_NUMBER_8(val) vxge_vBIT(val, 6, 2)
1556/*0x01078*/ u64 kdfc_fifo_0_ctrl;
1557#define VXGE_HW_KDFC_FIFO_0_CTRL_WRR_NUMBER(val) vxge_vBIT(val, 3, 5)
1558#define VXGE_HW_WEIGHTED_RR_SERVICE_STATES 176
1559#define VXGE_HW_WRR_FIFO_SERVICE_STATES 153
1560
1561 u8 unused1100[0x01100-0x1080];
1562
1563/*0x01100*/ u64 kdfc_fifo_17_ctrl;
1564#define VXGE_HW_KDFC_FIFO_17_CTRL_WRR_NUMBER(val) vxge_vBIT(val, 3, 5)
1565
1566 u8 unused1600[0x01600-0x1108];
1567
1568/*0x01600*/ u64 rxmac_int_status;
1569#define VXGE_HW_RXMAC_INT_STATUS_RXMAC_GEN_ERR_RXMAC_GEN_INT vxge_mBIT(3)
1570#define VXGE_HW_RXMAC_INT_STATUS_RXMAC_ECC_ERR_RXMAC_ECC_INT vxge_mBIT(7)
1571#define VXGE_HW_RXMAC_INT_STATUS_RXMAC_VARIOUS_ERR_RXMAC_VARIOUS_INT \
1572 vxge_mBIT(11)
1573/*0x01608*/ u64 rxmac_int_mask;
1574 u8 unused01618[0x01618-0x01610];
1575
1576/*0x01618*/ u64 rxmac_gen_err_reg;
1577/*0x01620*/ u64 rxmac_gen_err_mask;
1578/*0x01628*/ u64 rxmac_gen_err_alarm;
1579/*0x01630*/ u64 rxmac_ecc_err_reg;
1580#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT0_RMAC_RTS_PART_SG_ERR(val) \
1581 vxge_vBIT(val, 0, 4)
1582#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT0_RMAC_RTS_PART_DB_ERR(val) \
1583 vxge_vBIT(val, 4, 4)
1584#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT1_RMAC_RTS_PART_SG_ERR(val) \
1585 vxge_vBIT(val, 8, 4)
1586#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT1_RMAC_RTS_PART_DB_ERR(val) \
1587 vxge_vBIT(val, 12, 4)
1588#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT2_RMAC_RTS_PART_SG_ERR(val) \
1589 vxge_vBIT(val, 16, 4)
1590#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT2_RMAC_RTS_PART_DB_ERR(val) \
1591 vxge_vBIT(val, 20, 4)
1592#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT0_SG_ERR(val) \
1593 vxge_vBIT(val, 24, 2)
1594#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT0_DB_ERR(val) \
1595 vxge_vBIT(val, 26, 2)
1596#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT1_SG_ERR(val) \
1597 vxge_vBIT(val, 28, 2)
1598#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT1_DB_ERR(val) \
1599 vxge_vBIT(val, 30, 2)
1600#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_VID_LKP_SG_ERR vxge_mBIT(32)
1601#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_VID_LKP_DB_ERR vxge_mBIT(33)
1602#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT0_SG_ERR vxge_mBIT(34)
1603#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT0_DB_ERR vxge_mBIT(35)
1604#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT1_SG_ERR vxge_mBIT(36)
1605#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT1_DB_ERR vxge_mBIT(37)
1606#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT2_SG_ERR vxge_mBIT(38)
1607#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT2_DB_ERR vxge_mBIT(39)
1608#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_MASK_SG_ERR(val) \
1609 vxge_vBIT(val, 40, 7)
1610#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_MASK_DB_ERR(val) \
1611 vxge_vBIT(val, 47, 7)
1612#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_LKP_SG_ERR(val) \
1613 vxge_vBIT(val, 54, 3)
1614#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_LKP_DB_ERR(val) \
1615 vxge_vBIT(val, 57, 3)
1616#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DS_LKP_SG_ERR \
1617 vxge_mBIT(60)
1618#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DS_LKP_DB_ERR \
1619 vxge_mBIT(61)
1620/*0x01638*/ u64 rxmac_ecc_err_mask;
1621/*0x01640*/ u64 rxmac_ecc_err_alarm;
1622/*0x01648*/ u64 rxmac_various_err_reg;
1623#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMAC_RMAC_PORT0_FSM_ERR vxge_mBIT(0)
1624#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMAC_RMAC_PORT1_FSM_ERR vxge_mBIT(1)
1625#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMAC_RMAC_PORT2_FSM_ERR vxge_mBIT(2)
1626#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMACJ_RMACJ_FSM_ERR vxge_mBIT(3)
1627/*0x01650*/ u64 rxmac_various_err_mask;
1628/*0x01658*/ u64 rxmac_various_err_alarm;
1629/*0x01660*/ u64 rxmac_gen_cfg;
1630#define VXGE_HW_RXMAC_GEN_CFG_SCALE_RMAC_UTIL vxge_mBIT(11)
1631/*0x01668*/ u64 rxmac_authorize_all_addr;
1632#define VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(n) vxge_mBIT(n)
1633/*0x01670*/ u64 rxmac_authorize_all_vid;
1634#define VXGE_HW_RXMAC_AUTHORIZE_ALL_VID_VP(n) vxge_mBIT(n)
1635 u8 unused016c0[0x016c0-0x01678];
1636
1637/*0x016c0*/ u64 rxmac_red_rate_repl_queue;
1638#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR0(val) vxge_vBIT(val, 0, 4)
1639#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR1(val) vxge_vBIT(val, 4, 4)
1640#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR2(val) vxge_vBIT(val, 8, 4)
1641#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR3(val) vxge_vBIT(val, 12, 4)
1642#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR0(val) vxge_vBIT(val, 16, 4)
1643#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR1(val) vxge_vBIT(val, 20, 4)
1644#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR2(val) vxge_vBIT(val, 24, 4)
1645#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR3(val) vxge_vBIT(val, 28, 4)
1646#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_TRICKLE_EN vxge_mBIT(35)
1647 u8 unused016e0[0x016e0-0x016c8];
1648
1649/*0x016e0*/ u64 rxmac_cfg0_port[3];
1650#define VXGE_HW_RXMAC_CFG0_PORT_RMAC_EN vxge_mBIT(3)
1651#define VXGE_HW_RXMAC_CFG0_PORT_STRIP_FCS vxge_mBIT(7)
1652#define VXGE_HW_RXMAC_CFG0_PORT_DISCARD_PFRM vxge_mBIT(11)
1653#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_FCS_ERR vxge_mBIT(15)
1654#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_LONG_ERR vxge_mBIT(19)
1655#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_USIZED_ERR vxge_mBIT(23)
1656#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_LEN_MISMATCH vxge_mBIT(27)
1657#define VXGE_HW_RXMAC_CFG0_PORT_MAX_PYLD_LEN(val) vxge_vBIT(val, 50, 14)
1658 u8 unused01710[0x01710-0x016f8];
1659
1660/*0x01710*/ u64 rxmac_cfg2_port[3];
1661#define VXGE_HW_RXMAC_CFG2_PORT_PROM_EN vxge_mBIT(3)
1662/*0x01728*/ u64 rxmac_pause_cfg_port[3];
1663#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN vxge_mBIT(3)
1664#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN vxge_mBIT(7)
1665#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_ACCEL_SEND(val) vxge_vBIT(val, 9, 3)
1666#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_DUAL_THR vxge_mBIT(15)
1667#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_HIGH_PTIME(val) vxge_vBIT(val, 20, 16)
1668#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_IGNORE_PF_FCS_ERR vxge_mBIT(39)
1669#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_IGNORE_PF_LEN_ERR vxge_mBIT(43)
1670#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_LIMITER_EN vxge_mBIT(47)
1671#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_MAX_LIMIT(val) vxge_vBIT(val, 48, 8)
1672#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_PERMIT_RATEMGMT_CTRL vxge_mBIT(59)
1673 u8 unused01758[0x01758-0x01740];
1674
1675/*0x01758*/ u64 rxmac_red_cfg0_port[3];
1676#define VXGE_HW_RXMAC_RED_CFG0_PORT_RED_EN_VP(n) vxge_mBIT(n)
1677/*0x01770*/ u64 rxmac_red_cfg1_port[3];
1678#define VXGE_HW_RXMAC_RED_CFG1_PORT_FINE_EN vxge_mBIT(3)
1679#define VXGE_HW_RXMAC_RED_CFG1_PORT_RED_EN_REPL_QUEUE vxge_mBIT(11)
1680/*0x01788*/ u64 rxmac_red_cfg2_port[3];
1681#define VXGE_HW_RXMAC_RED_CFG2_PORT_TRICKLE_EN_VP(n) vxge_mBIT(n)
1682/*0x017a0*/ u64 rxmac_link_util_port[3];
1683#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_RMAC_UTILIZATION(val) \
1684 vxge_vBIT(val, 1, 7)
1685#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_UTIL_CFG(val) vxge_vBIT(val, 8, 4)
1686#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_RMAC_FRAC_UTIL(val) \
1687 vxge_vBIT(val, 12, 4)
1688#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_PKT_WEIGHT(val) vxge_vBIT(val, 16, 4)
1689#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_RMAC_SCALE_FACTOR vxge_mBIT(23)
1690 u8 unused017d0[0x017d0-0x017b8];
1691
1692/*0x017d0*/ u64 rxmac_status_port[3];
1693#define VXGE_HW_RXMAC_STATUS_PORT_RMAC_RX_FRM_RCVD vxge_mBIT(3)
1694 u8 unused01800[0x01800-0x017e8];
1695
1696/*0x01800*/ u64 rxmac_rx_pa_cfg0;
1697#define VXGE_HW_RXMAC_RX_PA_CFG0_IGNORE_FRAME_ERR vxge_mBIT(3)
1698#define VXGE_HW_RXMAC_RX_PA_CFG0_SUPPORT_SNAP_AB_N vxge_mBIT(7)
1699#define VXGE_HW_RXMAC_RX_PA_CFG0_SEARCH_FOR_HAO vxge_mBIT(18)
1700#define VXGE_HW_RXMAC_RX_PA_CFG0_SUPPORT_MOBILE_IPV6_HDRS vxge_mBIT(19)
1701#define VXGE_HW_RXMAC_RX_PA_CFG0_IPV6_STOP_SEARCHING vxge_mBIT(23)
1702#define VXGE_HW_RXMAC_RX_PA_CFG0_NO_PS_IF_UNKNOWN vxge_mBIT(27)
1703#define VXGE_HW_RXMAC_RX_PA_CFG0_SEARCH_FOR_ETYPE vxge_mBIT(35)
1704#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_ANY_FRM_IF_L3_CSUM_ERR vxge_mBIT(39)
1705#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_OFFLD_FRM_IF_L3_CSUM_ERR vxge_mBIT(43)
1706#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_ANY_FRM_IF_L4_CSUM_ERR vxge_mBIT(47)
1707#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_OFFLD_FRM_IF_L4_CSUM_ERR vxge_mBIT(51)
1708#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_ANY_FRM_IF_RPA_ERR vxge_mBIT(55)
1709#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_OFFLD_FRM_IF_RPA_ERR vxge_mBIT(59)
1710#define VXGE_HW_RXMAC_RX_PA_CFG0_JUMBO_SNAP_EN vxge_mBIT(63)
1711/*0x01808*/ u64 rxmac_rx_pa_cfg1;
1712#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV4_TCP_INCL_PH vxge_mBIT(3)
1713#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV6_TCP_INCL_PH vxge_mBIT(7)
1714#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV4_UDP_INCL_PH vxge_mBIT(11)
1715#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV6_UDP_INCL_PH vxge_mBIT(15)
1716#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_L4_INCL_CF vxge_mBIT(19)
1717#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_STRIP_VLAN_TAG vxge_mBIT(23)
1718 u8 unused01828[0x01828-0x01810];
1719
1720/*0x01828*/ u64 rts_mgr_cfg0;
1721#define VXGE_HW_RTS_MGR_CFG0_RTS_DP_SP_PRIORITY vxge_mBIT(3)
1722#define VXGE_HW_RTS_MGR_CFG0_FLEX_L4PRTCL_VALUE(val) vxge_vBIT(val, 24, 8)
1723#define VXGE_HW_RTS_MGR_CFG0_ICMP_TRASH vxge_mBIT(35)
1724#define VXGE_HW_RTS_MGR_CFG0_TCPSYN_TRASH vxge_mBIT(39)
1725#define VXGE_HW_RTS_MGR_CFG0_ZL4PYLD_TRASH vxge_mBIT(43)
1726#define VXGE_HW_RTS_MGR_CFG0_L4PRTCL_TCP_TRASH vxge_mBIT(47)
1727#define VXGE_HW_RTS_MGR_CFG0_L4PRTCL_UDP_TRASH vxge_mBIT(51)
1728#define VXGE_HW_RTS_MGR_CFG0_L4PRTCL_FLEX_TRASH vxge_mBIT(55)
1729#define VXGE_HW_RTS_MGR_CFG0_IPFRAG_TRASH vxge_mBIT(59)
1730/*0x01830*/ u64 rts_mgr_cfg1;
1731#define VXGE_HW_RTS_MGR_CFG1_DA_ACTIVE_TABLE vxge_mBIT(3)
1732#define VXGE_HW_RTS_MGR_CFG1_PN_ACTIVE_TABLE vxge_mBIT(7)
1733/*0x01838*/ u64 rts_mgr_criteria_priority;
1734#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_ETYPE(val) vxge_vBIT(val, 5, 3)
1735#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_ICMP_TCPSYN(val) vxge_vBIT(val, 9, 3)
1736#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_L4PN(val) vxge_vBIT(val, 13, 3)
1737#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_RANGE_L4PN(val) vxge_vBIT(val, 17, 3)
1738#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_RTH_IT(val) vxge_vBIT(val, 21, 3)
1739#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_DS(val) vxge_vBIT(val, 25, 3)
1740#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_QOS(val) vxge_vBIT(val, 29, 3)
1741#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_ZL4PYLD(val) vxge_vBIT(val, 33, 3)
1742#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_L4PRTCL(val) vxge_vBIT(val, 37, 3)
1743/*0x01840*/ u64 rts_mgr_da_pause_cfg;
1744#define VXGE_HW_RTS_MGR_DA_PAUSE_CFG_VPATH_VECTOR(val) vxge_vBIT(val, 0, 17)
1745/*0x01848*/ u64 rts_mgr_da_slow_proto_cfg;
1746#define VXGE_HW_RTS_MGR_DA_SLOW_PROTO_CFG_VPATH_VECTOR(val) \
1747 vxge_vBIT(val, 0, 17)
1748 u8 unused01890[0x01890-0x01850];
1749/*0x01890*/ u64 rts_mgr_cbasin_cfg;
1750 u8 unused01968[0x01968-0x01898];
1751
1752/*0x01968*/ u64 dbg_stat_rx_any_frms;
1753#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_PORT0_RX_ANY_FRMS(val) vxge_vBIT(val, 0, 8)
1754#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_PORT1_RX_ANY_FRMS(val) vxge_vBIT(val, 8, 8)
1755#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_PORT2_RX_ANY_FRMS(val) \
1756 vxge_vBIT(val, 16, 8)
1757 u8 unused01a00[0x01a00-0x01970];
1758
1759/*0x01a00*/ u64 rxmac_red_rate_vp[17];
1760#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR0(val) vxge_vBIT(val, 0, 4)
1761#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR1(val) vxge_vBIT(val, 4, 4)
1762#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR2(val) vxge_vBIT(val, 8, 4)
1763#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR3(val) vxge_vBIT(val, 12, 4)
1764#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR0(val) vxge_vBIT(val, 16, 4)
1765#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR1(val) vxge_vBIT(val, 20, 4)
1766#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR2(val) vxge_vBIT(val, 24, 4)
1767#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR3(val) vxge_vBIT(val, 28, 4)
1768 u8 unused01e00[0x01e00-0x01a88];
1769
1770/*0x01e00*/ u64 xgmac_int_status;
1771#define VXGE_HW_XGMAC_INT_STATUS_XMAC_GEN_ERR_XMAC_GEN_INT vxge_mBIT(3)
1772#define VXGE_HW_XGMAC_INT_STATUS_XMAC_LINK_ERR_PORT0_XMAC_LINK_INT_PORT0 \
1773 vxge_mBIT(7)
1774#define VXGE_HW_XGMAC_INT_STATUS_XMAC_LINK_ERR_PORT1_XMAC_LINK_INT_PORT1 \
1775 vxge_mBIT(11)
1776#define VXGE_HW_XGMAC_INT_STATUS_XGXS_GEN_ERR_XGXS_GEN_INT vxge_mBIT(15)
1777#define VXGE_HW_XGMAC_INT_STATUS_ASIC_NTWK_ERR_ASIC_NTWK_INT vxge_mBIT(19)
1778#define VXGE_HW_XGMAC_INT_STATUS_ASIC_GPIO_ERR_ASIC_GPIO_INT vxge_mBIT(23)
1779/*0x01e08*/ u64 xgmac_int_mask;
1780/*0x01e10*/ u64 xmac_gen_err_reg;
1781#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT0_ACTOR_CHURN_DETECTED \
1782 vxge_mBIT(7)
1783#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT0_PARTNER_CHURN_DETECTED \
1784 vxge_mBIT(11)
1785#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT0_RECEIVED_LACPDU vxge_mBIT(15)
1786#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT1_ACTOR_CHURN_DETECTED \
1787 vxge_mBIT(19)
1788#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT1_PARTNER_CHURN_DETECTED \
1789 vxge_mBIT(23)
1790#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT1_RECEIVED_LACPDU vxge_mBIT(27)
1791#define VXGE_HW_XMAC_GEN_ERR_REG_XLCM_LAG_FAILOVER_DETECTED vxge_mBIT(31)
1792#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE0_SG_ERR(val) \
1793 vxge_vBIT(val, 40, 2)
1794#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE0_DB_ERR(val) \
1795 vxge_vBIT(val, 42, 2)
1796#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE1_SG_ERR(val) \
1797 vxge_vBIT(val, 44, 2)
1798#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE1_DB_ERR(val) \
1799 vxge_vBIT(val, 46, 2)
1800#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE2_SG_ERR(val) \
1801 vxge_vBIT(val, 48, 2)
1802#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE2_DB_ERR(val) \
1803 vxge_vBIT(val, 50, 2)
1804#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE3_SG_ERR(val) \
1805 vxge_vBIT(val, 52, 2)
1806#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE3_DB_ERR(val) \
1807 vxge_vBIT(val, 54, 2)
1808#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE4_SG_ERR(val) \
1809 vxge_vBIT(val, 56, 2)
1810#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE4_DB_ERR(val) \
1811 vxge_vBIT(val, 58, 2)
1812#define VXGE_HW_XMAC_GEN_ERR_REG_XMACJ_XMAC_FSM_ERR vxge_mBIT(63)
1813/*0x01e18*/ u64 xmac_gen_err_mask;
1814/*0x01e20*/ u64 xmac_gen_err_alarm;
1815/*0x01e28*/ u64 xmac_link_err_port0_reg;
1816#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_DOWN vxge_mBIT(3)
1817#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_UP vxge_mBIT(7)
1818#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_WENT_DOWN vxge_mBIT(11)
1819#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_WENT_UP vxge_mBIT(15)
1820#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_REAFFIRMED_FAULT \
1821 vxge_mBIT(19)
1822#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_REAFFIRMED_OK vxge_mBIT(23)
1823#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_LINK_DOWN vxge_mBIT(27)
1824#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_LINK_UP vxge_mBIT(31)
1825#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_RATEMGMT_RATE_CHANGE vxge_mBIT(35)
1826#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_RATEMGMT_LASI_INV vxge_mBIT(39)
1827#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMDIO_MDIO_MGR_ACCESS_COMPLETE \
1828 vxge_mBIT(47)
1829/*0x01e30*/ u64 xmac_link_err_port0_mask;
1830/*0x01e38*/ u64 xmac_link_err_port0_alarm;
1831/*0x01e40*/ u64 xmac_link_err_port1_reg;
1832/*0x01e48*/ u64 xmac_link_err_port1_mask;
1833/*0x01e50*/ u64 xmac_link_err_port1_alarm;
1834/*0x01e58*/ u64 xgxs_gen_err_reg;
1835#define VXGE_HW_XGXS_GEN_ERR_REG_XGXS_XGXS_FSM_ERR vxge_mBIT(63)
1836/*0x01e60*/ u64 xgxs_gen_err_mask;
1837/*0x01e68*/ u64 xgxs_gen_err_alarm;
1838/*0x01e70*/ u64 asic_ntwk_err_reg;
1839#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_DOWN vxge_mBIT(3)
1840#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_UP vxge_mBIT(7)
1841#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_WENT_DOWN vxge_mBIT(11)
1842#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_WENT_UP vxge_mBIT(15)
1843#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT vxge_mBIT(19)
1844#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK vxge_mBIT(23)
1845/*0x01e78*/ u64 asic_ntwk_err_mask;
1846/*0x01e80*/ u64 asic_ntwk_err_alarm;
1847/*0x01e88*/ u64 asic_gpio_err_reg;
1848#define VXGE_HW_ASIC_GPIO_ERR_REG_XMACJ_GPIO_INT(n) vxge_mBIT(n)
1849/*0x01e90*/ u64 asic_gpio_err_mask;
1850/*0x01e98*/ u64 asic_gpio_err_alarm;
1851/*0x01ea0*/ u64 xgmac_gen_status;
1852#define VXGE_HW_XGMAC_GEN_STATUS_XMACJ_NTWK_OK vxge_mBIT(3)
1853#define VXGE_HW_XGMAC_GEN_STATUS_XMACJ_NTWK_DATA_RATE vxge_mBIT(11)
1854/*0x01ea8*/ u64 xgmac_gen_fw_memo_status;
1855#define VXGE_HW_XGMAC_GEN_FW_MEMO_STATUS_XMACJ_EVENTS_PENDING(val) \
1856 vxge_vBIT(val, 0, 17)
1857/*0x01eb0*/ u64 xgmac_gen_fw_memo_mask;
1858#define VXGE_HW_XGMAC_GEN_FW_MEMO_MASK_MASK(val) vxge_vBIT(val, 0, 64)
1859/*0x01eb8*/ u64 xgmac_gen_fw_vpath_to_vsport_status;
1860#define VXGE_HW_XGMAC_GEN_FW_VPATH_TO_VSPORT_STATUS_XMACJ_EVENTS_PENDING(val) \
1861 vxge_vBIT(val, 0, 17)
1862/*0x01ec0*/ u64 xgmac_main_cfg_port[2];
1863#define VXGE_HW_XGMAC_MAIN_CFG_PORT_PORT_EN vxge_mBIT(3)
1864 u8 unused01f40[0x01f40-0x01ed0];
1865
1866/*0x01f40*/ u64 xmac_gen_cfg;
1867#define VXGE_HW_XMAC_GEN_CFG_RATEMGMT_MAC_RATE_SEL(val) vxge_vBIT(val, 2, 2)
1868#define VXGE_HW_XMAC_GEN_CFG_TX_HEAD_DROP_WHEN_FAULT vxge_mBIT(7)
1869#define VXGE_HW_XMAC_GEN_CFG_FAULT_BEHAVIOUR vxge_mBIT(27)
1870#define VXGE_HW_XMAC_GEN_CFG_PERIOD_NTWK_UP(val) vxge_vBIT(val, 28, 4)
1871#define VXGE_HW_XMAC_GEN_CFG_PERIOD_NTWK_DOWN(val) vxge_vBIT(val, 32, 4)
1872/*0x01f48*/ u64 xmac_timestamp;
1873#define VXGE_HW_XMAC_TIMESTAMP_EN vxge_mBIT(3)
1874#define VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(val) vxge_vBIT(val, 6, 2)
1875#define VXGE_HW_XMAC_TIMESTAMP_INTERVAL(val) vxge_vBIT(val, 12, 4)
1876#define VXGE_HW_XMAC_TIMESTAMP_TIMER_RESTART vxge_mBIT(19)
1877#define VXGE_HW_XMAC_TIMESTAMP_XMACJ_ROLLOVER_CNT(val) vxge_vBIT(val, 32, 16)
1878/*0x01f50*/ u64 xmac_stats_gen_cfg;
1879#define VXGE_HW_XMAC_STATS_GEN_CFG_PRTAGGR_CUM_TIMER(val) vxge_vBIT(val, 4, 4)
1880#define VXGE_HW_XMAC_STATS_GEN_CFG_VPATH_CUM_TIMER(val) vxge_vBIT(val, 8, 4)
1881#define VXGE_HW_XMAC_STATS_GEN_CFG_VLAN_HANDLING vxge_mBIT(15)
1882/*0x01f58*/ u64 xmac_stats_sys_cmd;
1883#define VXGE_HW_XMAC_STATS_SYS_CMD_OP(val) vxge_vBIT(val, 5, 3)
1884#define VXGE_HW_XMAC_STATS_SYS_CMD_STROBE vxge_mBIT(15)
1885#define VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(val) vxge_vBIT(val, 27, 5)
1886#define VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(val) vxge_vBIT(val, 32, 8)
1887/*0x01f60*/ u64 xmac_stats_sys_data;
1888#define VXGE_HW_XMAC_STATS_SYS_DATA_XSMGR_DATA(val) vxge_vBIT(val, 0, 64)
1889 u8 unused01f80[0x01f80-0x01f68];
1890
1891/*0x01f80*/ u64 asic_ntwk_ctrl;
1892#define VXGE_HW_ASIC_NTWK_CTRL_REQ_TEST_NTWK vxge_mBIT(3)
1893#define VXGE_HW_ASIC_NTWK_CTRL_PORT0_REQ_TEST_PORT vxge_mBIT(11)
1894#define VXGE_HW_ASIC_NTWK_CTRL_PORT1_REQ_TEST_PORT vxge_mBIT(15)
1895/*0x01f88*/ u64 asic_ntwk_cfg_show_port_info;
1896#define VXGE_HW_ASIC_NTWK_CFG_SHOW_PORT_INFO_VP(n) vxge_mBIT(n)
1897/*0x01f90*/ u64 asic_ntwk_cfg_port_num;
1898#define VXGE_HW_ASIC_NTWK_CFG_PORT_NUM_VP(n) vxge_mBIT(n)
1899/*0x01f98*/ u64 xmac_cfg_port[3];
1900#define VXGE_HW_XMAC_CFG_PORT_XGMII_LOOPBACK vxge_mBIT(3)
1901#define VXGE_HW_XMAC_CFG_PORT_XGMII_REVERSE_LOOPBACK vxge_mBIT(7)
1902#define VXGE_HW_XMAC_CFG_PORT_XGMII_TX_BEHAV vxge_mBIT(11)
1903#define VXGE_HW_XMAC_CFG_PORT_XGMII_RX_BEHAV vxge_mBIT(15)
1904/*0x01fb0*/ u64 xmac_station_addr_port[2];
1905#define VXGE_HW_XMAC_STATION_ADDR_PORT_MAC_ADDR(val) vxge_vBIT(val, 0, 48)
1906 u8 unused02020[0x02020-0x01fc0];
1907
1908/*0x02020*/ u64 lag_cfg;
1909#define VXGE_HW_LAG_CFG_EN vxge_mBIT(3)
1910#define VXGE_HW_LAG_CFG_MODE(val) vxge_vBIT(val, 6, 2)
1911#define VXGE_HW_LAG_CFG_TX_DISCARD_BEHAV vxge_mBIT(11)
1912#define VXGE_HW_LAG_CFG_RX_DISCARD_BEHAV vxge_mBIT(15)
1913#define VXGE_HW_LAG_CFG_PREF_INDIV_PORT_NUM vxge_mBIT(19)
1914/*0x02028*/ u64 lag_status;
1915#define VXGE_HW_LAG_STATUS_XLCM_WAITING_TO_FAILBACK vxge_mBIT(3)
1916#define VXGE_HW_LAG_STATUS_XLCM_TIMER_VAL_COLD_FAILOVER(val) \
1917 vxge_vBIT(val, 8, 8)
1918/*0x02030*/ u64 lag_active_passive_cfg;
1919#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_HOT_STANDBY vxge_mBIT(3)
1920#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_LACP_DECIDES vxge_mBIT(7)
1921#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_PREF_ACTIVE_PORT_NUM vxge_mBIT(11)
1922#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_AUTO_FAILBACK vxge_mBIT(15)
1923#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_FAILBACK_EN vxge_mBIT(19)
1924#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_COLD_FAILOVER_TIMEOUT(val) \
1925 vxge_vBIT(val, 32, 16)
1926 u8 unused02040[0x02040-0x02038];
1927
1928/*0x02040*/ u64 lag_lacp_cfg;
1929#define VXGE_HW_LAG_LACP_CFG_EN vxge_mBIT(3)
1930#define VXGE_HW_LAG_LACP_CFG_LACP_BEGIN vxge_mBIT(7)
1931#define VXGE_HW_LAG_LACP_CFG_DISCARD_LACP vxge_mBIT(11)
1932#define VXGE_HW_LAG_LACP_CFG_LIBERAL_LEN_CHK vxge_mBIT(15)
1933/*0x02048*/ u64 lag_timer_cfg_1;
1934#define VXGE_HW_LAG_TIMER_CFG_1_FAST_PER(val) vxge_vBIT(val, 0, 16)
1935#define VXGE_HW_LAG_TIMER_CFG_1_SLOW_PER(val) vxge_vBIT(val, 16, 16)
1936#define VXGE_HW_LAG_TIMER_CFG_1_SHORT_TIMEOUT(val) vxge_vBIT(val, 32, 16)
1937#define VXGE_HW_LAG_TIMER_CFG_1_LONG_TIMEOUT(val) vxge_vBIT(val, 48, 16)
1938/*0x02050*/ u64 lag_timer_cfg_2;
1939#define VXGE_HW_LAG_TIMER_CFG_2_CHURN_DET(val) vxge_vBIT(val, 0, 16)
1940#define VXGE_HW_LAG_TIMER_CFG_2_AGGR_WAIT(val) vxge_vBIT(val, 16, 16)
1941#define VXGE_HW_LAG_TIMER_CFG_2_SHORT_TIMER_SCALE(val) vxge_vBIT(val, 32, 16)
1942#define VXGE_HW_LAG_TIMER_CFG_2_LONG_TIMER_SCALE(val) vxge_vBIT(val, 48, 16)
1943/*0x02058*/ u64 lag_sys_id;
1944#define VXGE_HW_LAG_SYS_ID_ADDR(val) vxge_vBIT(val, 0, 48)
1945#define VXGE_HW_LAG_SYS_ID_USE_PORT_ADDR vxge_mBIT(51)
1946#define VXGE_HW_LAG_SYS_ID_ADDR_SEL vxge_mBIT(55)
1947/*0x02060*/ u64 lag_sys_cfg;
1948#define VXGE_HW_LAG_SYS_CFG_SYS_PRI(val) vxge_vBIT(val, 0, 16)
1949 u8 unused02070[0x02070-0x02068];
1950
1951/*0x02070*/ u64 lag_aggr_addr_cfg[2];
1952#define VXGE_HW_LAG_AGGR_ADDR_CFG_ADDR(val) vxge_vBIT(val, 0, 48)
1953#define VXGE_HW_LAG_AGGR_ADDR_CFG_USE_PORT_ADDR vxge_mBIT(51)
1954#define VXGE_HW_LAG_AGGR_ADDR_CFG_ADDR_SEL vxge_mBIT(55)
1955/*0x02080*/ u64 lag_aggr_id_cfg[2];
1956#define VXGE_HW_LAG_AGGR_ID_CFG_ID(val) vxge_vBIT(val, 0, 16)
1957/*0x02090*/ u64 lag_aggr_admin_key[2];
1958#define VXGE_HW_LAG_AGGR_ADMIN_KEY_KEY(val) vxge_vBIT(val, 0, 16)
1959/*0x020a0*/ u64 lag_aggr_alt_admin_key;
1960#define VXGE_HW_LAG_AGGR_ALT_ADMIN_KEY_KEY(val) vxge_vBIT(val, 0, 16)
1961#define VXGE_HW_LAG_AGGR_ALT_ADMIN_KEY_ALT_AGGR vxge_mBIT(19)
1962/*0x020a8*/ u64 lag_aggr_oper_key[2];
1963#define VXGE_HW_LAG_AGGR_OPER_KEY_LAGC_KEY(val) vxge_vBIT(val, 0, 16)
1964/*0x020b8*/ u64 lag_aggr_partner_sys_id[2];
1965#define VXGE_HW_LAG_AGGR_PARTNER_SYS_ID_LAGC_ADDR(val) vxge_vBIT(val, 0, 48)
1966/*0x020c8*/ u64 lag_aggr_partner_info[2];
1967#define VXGE_HW_LAG_AGGR_PARTNER_INFO_LAGC_SYS_PRI(val) vxge_vBIT(val, 0, 16)
1968#define VXGE_HW_LAG_AGGR_PARTNER_INFO_LAGC_OPER_KEY(val) \
1969 vxge_vBIT(val, 16, 16)
1970/*0x020d8*/ u64 lag_aggr_state[2];
1971#define VXGE_HW_LAG_AGGR_STATE_LAGC_TX vxge_mBIT(3)
1972#define VXGE_HW_LAG_AGGR_STATE_LAGC_RX vxge_mBIT(7)
1973#define VXGE_HW_LAG_AGGR_STATE_LAGC_READY vxge_mBIT(11)
1974#define VXGE_HW_LAG_AGGR_STATE_LAGC_INDIVIDUAL vxge_mBIT(15)
1975 u8 unused020f0[0x020f0-0x020e8];
1976
1977/*0x020f0*/ u64 lag_port_cfg[2];
1978#define VXGE_HW_LAG_PORT_CFG_EN vxge_mBIT(3)
1979#define VXGE_HW_LAG_PORT_CFG_DISCARD_SLOW_PROTO vxge_mBIT(7)
1980#define VXGE_HW_LAG_PORT_CFG_HOST_CHOSEN_AGGR vxge_mBIT(11)
1981#define VXGE_HW_LAG_PORT_CFG_DISCARD_UNKNOWN_SLOW_PROTO vxge_mBIT(15)
1982/*0x02100*/ u64 lag_port_actor_admin_cfg[2];
1983#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_PORT_NUM(val) vxge_vBIT(val, 0, 16)
1984#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_PORT_PRI(val) vxge_vBIT(val, 16, 16)
1985#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_KEY_10G(val) vxge_vBIT(val, 32, 16)
1986#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_KEY_1G(val) vxge_vBIT(val, 48, 16)
1987/*0x02110*/ u64 lag_port_actor_admin_state[2];
1988#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_LACP_ACTIVITY vxge_mBIT(3)
1989#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_LACP_TIMEOUT vxge_mBIT(7)
1990#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_AGGREGATION vxge_mBIT(11)
1991#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_SYNCHRONIZATION vxge_mBIT(15)
1992#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_COLLECTING vxge_mBIT(19)
1993#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_DISTRIBUTING vxge_mBIT(23)
1994#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_DEFAULTED vxge_mBIT(27)
1995#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_EXPIRED vxge_mBIT(31)
1996/*0x02120*/ u64 lag_port_partner_admin_sys_id[2];
1997#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_SYS_ID_ADDR(val) vxge_vBIT(val, 0, 48)
1998/*0x02130*/ u64 lag_port_partner_admin_cfg[2];
1999#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_SYS_PRI(val) vxge_vBIT(val, 0, 16)
2000#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_KEY(val) vxge_vBIT(val, 16, 16)
2001#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_PORT_NUM(val) \
2002 vxge_vBIT(val, 32, 16)
2003#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_PORT_PRI(val) \
2004 vxge_vBIT(val, 48, 16)
2005/*0x02140*/ u64 lag_port_partner_admin_state[2];
2006#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_LACP_ACTIVITY vxge_mBIT(3)
2007#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_LACP_TIMEOUT vxge_mBIT(7)
2008#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_AGGREGATION vxge_mBIT(11)
2009#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_SYNCHRONIZATION vxge_mBIT(15)
2010#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_COLLECTING vxge_mBIT(19)
2011#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_DISTRIBUTING vxge_mBIT(23)
2012#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_DEFAULTED vxge_mBIT(27)
2013#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_EXPIRED vxge_mBIT(31)
2014/*0x02150*/ u64 lag_port_to_aggr[2];
2015#define VXGE_HW_LAG_PORT_TO_AGGR_LAGC_AGGR_ID(val) vxge_vBIT(val, 0, 16)
2016#define VXGE_HW_LAG_PORT_TO_AGGR_LAGC_AGGR_VLD_ID vxge_mBIT(19)
2017/*0x02160*/ u64 lag_port_actor_oper_key[2];
2018#define VXGE_HW_LAG_PORT_ACTOR_OPER_KEY_LAGC_KEY(val) vxge_vBIT(val, 0, 16)
2019/*0x02170*/ u64 lag_port_actor_oper_state[2];
2020#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_LACP_ACTIVITY vxge_mBIT(3)
2021#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_LACP_TIMEOUT vxge_mBIT(7)
2022#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_AGGREGATION vxge_mBIT(11)
2023#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_SYNCHRONIZATION vxge_mBIT(15)
2024#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_COLLECTING vxge_mBIT(19)
2025#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_DISTRIBUTING vxge_mBIT(23)
2026#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_DEFAULTED vxge_mBIT(27)
2027#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_EXPIRED vxge_mBIT(31)
2028/*0x02180*/ u64 lag_port_partner_oper_sys_id[2];
2029#define VXGE_HW_LAG_PORT_PARTNER_OPER_SYS_ID_LAGC_ADDR(val) \
2030 vxge_vBIT(val, 0, 48)
2031/*0x02190*/ u64 lag_port_partner_oper_info[2];
2032#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_SYS_PRI(val) \
2033 vxge_vBIT(val, 0, 16)
2034#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_KEY(val) \
2035 vxge_vBIT(val, 16, 16)
2036#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_PORT_NUM(val) \
2037 vxge_vBIT(val, 32, 16)
2038#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_PORT_PRI(val) \
2039 vxge_vBIT(val, 48, 16)
2040/*0x021a0*/ u64 lag_port_partner_oper_state[2];
2041#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_LACP_ACTIVITY vxge_mBIT(3)
2042#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_LACP_TIMEOUT vxge_mBIT(7)
2043#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_AGGREGATION vxge_mBIT(11)
2044#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_SYNCHRONIZATION \
2045 vxge_mBIT(15)
2046#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_COLLECTING vxge_mBIT(19)
2047#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_DISTRIBUTING vxge_mBIT(23)
2048#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_DEFAULTED vxge_mBIT(27)
2049#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_EXPIRED vxge_mBIT(31)
2050/*0x021b0*/ u64 lag_port_state_vars[2];
2051#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_READY vxge_mBIT(3)
2052#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_SELECTED(val) vxge_vBIT(val, 6, 2)
2053#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_AGGR_NUM vxge_mBIT(11)
2054#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PORT_MOVED vxge_mBIT(15)
2055#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PORT_ENABLED vxge_mBIT(18)
2056#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PORT_DISABLED vxge_mBIT(19)
2057#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_NTT vxge_mBIT(23)
2058#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_CHURN vxge_mBIT(27)
2059#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_CHURN vxge_mBIT(31)
2060#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_INFO_LEN_MISMATCH \
2061 vxge_mBIT(32)
2062#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_INFO_LEN_MISMATCH \
2063 vxge_mBIT(33)
2064#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_COLL_INFO_LEN_MISMATCH vxge_mBIT(34)
2065#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_TERM_INFO_LEN_MISMATCH vxge_mBIT(35)
2066#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_RX_FSM_STATE(val) vxge_vBIT(val, 37, 3)
2067#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_MUX_FSM_STATE(val) \
2068 vxge_vBIT(val, 41, 3)
2069#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_MUX_REASON(val) vxge_vBIT(val, 44, 4)
2070#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_CHURN_STATE vxge_mBIT(54)
2071#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_CHURN_STATE vxge_mBIT(55)
2072#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_CHURN_COUNT(val) \
2073 vxge_vBIT(val, 56, 4)
2074#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_CHURN_COUNT(val) \
2075 vxge_vBIT(val, 60, 4)
2076/*0x021c0*/ u64 lag_port_timer_cntr[2];
2077#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_CURRENT_WHILE(val) vxge_vBIT(val, 0, 8)
2078#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_PERIODIC_WHILE(val) \
2079 vxge_vBIT(val, 8, 8)
2080#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_WAIT_WHILE(val) vxge_vBIT(val, 16, 8)
2081#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_TX_LACP(val) vxge_vBIT(val, 24, 8)
2082#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_ACTOR_SYNC_TRANSITION_COUNT(val) \
2083 vxge_vBIT(val, 32, 8)
2084#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_PARTNER_SYNC_TRANSITION_COUNT(val) \
2085 vxge_vBIT(val, 40, 8)
2086#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_ACTOR_CHANGE_COUNT(val) \
2087 vxge_vBIT(val, 48, 8)
2088#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_PARTNER_CHANGE_COUNT(val) \
2089 vxge_vBIT(val, 56, 8)
2090 u8 unused02208[0x02700-0x021d0];
2091
2092/*0x02700*/ u64 rtdma_int_status;
2093#define VXGE_HW_RTDMA_INT_STATUS_PDA_ALARM_PDA_INT vxge_mBIT(1)
2094#define VXGE_HW_RTDMA_INT_STATUS_PCC_ERROR_PCC_INT vxge_mBIT(2)
2095#define VXGE_HW_RTDMA_INT_STATUS_LSO_ERROR_LSO_INT vxge_mBIT(4)
2096#define VXGE_HW_RTDMA_INT_STATUS_SM_ERROR_SM_INT vxge_mBIT(5)
2097/*0x02708*/ u64 rtdma_int_mask;
2098/*0x02710*/ u64 pda_alarm_reg;
2099#define VXGE_HW_PDA_ALARM_REG_PDA_HSC_FIFO_ERR vxge_mBIT(0)
2100#define VXGE_HW_PDA_ALARM_REG_PDA_SM_ERR vxge_mBIT(1)
2101/*0x02718*/ u64 pda_alarm_mask;
2102/*0x02720*/ u64 pda_alarm_alarm;
2103/*0x02728*/ u64 pcc_error_reg;
2104#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_FRM_BUF_SBE(n) vxge_mBIT(n)
2105#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_TXDO_SBE(n) vxge_mBIT(n)
2106#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_FRM_BUF_DBE(n) vxge_mBIT(n)
2107#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_TXDO_DBE(n) vxge_mBIT(n)
2108#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_FSM_ERR_ALARM(n) vxge_mBIT(n)
2109#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_SERR(n) vxge_mBIT(n)
2110/*0x02730*/ u64 pcc_error_mask;
2111/*0x02738*/ u64 pcc_error_alarm;
2112/*0x02740*/ u64 lso_error_reg;
2113#define VXGE_HW_LSO_ERROR_REG_PCC_LSO_ABORT(n) vxge_mBIT(n)
2114#define VXGE_HW_LSO_ERROR_REG_PCC_LSO_FSM_ERR_ALARM(n) vxge_mBIT(n)
2115/*0x02748*/ u64 lso_error_mask;
2116/*0x02750*/ u64 lso_error_alarm;
2117/*0x02758*/ u64 sm_error_reg;
2118#define VXGE_HW_SM_ERROR_REG_SM_FSM_ERR_ALARM vxge_mBIT(15)
2119/*0x02760*/ u64 sm_error_mask;
2120/*0x02768*/ u64 sm_error_alarm;
2121
2122 u8 unused027a8[0x027a8-0x02770];
2123
2124/*0x027a8*/ u64 txd_ownership_ctrl;
2125#define VXGE_HW_TXD_OWNERSHIP_CTRL_KEEP_OWNERSHIP vxge_mBIT(7)
2126/*0x027b0*/ u64 pcc_cfg;
2127#define VXGE_HW_PCC_CFG_PCC_ENABLE(n) vxge_mBIT(n)
2128#define VXGE_HW_PCC_CFG_PCC_ECC_ENABLE_N(n) vxge_mBIT(n)
2129/*0x027b8*/ u64 pcc_control;
2130#define VXGE_HW_PCC_CONTROL_FE_ENABLE(val) vxge_vBIT(val, 6, 2)
2131#define VXGE_HW_PCC_CONTROL_EARLY_ASSIGN_EN vxge_mBIT(15)
2132#define VXGE_HW_PCC_CONTROL_UNBLOCK_DB_ERR vxge_mBIT(31)
2133/*0x027c0*/ u64 pda_status1;
2134#define VXGE_HW_PDA_STATUS1_PDA_WRAP_0_CTR(val) vxge_vBIT(val, 4, 4)
2135#define VXGE_HW_PDA_STATUS1_PDA_WRAP_1_CTR(val) vxge_vBIT(val, 12, 4)
2136#define VXGE_HW_PDA_STATUS1_PDA_WRAP_2_CTR(val) vxge_vBIT(val, 20, 4)
2137#define VXGE_HW_PDA_STATUS1_PDA_WRAP_3_CTR(val) vxge_vBIT(val, 28, 4)
2138#define VXGE_HW_PDA_STATUS1_PDA_WRAP_4_CTR(val) vxge_vBIT(val, 36, 4)
2139#define VXGE_HW_PDA_STATUS1_PDA_WRAP_5_CTR(val) vxge_vBIT(val, 44, 4)
2140#define VXGE_HW_PDA_STATUS1_PDA_WRAP_6_CTR(val) vxge_vBIT(val, 52, 4)
2141#define VXGE_HW_PDA_STATUS1_PDA_WRAP_7_CTR(val) vxge_vBIT(val, 60, 4)
2142/*0x027c8*/ u64 rtdma_bw_timer;
2143#define VXGE_HW_RTDMA_BW_TIMER_TIMER_CTRL(val) vxge_vBIT(val, 12, 4)
2144
2145 u8 unused02900[0x02900-0x027d0];
2146/*0x02900*/ u64 g3cmct_int_status;
2147#define VXGE_HW_G3CMCT_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
2148/*0x02908*/ u64 g3cmct_int_mask;
2149/*0x02910*/ u64 g3cmct_err_reg;
2150#define VXGE_HW_G3CMCT_ERR_REG_G3IF_SM_ERR vxge_mBIT(4)
2151#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_DECC vxge_mBIT(5)
2152#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_U_DECC vxge_mBIT(6)
2153#define VXGE_HW_G3CMCT_ERR_REG_G3IF_CTRL_FIFO_DECC vxge_mBIT(7)
2154#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_SECC vxge_mBIT(29)
2155#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_U_SECC vxge_mBIT(30)
2156#define VXGE_HW_G3CMCT_ERR_REG_G3IF_CTRL_FIFO_SECC vxge_mBIT(31)
2157/*0x02918*/ u64 g3cmct_err_mask;
2158/*0x02920*/ u64 g3cmct_err_alarm;
2159 u8 unused03000[0x03000-0x02928];
2160
2161/*0x03000*/ u64 mc_int_status;
2162#define VXGE_HW_MC_INT_STATUS_MC_ERR_MC_INT vxge_mBIT(3)
2163#define VXGE_HW_MC_INT_STATUS_GROCRC_ALARM_ROCRC_INT vxge_mBIT(7)
2164#define VXGE_HW_MC_INT_STATUS_FAU_GEN_ERR_FAU_GEN_INT vxge_mBIT(11)
2165#define VXGE_HW_MC_INT_STATUS_FAU_ECC_ERR_FAU_ECC_INT vxge_mBIT(15)
2166/*0x03008*/ u64 mc_int_mask;
2167/*0x03010*/ u64 mc_err_reg;
2168#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_SG_ERR_A vxge_mBIT(3)
2169#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_SG_ERR_B vxge_mBIT(4)
2170#define VXGE_HW_MC_ERR_REG_MC_G3IF_RD_FIFO_ECC_SG_ERR vxge_mBIT(5)
2171#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_SG_ERR_0 vxge_mBIT(6)
2172#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_SG_ERR_1 vxge_mBIT(7)
2173#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_DB_ERR_A vxge_mBIT(10)
2174#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_DB_ERR_B vxge_mBIT(11)
2175#define VXGE_HW_MC_ERR_REG_MC_G3IF_RD_FIFO_ECC_DB_ERR vxge_mBIT(12)
2176#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_DB_ERR_0 vxge_mBIT(13)
2177#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_DB_ERR_1 vxge_mBIT(14)
2178#define VXGE_HW_MC_ERR_REG_MC_SM_ERR vxge_mBIT(15)
2179/*0x03018*/ u64 mc_err_mask;
2180/*0x03020*/ u64 mc_err_alarm;
2181/*0x03028*/ u64 grocrc_alarm_reg;
2182#define VXGE_HW_GROCRC_ALARM_REG_XFMD_WR_FIFO_ERR vxge_mBIT(3)
2183#define VXGE_HW_GROCRC_ALARM_REG_WDE2MSR_RD_FIFO_ERR vxge_mBIT(7)
2184/*0x03030*/ u64 grocrc_alarm_mask;
2185/*0x03038*/ u64 grocrc_alarm_alarm;
2186 u8 unused03100[0x03100-0x03040];
2187
2188/*0x03100*/ u64 rx_thresh_cfg_repl;
2189#define VXGE_HW_RX_THRESH_CFG_REPL_PAUSE_LOW_THR(val) vxge_vBIT(val, 0, 8)
2190#define VXGE_HW_RX_THRESH_CFG_REPL_PAUSE_HIGH_THR(val) vxge_vBIT(val, 8, 8)
2191#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_0(val) vxge_vBIT(val, 16, 8)
2192#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_1(val) vxge_vBIT(val, 24, 8)
2193#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_2(val) vxge_vBIT(val, 32, 8)
2194#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_3(val) vxge_vBIT(val, 40, 8)
2195#define VXGE_HW_RX_THRESH_CFG_REPL_GLOBAL_WOL_EN vxge_mBIT(62)
2196#define VXGE_HW_RX_THRESH_CFG_REPL_EXACT_VP_MATCH_REQ vxge_mBIT(63)
2197 u8 unused033b8[0x033b8-0x03108];
2198
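The rx_thresh_cfg_repl fields above illustrate the bit-numbering convention used by every macro in this header: vxge_mBIT(loc) selects a single bit counted from the most significant end of the 64-bit register, and vxge_vBIT(val, loc, sz) places an sz-bit field starting at that MSB-relative offset. A minimal sketch of composing this register value from its field macros, assuming the helper definitions quoted in the comment (they mirror the MSB-first shifts used elsewhere in the vxge driver) and using an illustrative function name that is not part of the header:

/*
 * Illustrative sketch only -- not part of vxge-reg.h.  Assumed helpers:
 *   #define vxge_mBIT(loc)          (0x8000000000000000ULL >> (loc))
 *   #define vxge_vBIT(val, loc, sz) (((u64)(val)) << (64 - (loc) - (sz)))
 */
static inline u64 example_rx_thresh_cfg_repl(void)
{
	return VXGE_HW_RX_THRESH_CFG_REPL_PAUSE_LOW_THR(0x10) |	/* bits 0..7 (MSB byte) */
	       VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_0(0x20) |	/* bits 16..23 */
	       VXGE_HW_RX_THRESH_CFG_REPL_GLOBAL_WOL_EN;	/* single bit 62 */
}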
2199/*0x033b8*/ u64 fbmc_ecc_cfg;
2200#define VXGE_HW_FBMC_ECC_CFG_ENABLE(val) vxge_vBIT(val, 3, 5)
2201 u8 unused03400[0x03400-0x033c0];
2202
2203/*0x03400*/ u64 pcipif_int_status;
2204#define VXGE_HW_PCIPIF_INT_STATUS_DBECC_ERR_DBECC_ERR_INT vxge_mBIT(3)
2205#define VXGE_HW_PCIPIF_INT_STATUS_SBECC_ERR_SBECC_ERR_INT vxge_mBIT(7)
2206#define VXGE_HW_PCIPIF_INT_STATUS_GENERAL_ERR_GENERAL_ERR_INT vxge_mBIT(11)
2207#define VXGE_HW_PCIPIF_INT_STATUS_SRPCIM_MSG_SRPCIM_MSG_INT vxge_mBIT(15)
2208#define VXGE_HW_PCIPIF_INT_STATUS_MRPCIM_SPARE_R1_MRPCIM_SPARE_R1_INT \
2209 vxge_mBIT(19)
2210/*0x03408*/ u64 pcipif_int_mask;
2211/*0x03410*/ u64 dbecc_err_reg;
2212#define VXGE_HW_DBECC_ERR_REG_PCI_RETRY_BUF_DB_ERR vxge_mBIT(3)
2213#define VXGE_HW_DBECC_ERR_REG_PCI_RETRY_SOT_DB_ERR vxge_mBIT(7)
2214#define VXGE_HW_DBECC_ERR_REG_PCI_P_HDR_DB_ERR vxge_mBIT(11)
2215#define VXGE_HW_DBECC_ERR_REG_PCI_P_DATA_DB_ERR vxge_mBIT(15)
2216#define VXGE_HW_DBECC_ERR_REG_PCI_NP_HDR_DB_ERR vxge_mBIT(19)
2217#define VXGE_HW_DBECC_ERR_REG_PCI_NP_DATA_DB_ERR vxge_mBIT(23)
2218/*0x03418*/ u64 dbecc_err_mask;
2219/*0x03420*/ u64 dbecc_err_alarm;
2220/*0x03428*/ u64 sbecc_err_reg;
2221#define VXGE_HW_SBECC_ERR_REG_PCI_RETRY_BUF_SG_ERR vxge_mBIT(3)
2222#define VXGE_HW_SBECC_ERR_REG_PCI_RETRY_SOT_SG_ERR vxge_mBIT(7)
2223#define VXGE_HW_SBECC_ERR_REG_PCI_P_HDR_SG_ERR vxge_mBIT(11)
2224#define VXGE_HW_SBECC_ERR_REG_PCI_P_DATA_SG_ERR vxge_mBIT(15)
2225#define VXGE_HW_SBECC_ERR_REG_PCI_NP_HDR_SG_ERR vxge_mBIT(19)
2226#define VXGE_HW_SBECC_ERR_REG_PCI_NP_DATA_SG_ERR vxge_mBIT(23)
2227/*0x03430*/ u64 sbecc_err_mask;
2228/*0x03438*/ u64 sbecc_err_alarm;
2229/*0x03440*/ u64 general_err_reg;
2230#define VXGE_HW_GENERAL_ERR_REG_PCI_DROPPED_ILLEGAL_CFG vxge_mBIT(3)
2231#define VXGE_HW_GENERAL_ERR_REG_PCI_ILLEGAL_MEM_MAP_PROG vxge_mBIT(7)
2232#define VXGE_HW_GENERAL_ERR_REG_PCI_LINK_RST_FSM_ERR vxge_mBIT(11)
2233#define VXGE_HW_GENERAL_ERR_REG_PCI_RX_ILLEGAL_TLP_VPLANE vxge_mBIT(15)
2234#define VXGE_HW_GENERAL_ERR_REG_PCI_TRAINING_RESET_DET vxge_mBIT(19)
2235#define VXGE_HW_GENERAL_ERR_REG_PCI_PCI_LINK_DOWN_DET vxge_mBIT(23)
2236#define VXGE_HW_GENERAL_ERR_REG_PCI_RESET_ACK_DLLP vxge_mBIT(27)
2237/*0x03448*/ u64 general_err_mask;
2238/*0x03450*/ u64 general_err_alarm;
2239/*0x03458*/ u64 srpcim_msg_reg;
2240#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE0_RMSG_INT \
2241 vxge_mBIT(0)
2242#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE1_RMSG_INT \
2243 vxge_mBIT(1)
2244#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE2_RMSG_INT \
2245 vxge_mBIT(2)
2246#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE3_RMSG_INT \
2247 vxge_mBIT(3)
2248#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE4_RMSG_INT \
2249 vxge_mBIT(4)
2250#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE5_RMSG_INT \
2251 vxge_mBIT(5)
2252#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE6_RMSG_INT \
2253 vxge_mBIT(6)
2254#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE7_RMSG_INT \
2255 vxge_mBIT(7)
2256#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE8_RMSG_INT \
2257 vxge_mBIT(8)
2258#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE9_RMSG_INT \
2259 vxge_mBIT(9)
2260#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE10_RMSG_INT \
2261 vxge_mBIT(10)
2262#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE11_RMSG_INT \
2263 vxge_mBIT(11)
2264#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE12_RMSG_INT \
2265 vxge_mBIT(12)
2266#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE13_RMSG_INT \
2267 vxge_mBIT(13)
2268#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE14_RMSG_INT \
2269 vxge_mBIT(14)
2270#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE15_RMSG_INT \
2271 vxge_mBIT(15)
2272#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE16_RMSG_INT \
2273 vxge_mBIT(16)
2274/*0x03460*/ u64 srpcim_msg_mask;
2275/*0x03468*/ u64 srpcim_msg_alarm;
2276 u8 unused03600[0x03600-0x03470];
2277
2278/*0x03600*/ u64 gcmg1_int_status;
2279#define VXGE_HW_GCMG1_INT_STATUS_GSSCC_ERR_GSSCC_INT vxge_mBIT(0)
2280#define VXGE_HW_GCMG1_INT_STATUS_GSSC0_ERR0_GSSC0_0_INT vxge_mBIT(1)
2281#define VXGE_HW_GCMG1_INT_STATUS_GSSC0_ERR1_GSSC0_1_INT vxge_mBIT(2)
2282#define VXGE_HW_GCMG1_INT_STATUS_GSSC1_ERR0_GSSC1_0_INT vxge_mBIT(3)
2283#define VXGE_HW_GCMG1_INT_STATUS_GSSC1_ERR1_GSSC1_1_INT vxge_mBIT(4)
2284#define VXGE_HW_GCMG1_INT_STATUS_GSSC2_ERR0_GSSC2_0_INT vxge_mBIT(5)
2285#define VXGE_HW_GCMG1_INT_STATUS_GSSC2_ERR1_GSSC2_1_INT vxge_mBIT(6)
2286#define VXGE_HW_GCMG1_INT_STATUS_UQM_ERR_UQM_INT vxge_mBIT(7)
2287#define VXGE_HW_GCMG1_INT_STATUS_GQCC_ERR_GQCC_INT vxge_mBIT(8)
2288/*0x03608*/ u64 gcmg1_int_mask;
2289 u8 unused03a00[0x03a00-0x03610];
2290
2291/*0x03a00*/ u64 pcmg1_int_status;
2292#define VXGE_HW_PCMG1_INT_STATUS_PSSCC_ERR_PSSCC_INT vxge_mBIT(0)
2293#define VXGE_HW_PCMG1_INT_STATUS_PQCC_ERR_PQCC_INT vxge_mBIT(1)
2294#define VXGE_HW_PCMG1_INT_STATUS_PQCC_CQM_ERR_PQCC_CQM_INT vxge_mBIT(2)
2295#define VXGE_HW_PCMG1_INT_STATUS_PQCC_SQM_ERR_PQCC_SQM_INT vxge_mBIT(3)
2296/*0x03a08*/ u64 pcmg1_int_mask;
2297 u8 unused04000[0x04000-0x03a10];
2298
2299/*0x04000*/ u64 one_int_status;
2300#define VXGE_HW_ONE_INT_STATUS_RXPE_ERR_RXPE_INT vxge_mBIT(7)
2301#define VXGE_HW_ONE_INT_STATUS_TXPE_BCC_MEM_SG_ECC_ERR_TXPE_BCC_MEM_SG_ECC_INT \
2302 vxge_mBIT(13)
2303#define VXGE_HW_ONE_INT_STATUS_TXPE_BCC_MEM_DB_ECC_ERR_TXPE_BCC_MEM_DB_ECC_INT \
2304 vxge_mBIT(14)
2305#define VXGE_HW_ONE_INT_STATUS_TXPE_ERR_TXPE_INT vxge_mBIT(15)
2306#define VXGE_HW_ONE_INT_STATUS_DLM_ERR_DLM_INT vxge_mBIT(23)
2307#define VXGE_HW_ONE_INT_STATUS_PE_ERR_PE_INT vxge_mBIT(31)
2308#define VXGE_HW_ONE_INT_STATUS_RPE_ERR_RPE_INT vxge_mBIT(39)
2309#define VXGE_HW_ONE_INT_STATUS_RPE_FSM_ERR_RPE_FSM_INT vxge_mBIT(47)
2310#define VXGE_HW_ONE_INT_STATUS_OES_ERR_OES_INT vxge_mBIT(55)
2311/*0x04008*/ u64 one_int_mask;
2312 u8 unused04818[0x04818-0x04010];
2313
2314/*0x04818*/ u64 noa_wct_ctrl;
2315#define VXGE_HW_NOA_WCT_CTRL_VP_INT_NUM vxge_mBIT(0)
2316/*0x04820*/ u64 rc_cfg2;
2317#define VXGE_HW_RC_CFG2_BUFF1_SIZE(val) vxge_vBIT(val, 0, 16)
2318#define VXGE_HW_RC_CFG2_BUFF2_SIZE(val) vxge_vBIT(val, 16, 16)
2319#define VXGE_HW_RC_CFG2_BUFF3_SIZE(val) vxge_vBIT(val, 32, 16)
2320#define VXGE_HW_RC_CFG2_BUFF4_SIZE(val) vxge_vBIT(val, 48, 16)
2321/*0x04828*/ u64 rc_cfg3;
2322#define VXGE_HW_RC_CFG3_BUFF5_SIZE(val) vxge_vBIT(val, 0, 16)
2323/*0x04830*/ u64 rx_multi_cast_ctrl1;
2324#define VXGE_HW_RX_MULTI_CAST_CTRL1_ENABLE vxge_mBIT(7)
2325#define VXGE_HW_RX_MULTI_CAST_CTRL1_DELAY_COUNT(val) vxge_vBIT(val, 11, 5)
2326/*0x04838*/ u64 rxdm_dbg_rd;
2327#define VXGE_HW_RXDM_DBG_RD_ADDR(val) vxge_vBIT(val, 0, 12)
2328#define VXGE_HW_RXDM_DBG_RD_ENABLE vxge_mBIT(31)
2329/*0x04840*/ u64 rxdm_dbg_rd_data;
2330#define VXGE_HW_RXDM_DBG_RD_DATA_RMC_RXDM_DBG_RD_DATA(val) vxge_vBIT(val, 0, 64)
2331/*0x04848*/ u64 rqa_top_prty_for_vh[17];
2332#define VXGE_HW_RQA_TOP_PRTY_FOR_VH_RQA_TOP_PRTY_FOR_VH(val) \
2333 vxge_vBIT(val, 59, 5)
2334 u8 unused04900[0x04900-0x048d0];
2335
2336/*0x04900*/ u64 tim_status;
2337#define VXGE_HW_TIM_STATUS_TIM_RESET_IN_PROGRESS vxge_mBIT(0)
2338/*0x04908*/ u64 tim_ecc_enable;
2339#define VXGE_HW_TIM_ECC_ENABLE_VBLS_N vxge_mBIT(7)
2340#define VXGE_HW_TIM_ECC_ENABLE_BMAP_N vxge_mBIT(15)
2341#define VXGE_HW_TIM_ECC_ENABLE_BMAP_MSG_N vxge_mBIT(23)
2342/*0x04910*/ u64 tim_bp_ctrl;
2343#define VXGE_HW_TIM_BP_CTRL_RD_XON vxge_mBIT(7)
2344#define VXGE_HW_TIM_BP_CTRL_WR_XON vxge_mBIT(15)
2345#define VXGE_HW_TIM_BP_CTRL_ROCRC_BYP vxge_mBIT(23)
2346/*0x04918*/ u64 tim_resource_assignment_vh[17];
2347#define VXGE_HW_TIM_RESOURCE_ASSIGNMENT_VH_BMAP_ROOT(val) vxge_vBIT(val, 0, 32)
2348/*0x049a0*/ u64 tim_bmap_mapping_vp_err[17];
2349#define VXGE_HW_TIM_BMAP_MAPPING_VP_ERR_TIM_DEST_VPATH(val) vxge_vBIT(val, 3, 5)
2350 u8 unused04b00[0x04b00-0x04a28];
2351
2352/*0x04b00*/ u64 gcmg2_int_status;
2353#define VXGE_HW_GCMG2_INT_STATUS_GXTMC_ERR_GXTMC_INT vxge_mBIT(7)
2354#define VXGE_HW_GCMG2_INT_STATUS_GCP_ERR_GCP_INT vxge_mBIT(15)
2355#define VXGE_HW_GCMG2_INT_STATUS_CMC_ERR_CMC_INT vxge_mBIT(23)
2356/*0x04b08*/ u64 gcmg2_int_mask;
2357/*0x04b10*/ u64 gxtmc_err_reg;
2358#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_MEM_DB_ERR(val) vxge_vBIT(val, 0, 4)
2359#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_MEM_SG_ERR(val) vxge_vBIT(val, 4, 4)
2360#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMC_RD_DATA_DB_ERR vxge_mBIT(8)
2361#define VXGE_HW_GXTMC_ERR_REG_XTMC_REQ_FIFO_ERR vxge_mBIT(9)
2362#define VXGE_HW_GXTMC_ERR_REG_XTMC_REQ_DATA_FIFO_ERR vxge_mBIT(10)
2363#define VXGE_HW_GXTMC_ERR_REG_XTMC_WR_RSP_FIFO_ERR vxge_mBIT(11)
2364#define VXGE_HW_GXTMC_ERR_REG_XTMC_RD_RSP_FIFO_ERR vxge_mBIT(12)
2365#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_WRP_FIFO_ERR vxge_mBIT(13)
2366#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_WRP_ERR vxge_mBIT(14)
2367#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_RRP_FIFO_ERR vxge_mBIT(15)
2368#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_RRP_ERR vxge_mBIT(16)
2369#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_DATA_SM_ERR vxge_mBIT(17)
2370#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_CMC0_IF_ERR vxge_mBIT(18)
2371#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_ARB_SM_ERR vxge_mBIT(19)
2372#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_CFC_SM_ERR vxge_mBIT(20)
2373#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_DFETCH_CREDIT_OVERFLOW \
2374 vxge_mBIT(21)
2375#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_DFETCH_CREDIT_UNDERFLOW \
2376 vxge_mBIT(22)
2377#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_DFETCH_SM_ERR vxge_mBIT(23)
2378#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_RCTRL_CREDIT_OVERFLOW \
2379 vxge_mBIT(24)
2380#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_RCTRL_CREDIT_UNDERFLOW \
2381 vxge_mBIT(25)
2382#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_RCTRL_SM_ERR vxge_mBIT(26)
2383#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WCOMPL_SM_ERR vxge_mBIT(27)
2384#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WCOMPL_TAG_ERR vxge_mBIT(28)
2385#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WREQ_SM_ERR vxge_mBIT(29)
2386#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WREQ_FIFO_ERR vxge_mBIT(30)
2387#define VXGE_HW_GXTMC_ERR_REG_XTMC_CP2BDT_RFIFO_POP_ERR vxge_mBIT(31)
2388#define VXGE_HW_GXTMC_ERR_REG_XTMC_XTMC_BDT_CMI_OP_ERR vxge_mBIT(32)
2389#define VXGE_HW_GXTMC_ERR_REG_XTMC_XTMC_BDT_DFETCH_OP_ERR vxge_mBIT(33)
2390#define VXGE_HW_GXTMC_ERR_REG_XTMC_XTMC_BDT_DFIFO_ERR vxge_mBIT(34)
2391#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_ARB_SM_ERR vxge_mBIT(35)
2392/*0x04b18*/ u64 gxtmc_err_mask;
2393/*0x04b20*/ u64 gxtmc_err_alarm;
2394/*0x04b28*/ u64 cmc_err_reg;
2395#define VXGE_HW_CMC_ERR_REG_CMC_CMC_SM_ERR vxge_mBIT(0)
2396/*0x04b30*/ u64 cmc_err_mask;
2397/*0x04b38*/ u64 cmc_err_alarm;
2398/*0x04b40*/ u64 gcp_err_reg;
2399#define VXGE_HW_GCP_ERR_REG_CP_H2L2CP_FIFO_ERR vxge_mBIT(0)
2400#define VXGE_HW_GCP_ERR_REG_CP_STC2CP_FIFO_ERR vxge_mBIT(1)
2401#define VXGE_HW_GCP_ERR_REG_CP_STE2CP_FIFO_ERR vxge_mBIT(2)
2402#define VXGE_HW_GCP_ERR_REG_CP_TTE2CP_FIFO_ERR vxge_mBIT(3)
2403/*0x04b48*/ u64 gcp_err_mask;
2404/*0x04b50*/ u64 gcp_err_alarm;
2405 u8 unused04f00[0x04f00-0x04b58];
2406
2407/*0x04f00*/ u64 pcmg2_int_status;
2408#define VXGE_HW_PCMG2_INT_STATUS_PXTMC_ERR_PXTMC_INT vxge_mBIT(7)
2409#define VXGE_HW_PCMG2_INT_STATUS_CP_EXC_CP_XT_EXC_INT vxge_mBIT(15)
2410#define VXGE_HW_PCMG2_INT_STATUS_CP_ERR_CP_ERR_INT vxge_mBIT(23)
2411/*0x04f08*/ u64 pcmg2_int_mask;
2412/*0x04f10*/ u64 pxtmc_err_reg;
2413#define VXGE_HW_PXTMC_ERR_REG_XTMC_XT_PIF_SRAM_DB_ERR(val) vxge_vBIT(val, 0, 2)
2414#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_REQ_FIFO_ERR vxge_mBIT(2)
2415#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_PRSP_FIFO_ERR vxge_mBIT(3)
2416#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_WRSP_FIFO_ERR vxge_mBIT(4)
2417#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_REQ_FIFO_ERR vxge_mBIT(5)
2418#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_PRSP_FIFO_ERR vxge_mBIT(6)
2419#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_WRSP_FIFO_ERR vxge_mBIT(7)
2420#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_REQ_FIFO_ERR vxge_mBIT(8)
2421#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_PRSP_FIFO_ERR vxge_mBIT(9)
2422#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_WRSP_FIFO_ERR vxge_mBIT(10)
2423#define VXGE_HW_PXTMC_ERR_REG_XTMC_REQ_FIFO_ERR vxge_mBIT(11)
2424#define VXGE_HW_PXTMC_ERR_REG_XTMC_REQ_DATA_FIFO_ERR vxge_mBIT(12)
2425#define VXGE_HW_PXTMC_ERR_REG_XTMC_WR_RSP_FIFO_ERR vxge_mBIT(13)
2426#define VXGE_HW_PXTMC_ERR_REG_XTMC_RD_RSP_FIFO_ERR vxge_mBIT(14)
2427#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_REQ_SHADOW_ERR vxge_mBIT(15)
2428#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_RSP_SHADOW_ERR vxge_mBIT(16)
2429#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_REQ_SHADOW_ERR vxge_mBIT(17)
2430#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_RSP_SHADOW_ERR vxge_mBIT(18)
2431#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_REQ_SHADOW_ERR vxge_mBIT(19)
2432#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_RSP_SHADOW_ERR vxge_mBIT(20)
2433#define VXGE_HW_PXTMC_ERR_REG_XTMC_XIL_SHADOW_ERR vxge_mBIT(21)
2434#define VXGE_HW_PXTMC_ERR_REG_XTMC_ARB_SHADOW_ERR vxge_mBIT(22)
2435#define VXGE_HW_PXTMC_ERR_REG_XTMC_RAM_SHADOW_ERR vxge_mBIT(23)
2436#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMW_SHADOW_ERR vxge_mBIT(24)
2437#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMR_SHADOW_ERR vxge_mBIT(25)
2438#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_REQ_FSM_ERR vxge_mBIT(26)
2439#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_RSP_FSM_ERR vxge_mBIT(27)
2440#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_REQ_FSM_ERR vxge_mBIT(28)
2441#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_RSP_FSM_ERR vxge_mBIT(29)
2442#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_REQ_FSM_ERR vxge_mBIT(30)
2443#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_RSP_FSM_ERR vxge_mBIT(31)
2444#define VXGE_HW_PXTMC_ERR_REG_XTMC_XIL_FSM_ERR vxge_mBIT(32)
2445#define VXGE_HW_PXTMC_ERR_REG_XTMC_ARB_FSM_ERR vxge_mBIT(33)
2446#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMW_FSM_ERR vxge_mBIT(34)
2447#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMR_FSM_ERR vxge_mBIT(35)
2448#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_RD_PROT_ERR vxge_mBIT(36)
2449#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_RD_PROT_ERR vxge_mBIT(37)
2450#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_RD_PROT_ERR vxge_mBIT(38)
2451#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_WR_PROT_ERR vxge_mBIT(39)
2452#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_WR_PROT_ERR vxge_mBIT(40)
2453#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_WR_PROT_ERR vxge_mBIT(41)
2454#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_INV_ADDR_ERR vxge_mBIT(42)
2455#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_INV_ADDR_ERR vxge_mBIT(43)
2456#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_INV_ADDR_ERR vxge_mBIT(44)
2457#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_RD_PROT_INFO_ERR vxge_mBIT(45)
2458#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_RD_PROT_INFO_ERR vxge_mBIT(46)
2459#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_RD_PROT_INFO_ERR vxge_mBIT(47)
2460#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_WR_PROT_INFO_ERR vxge_mBIT(48)
2461#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_WR_PROT_INFO_ERR vxge_mBIT(49)
2462#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_WR_PROT_INFO_ERR vxge_mBIT(50)
2463#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_INV_ADDR_INFO_ERR vxge_mBIT(51)
2464#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_INV_ADDR_INFO_ERR vxge_mBIT(52)
2465#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_INV_ADDR_INFO_ERR vxge_mBIT(53)
2466#define VXGE_HW_PXTMC_ERR_REG_XTMC_XT_PIF_SRAM_SG_ERR(val) vxge_vBIT(val, 54, 2)
2467#define VXGE_HW_PXTMC_ERR_REG_XTMC_CP2BDT_DFIFO_PUSH_ERR vxge_mBIT(56)
2468#define VXGE_HW_PXTMC_ERR_REG_XTMC_CP2BDT_RFIFO_PUSH_ERR vxge_mBIT(57)
2469/*0x04f18*/ u64 pxtmc_err_mask;
2470/*0x04f20*/ u64 pxtmc_err_alarm;
2471/*0x04f28*/ u64 cp_err_reg;
2472#define VXGE_HW_CP_ERR_REG_CP_CP_DCACHE_SG_ERR(val) vxge_vBIT(val, 0, 8)
2473#define VXGE_HW_CP_ERR_REG_CP_CP_ICACHE_SG_ERR(val) vxge_vBIT(val, 8, 2)
2474#define VXGE_HW_CP_ERR_REG_CP_CP_DTAG_SG_ERR vxge_mBIT(10)
2475#define VXGE_HW_CP_ERR_REG_CP_CP_ITAG_SG_ERR vxge_mBIT(11)
2476#define VXGE_HW_CP_ERR_REG_CP_CP_TRACE_SG_ERR vxge_mBIT(12)
2477#define VXGE_HW_CP_ERR_REG_CP_DMA2CP_SG_ERR vxge_mBIT(13)
2478#define VXGE_HW_CP_ERR_REG_CP_MP2CP_SG_ERR vxge_mBIT(14)
2479#define VXGE_HW_CP_ERR_REG_CP_QCC2CP_SG_ERR vxge_mBIT(15)
2480#define VXGE_HW_CP_ERR_REG_CP_STC2CP_SG_ERR(val) vxge_vBIT(val, 16, 2)
2481#define VXGE_HW_CP_ERR_REG_CP_CP_DCACHE_DB_ERR(val) vxge_vBIT(val, 24, 8)
2482#define VXGE_HW_CP_ERR_REG_CP_CP_ICACHE_DB_ERR(val) vxge_vBIT(val, 32, 2)
2483#define VXGE_HW_CP_ERR_REG_CP_CP_DTAG_DB_ERR vxge_mBIT(34)
2484#define VXGE_HW_CP_ERR_REG_CP_CP_ITAG_DB_ERR vxge_mBIT(35)
2485#define VXGE_HW_CP_ERR_REG_CP_CP_TRACE_DB_ERR vxge_mBIT(36)
2486#define VXGE_HW_CP_ERR_REG_CP_DMA2CP_DB_ERR vxge_mBIT(37)
2487#define VXGE_HW_CP_ERR_REG_CP_MP2CP_DB_ERR vxge_mBIT(38)
2488#define VXGE_HW_CP_ERR_REG_CP_QCC2CP_DB_ERR vxge_mBIT(39)
2489#define VXGE_HW_CP_ERR_REG_CP_STC2CP_DB_ERR(val) vxge_vBIT(val, 40, 2)
2490#define VXGE_HW_CP_ERR_REG_CP_H2L2CP_FIFO_ERR vxge_mBIT(48)
2491#define VXGE_HW_CP_ERR_REG_CP_STC2CP_FIFO_ERR vxge_mBIT(49)
2492#define VXGE_HW_CP_ERR_REG_CP_STE2CP_FIFO_ERR vxge_mBIT(50)
2493#define VXGE_HW_CP_ERR_REG_CP_TTE2CP_FIFO_ERR vxge_mBIT(51)
2494#define VXGE_HW_CP_ERR_REG_CP_SWIF2CP_FIFO_ERR vxge_mBIT(52)
2495#define VXGE_HW_CP_ERR_REG_CP_CP2DMA_FIFO_ERR vxge_mBIT(53)
2496#define VXGE_HW_CP_ERR_REG_CP_DAM2CP_FIFO_ERR vxge_mBIT(54)
2497#define VXGE_HW_CP_ERR_REG_CP_MP2CP_FIFO_ERR vxge_mBIT(55)
2498#define VXGE_HW_CP_ERR_REG_CP_QCC2CP_FIFO_ERR vxge_mBIT(56)
2499#define VXGE_HW_CP_ERR_REG_CP_DMA2CP_FIFO_ERR vxge_mBIT(57)
2500#define VXGE_HW_CP_ERR_REG_CP_CP_WAKE_FSM_INTEGRITY_ERR vxge_mBIT(60)
2501#define VXGE_HW_CP_ERR_REG_CP_CP_PMON_FSM_INTEGRITY_ERR vxge_mBIT(61)
2502#define VXGE_HW_CP_ERR_REG_CP_DMA_RD_SHADOW_ERR vxge_mBIT(62)
2503#define VXGE_HW_CP_ERR_REG_CP_PIFT_CREDIT_ERR vxge_mBIT(63)
2504/*0x04f30*/ u64 cp_err_mask;
2505/*0x04f38*/ u64 cp_err_alarm;
2506 u8 unused04fe8[0x04f50-0x04f40];
2507
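Editorial note (not part of the diff): the error registers above encode single flags with vxge_mBIT() and multi-bit fields with vxge_vBIT(), and the bit numbering is MSB-relative, i.e. bit 0 is the most significant bit of the 64-bit register. The sketch below is illustrative only; the EXAMPLE_* names are hypothetical and merely mirror the shift-from-MSB behaviour these macros appear to have from their use here.

/* Illustrative only -- hypothetical EXAMPLE_* helpers assuming MSB-relative
 * numbering: bit 0 is the most significant bit of a u64, bit 63 the least.
 */
#include <linux/types.h>

#define EXAMPLE_mBIT(loc)           (0x8000000000000000ULL >> (loc))
#define EXAMPLE_vBIT(val, loc, sz)  (((u64)(val)) << (64 - (loc) - (sz)))

/* EXAMPLE_mBIT(0)  == 0x8000000000000000ULL (most significant bit),
 * EXAMPLE_mBIT(63) == 0x1ULL (least significant bit), and
 * EXAMPLE_vBIT(v, 0, 8) places an 8-bit field in the top byte, matching the
 * layout implied by VXGE_HW_CP_ERR_REG_CP_CP_DCACHE_SG_ERR(val) above.
 */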
2508/*0x04f50*/ u64 cp_exc_reg;
2509#define VXGE_HW_CP_EXC_REG_CP_CP_CAUSE_INFO_INT vxge_mBIT(47)
2510#define VXGE_HW_CP_EXC_REG_CP_CP_CAUSE_CRIT_INT vxge_mBIT(55)
2511#define VXGE_HW_CP_EXC_REG_CP_CP_SERR vxge_mBIT(63)
2512/*0x04f58*/ u64 cp_exc_mask;
2513/*0x04f60*/ u64 cp_exc_alarm;
2514/*0x04f68*/ u64 cp_exc_cause;
2515#define VXGE_HW_CP_EXC_CAUSE_CP_CP_CAUSE(val) vxge_vBIT(val, 32, 32)
2516 u8 unused05200[0x05200-0x04f70];
2517
2518/*0x05200*/ u64 msg_int_status;
2519#define VXGE_HW_MSG_INT_STATUS_TIM_ERR_TIM_INT vxge_mBIT(7)
2520#define VXGE_HW_MSG_INT_STATUS_MSG_EXC_MSG_XT_EXC_INT vxge_mBIT(60)
2521#define VXGE_HW_MSG_INT_STATUS_MSG_ERR3_MSG_ERR3_INT vxge_mBIT(61)
2522#define VXGE_HW_MSG_INT_STATUS_MSG_ERR2_MSG_ERR2_INT vxge_mBIT(62)
2523#define VXGE_HW_MSG_INT_STATUS_MSG_ERR_MSG_ERR_INT vxge_mBIT(63)
2524/*0x05208*/ u64 msg_int_mask;
2525/*0x05210*/ u64 tim_err_reg;
2526#define VXGE_HW_TIM_ERR_REG_TIM_VBLS_SG_ERR vxge_mBIT(4)
2527#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PA_SG_ERR vxge_mBIT(5)
2528#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PB_SG_ERR vxge_mBIT(6)
2529#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MSG_SG_ERR vxge_mBIT(7)
2530#define VXGE_HW_TIM_ERR_REG_TIM_VBLS_DB_ERR vxge_mBIT(12)
2531#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PA_DB_ERR vxge_mBIT(13)
2532#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PB_DB_ERR vxge_mBIT(14)
2533#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MSG_DB_ERR vxge_mBIT(15)
2534#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MEM_CNTRL_SM_ERR vxge_mBIT(18)
2535#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MSG_MEM_CNTRL_SM_ERR vxge_mBIT(19)
2536#define VXGE_HW_TIM_ERR_REG_TIM_MPIF_PCIWR_ERR vxge_mBIT(20)
2537#define VXGE_HW_TIM_ERR_REG_TIM_ROCRC_BMAP_UPDT_FIFO_ERR vxge_mBIT(22)
2538#define VXGE_HW_TIM_ERR_REG_TIM_CREATE_BMAPMSG_FIFO_ERR vxge_mBIT(23)
2539#define VXGE_HW_TIM_ERR_REG_TIM_ROCRCIF_MISMATCH vxge_mBIT(46)
2540#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MAPPING_VP_ERR(n) vxge_mBIT(n)
2541/*0x05218*/ u64 tim_err_mask;
2542/*0x05220*/ u64 tim_err_alarm;
2543/*0x05228*/ u64 msg_err_reg;
2544#define VXGE_HW_MSG_ERR_REG_UP_UXP_WAKE_FSM_INTEGRITY_ERR vxge_mBIT(0)
2545#define VXGE_HW_MSG_ERR_REG_MP_MXP_WAKE_FSM_INTEGRITY_ERR vxge_mBIT(1)
2546#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_DMA_READ_CMD_FSM_INTEGRITY_ERR \
2547 vxge_mBIT(2)
2548#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_DMA_RESP_FSM_INTEGRITY_ERR \
2549 vxge_mBIT(3)
2550#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_OWN_FSM_INTEGRITY_ERR vxge_mBIT(4)
2551#define VXGE_HW_MSG_ERR_REG_MSG_QUE_PDA_ACC_FSM_INTEGRITY_ERR vxge_mBIT(5)
2552#define VXGE_HW_MSG_ERR_REG_MP_MXP_PMON_FSM_INTEGRITY_ERR vxge_mBIT(6)
2553#define VXGE_HW_MSG_ERR_REG_UP_UXP_PMON_FSM_INTEGRITY_ERR vxge_mBIT(7)
2554#define VXGE_HW_MSG_ERR_REG_UP_UXP_DTAG_SG_ERR vxge_mBIT(8)
2555#define VXGE_HW_MSG_ERR_REG_UP_UXP_ITAG_SG_ERR vxge_mBIT(10)
2556#define VXGE_HW_MSG_ERR_REG_MP_MXP_DTAG_SG_ERR vxge_mBIT(12)
2557#define VXGE_HW_MSG_ERR_REG_MP_MXP_ITAG_SG_ERR vxge_mBIT(14)
2558#define VXGE_HW_MSG_ERR_REG_UP_UXP_TRACE_SG_ERR vxge_mBIT(16)
2559#define VXGE_HW_MSG_ERR_REG_MP_MXP_TRACE_SG_ERR vxge_mBIT(17)
2560#define VXGE_HW_MSG_ERR_REG_MSG_QUE_CMG2MSG_SG_ERR vxge_mBIT(18)
2561#define VXGE_HW_MSG_ERR_REG_MSG_QUE_TXPE2MSG_SG_ERR vxge_mBIT(19)
2562#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RXPE2MSG_SG_ERR vxge_mBIT(20)
2563#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RPE2MSG_SG_ERR vxge_mBIT(21)
2564#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_SG_ERR vxge_mBIT(26)
2565#define VXGE_HW_MSG_ERR_REG_MSG_QUE_BWR_PF_SG_ERR vxge_mBIT(27)
2566#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_ECC_SG_ERR vxge_mBIT(29)
2567#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMA_RESP_ECC_SG_ERR vxge_mBIT(31)
2568#define VXGE_HW_MSG_ERR_REG_MSG_XFMDQRY_FSM_INTEGRITY_ERR vxge_mBIT(33)
2569#define VXGE_HW_MSG_ERR_REG_MSG_FRMQRY_FSM_INTEGRITY_ERR vxge_mBIT(34)
2570#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_WRITE_FSM_INTEGRITY_ERR vxge_mBIT(35)
2571#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_BWR_PF_FSM_INTEGRITY_ERR \
2572 vxge_mBIT(36)
2573#define VXGE_HW_MSG_ERR_REG_MSG_QUE_REG_RESP_FIFO_ERR vxge_mBIT(38)
2574#define VXGE_HW_MSG_ERR_REG_UP_UXP_DTAG_DB_ERR vxge_mBIT(39)
2575#define VXGE_HW_MSG_ERR_REG_UP_UXP_ITAG_DB_ERR vxge_mBIT(41)
2576#define VXGE_HW_MSG_ERR_REG_MP_MXP_DTAG_DB_ERR vxge_mBIT(43)
2577#define VXGE_HW_MSG_ERR_REG_MP_MXP_ITAG_DB_ERR vxge_mBIT(45)
2578#define VXGE_HW_MSG_ERR_REG_UP_UXP_TRACE_DB_ERR vxge_mBIT(47)
2579#define VXGE_HW_MSG_ERR_REG_MP_MXP_TRACE_DB_ERR vxge_mBIT(48)
2580#define VXGE_HW_MSG_ERR_REG_MSG_QUE_CMG2MSG_DB_ERR vxge_mBIT(49)
2581#define VXGE_HW_MSG_ERR_REG_MSG_QUE_TXPE2MSG_DB_ERR vxge_mBIT(50)
2582#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RXPE2MSG_DB_ERR vxge_mBIT(51)
2583#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RPE2MSG_DB_ERR vxge_mBIT(52)
2584#define VXGE_HW_MSG_ERR_REG_MSG_QUE_REG_READ_FIFO_ERR vxge_mBIT(53)
2585#define VXGE_HW_MSG_ERR_REG_MSG_QUE_MXP2UXP_FIFO_ERR vxge_mBIT(54)
2586#define VXGE_HW_MSG_ERR_REG_MSG_QUE_KDFC_SIF_FIFO_ERR vxge_mBIT(55)
2587#define VXGE_HW_MSG_ERR_REG_MSG_QUE_CXP2SWIF_FIFO_ERR vxge_mBIT(56)
2588#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_DB_ERR vxge_mBIT(57)
2589#define VXGE_HW_MSG_ERR_REG_MSG_QUE_BWR_PF_DB_ERR vxge_mBIT(58)
2590#define VXGE_HW_MSG_ERR_REG_MSG_QUE_BWR_SIF_FIFO_ERR vxge_mBIT(59)
2591#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_ECC_DB_ERR vxge_mBIT(60)
2592#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMA_READ_FIFO_ERR vxge_mBIT(61)
2593#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMA_RESP_ECC_DB_ERR vxge_mBIT(62)
2594#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UXP2MXP_FIFO_ERR vxge_mBIT(63)
2595/*0x05230*/ u64 msg_err_mask;
2596/*0x05238*/ u64 msg_err_alarm;
2597 u8 unused05340[0x05340-0x05240];
2598
2599/*0x05340*/ u64 msg_exc_reg;
2600#define VXGE_HW_MSG_EXC_REG_MP_MXP_CAUSE_INFO_INT vxge_mBIT(50)
2601#define VXGE_HW_MSG_EXC_REG_MP_MXP_CAUSE_CRIT_INT vxge_mBIT(51)
2602#define VXGE_HW_MSG_EXC_REG_UP_UXP_CAUSE_INFO_INT vxge_mBIT(54)
2603#define VXGE_HW_MSG_EXC_REG_UP_UXP_CAUSE_CRIT_INT vxge_mBIT(55)
2604#define VXGE_HW_MSG_EXC_REG_MP_MXP_SERR vxge_mBIT(62)
2605#define VXGE_HW_MSG_EXC_REG_UP_UXP_SERR vxge_mBIT(63)
2606/*0x05348*/ u64 msg_exc_mask;
2607/*0x05350*/ u64 msg_exc_alarm;
2608/*0x05358*/ u64 msg_exc_cause;
2609#define VXGE_HW_MSG_EXC_CAUSE_MP_MXP(val) vxge_vBIT(val, 0, 32)
2610#define VXGE_HW_MSG_EXC_CAUSE_UP_UXP(val) vxge_vBIT(val, 32, 32)
2611 u8 unused05368[0x05380-0x05360];
2612
2613/*0x05380*/ u64 msg_err2_reg;
2614#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_CMG2MSG_DISPATCH_FSM_INTEGRITY_ERR \
2615 vxge_mBIT(0)
2616#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_DMQ_DISPATCH_FSM_INTEGRITY_ERR \
2617 vxge_mBIT(1)
2618#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_SWIF_DISPATCH_FSM_INTEGRITY_ERR \
2619 vxge_mBIT(2)
2620#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_PIC_WRITE_FSM_INTEGRITY_ERR \
2621 vxge_mBIT(3)
2622#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_SWIFREG_FSM_INTEGRITY_ERR vxge_mBIT(4)
2623#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_TIM_WRITE_FSM_INTEGRITY_ERR \
2624 vxge_mBIT(5)
2625#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_UMQ_TA_FSM_INTEGRITY_ERR vxge_mBIT(6)
2626#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_TXPE_TA_FSM_INTEGRITY_ERR vxge_mBIT(7)
2627#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_RXPE_TA_FSM_INTEGRITY_ERR vxge_mBIT(8)
2628#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_SWIF_TA_FSM_INTEGRITY_ERR vxge_mBIT(9)
2629#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_DMA_TA_FSM_INTEGRITY_ERR vxge_mBIT(10)
2630#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_CP_TA_FSM_INTEGRITY_ERR vxge_mBIT(11)
2631#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA16_FSM_INTEGRITY_ERR \
2632 vxge_mBIT(12)
2633#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA15_FSM_INTEGRITY_ERR \
2634 vxge_mBIT(13)
2635#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA14_FSM_INTEGRITY_ERR \
2636 vxge_mBIT(14)
2637#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA13_FSM_INTEGRITY_ERR \
2638 vxge_mBIT(15)
2639#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA12_FSM_INTEGRITY_ERR \
2640 vxge_mBIT(16)
2641#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA11_FSM_INTEGRITY_ERR \
2642 vxge_mBIT(17)
2643#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA10_FSM_INTEGRITY_ERR \
2644 vxge_mBIT(18)
2645#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA9_FSM_INTEGRITY_ERR \
2646 vxge_mBIT(19)
2647#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA8_FSM_INTEGRITY_ERR \
2648 vxge_mBIT(20)
2649#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA7_FSM_INTEGRITY_ERR \
2650 vxge_mBIT(21)
2651#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA6_FSM_INTEGRITY_ERR \
2652 vxge_mBIT(22)
2653#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA5_FSM_INTEGRITY_ERR \
2654 vxge_mBIT(23)
2655#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA4_FSM_INTEGRITY_ERR \
2656 vxge_mBIT(24)
2657#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA3_FSM_INTEGRITY_ERR \
2658 vxge_mBIT(25)
2659#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA2_FSM_INTEGRITY_ERR \
2660 vxge_mBIT(26)
2661#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA1_FSM_INTEGRITY_ERR \
2662 vxge_mBIT(27)
2663#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA0_FSM_INTEGRITY_ERR \
2664 vxge_mBIT(28)
2665#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_FBMC_OWN_FSM_INTEGRITY_ERR vxge_mBIT(29)
2666#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_TXPE2MSG_DISPATCH_FSM_INTEGRITY_ERR \
2667 vxge_mBIT(30)
2668#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_RXPE2MSG_DISPATCH_FSM_INTEGRITY_ERR \
2669 vxge_mBIT(31)
2670#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_RPE2MSG_DISPATCH_FSM_INTEGRITY_ERR \
2671 vxge_mBIT(32)
2672#define VXGE_HW_MSG_ERR2_REG_MP_MP_PIFT_IF_CREDIT_CNT_ERR vxge_mBIT(33)
2673#define VXGE_HW_MSG_ERR2_REG_UP_UP_PIFT_IF_CREDIT_CNT_ERR vxge_mBIT(34)
2674#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_UMQ2PIC_CMD_FIFO_ERR vxge_mBIT(62)
2675#define VXGE_HW_MSG_ERR2_REG_TIM_TIM2MSG_CMD_FIFO_ERR vxge_mBIT(63)
2676/*0x05388*/ u64 msg_err2_mask;
2677/*0x05390*/ u64 msg_err2_alarm;
2678/*0x05398*/ u64 msg_err3_reg;
2679#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR0 vxge_mBIT(0)
2680#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR1 vxge_mBIT(1)
2681#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR2 vxge_mBIT(2)
2682#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR3 vxge_mBIT(3)
2683#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR4 vxge_mBIT(4)
2684#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR5 vxge_mBIT(5)
2685#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR6 vxge_mBIT(6)
2686#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR7 vxge_mBIT(7)
2687#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_SG_ERR0 vxge_mBIT(8)
2688#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_SG_ERR1 vxge_mBIT(9)
2689#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR0 vxge_mBIT(16)
2690#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR1 vxge_mBIT(17)
2691#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR2 vxge_mBIT(18)
2692#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR3 vxge_mBIT(19)
2693#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR4 vxge_mBIT(20)
2694#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR5 vxge_mBIT(21)
2695#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR6 vxge_mBIT(22)
2696#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR7 vxge_mBIT(23)
2697#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_SG_ERR0 vxge_mBIT(24)
2698#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_SG_ERR1 vxge_mBIT(25)
2699#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR0 vxge_mBIT(32)
2700#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR1 vxge_mBIT(33)
2701#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR2 vxge_mBIT(34)
2702#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR3 vxge_mBIT(35)
2703#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR4 vxge_mBIT(36)
2704#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR5 vxge_mBIT(37)
2705#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR6 vxge_mBIT(38)
2706#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR7 vxge_mBIT(39)
2707#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_DB_ERR0 vxge_mBIT(40)
2708#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_DB_ERR1 vxge_mBIT(41)
2709#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR0 vxge_mBIT(48)
2710#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR1 vxge_mBIT(49)
2711#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR2 vxge_mBIT(50)
2712#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR3 vxge_mBIT(51)
2713#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR4 vxge_mBIT(52)
2714#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR5 vxge_mBIT(53)
2715#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR6 vxge_mBIT(54)
2716#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR7 vxge_mBIT(55)
2717#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_DB_ERR0 vxge_mBIT(56)
2718#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_DB_ERR1 vxge_mBIT(57)
2719/*0x053a0*/ u64 msg_err3_mask;
2720/*0x053a8*/ u64 msg_err3_alarm;
2721 u8 unused05600[0x05600-0x053b0];
2722
2723/*0x05600*/ u64 fau_gen_err_reg;
2724#define VXGE_HW_FAU_GEN_ERR_REG_FMPF_PORT0_PERMANENT_STOP vxge_mBIT(3)
2725#define VXGE_HW_FAU_GEN_ERR_REG_FMPF_PORT1_PERMANENT_STOP vxge_mBIT(7)
2726#define VXGE_HW_FAU_GEN_ERR_REG_FMPF_PORT2_PERMANENT_STOP vxge_mBIT(11)
2727#define VXGE_HW_FAU_GEN_ERR_REG_FALR_AUTO_LRO_NOTIFICATION vxge_mBIT(15)
2728/*0x05608*/ u64 fau_gen_err_mask;
2729/*0x05610*/ u64 fau_gen_err_alarm;
2730/*0x05618*/ u64 fau_ecc_err_reg;
2731#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_N_SG_ERR vxge_mBIT(0)
2732#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_N_DB_ERR vxge_mBIT(1)
2733#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_W_SG_ERR(val) \
2734 vxge_vBIT(val, 2, 2)
2735#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_W_DB_ERR(val) \
2736 vxge_vBIT(val, 4, 2)
2737#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_N_SG_ERR vxge_mBIT(6)
2738#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_N_DB_ERR vxge_mBIT(7)
2739#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_W_SG_ERR(val) \
2740 vxge_vBIT(val, 8, 2)
2741#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_W_DB_ERR(val) \
2742 vxge_vBIT(val, 10, 2)
2743#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_N_SG_ERR vxge_mBIT(12)
2744#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_N_DB_ERR vxge_mBIT(13)
2745#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_W_SG_ERR(val) \
2746 vxge_vBIT(val, 14, 2)
2747#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_W_DB_ERR(val) \
2748 vxge_vBIT(val, 16, 2)
2749#define VXGE_HW_FAU_ECC_ERR_REG_FAU_FAU_XFMD_INS_SG_ERR(val) \
2750 vxge_vBIT(val, 18, 2)
2751#define VXGE_HW_FAU_ECC_ERR_REG_FAU_FAU_XFMD_INS_DB_ERR(val) \
2752 vxge_vBIT(val, 20, 2)
2753#define VXGE_HW_FAU_ECC_ERR_REG_FAUJ_FAU_FSM_ERR vxge_mBIT(31)
2754/*0x05620*/ u64 fau_ecc_err_mask;
2755/*0x05628*/ u64 fau_ecc_err_alarm;
2756 u8 unused05658[0x05658-0x05630];
2757/*0x05658*/ u64 fau_pa_cfg;
2758#define VXGE_HW_FAU_PA_CFG_REPL_L4_COMP_CSUM vxge_mBIT(3)
2759#define VXGE_HW_FAU_PA_CFG_REPL_L3_INCL_CF vxge_mBIT(7)
2760#define VXGE_HW_FAU_PA_CFG_REPL_L3_COMP_CSUM vxge_mBIT(11)
2761 u8 unused05668[0x05668-0x05660];
2762
2763/*0x05668*/ u64 dbg_stats_fau_rx_path;
2764#define VXGE_HW_DBG_STATS_FAU_RX_PATH_RX_PERMITTED_FRMS(val) \
2765 vxge_vBIT(val, 32, 32)
2766 u8 unused056c0[0x056c0-0x05670];
2767
2768/*0x056c0*/ u64 fau_lag_cfg;
2769#define VXGE_HW_FAU_LAG_CFG_COLL_ALG(val) vxge_vBIT(val, 2, 2)
2770#define VXGE_HW_FAU_LAG_CFG_INCR_RX_AGGR_STATS vxge_mBIT(7)
2771 u8 unused05800[0x05800-0x056c8];
2772
2773/*0x05800*/ u64 tpa_int_status;
2774#define VXGE_HW_TPA_INT_STATUS_ORP_ERR_ORP_INT vxge_mBIT(15)
2775#define VXGE_HW_TPA_INT_STATUS_PTM_ALARM_PTM_INT vxge_mBIT(23)
2776#define VXGE_HW_TPA_INT_STATUS_TPA_ERROR_TPA_INT vxge_mBIT(31)
2777/*0x05808*/ u64 tpa_int_mask;
2778/*0x05810*/ u64 orp_err_reg;
2779#define VXGE_HW_ORP_ERR_REG_ORP_FIFO_SG_ERR vxge_mBIT(3)
2780#define VXGE_HW_ORP_ERR_REG_ORP_FIFO_DB_ERR vxge_mBIT(7)
2781#define VXGE_HW_ORP_ERR_REG_ORP_XFMD_FIFO_UFLOW_ERR vxge_mBIT(11)
2782#define VXGE_HW_ORP_ERR_REG_ORP_FRM_FIFO_UFLOW_ERR vxge_mBIT(15)
2783#define VXGE_HW_ORP_ERR_REG_ORP_XFMD_RCV_FSM_ERR vxge_mBIT(19)
2784#define VXGE_HW_ORP_ERR_REG_ORP_OUTREAD_FSM_ERR vxge_mBIT(23)
2785#define VXGE_HW_ORP_ERR_REG_ORP_OUTQEM_FSM_ERR vxge_mBIT(27)
2786#define VXGE_HW_ORP_ERR_REG_ORP_XFMD_RCV_SHADOW_ERR vxge_mBIT(31)
2787#define VXGE_HW_ORP_ERR_REG_ORP_OUTREAD_SHADOW_ERR vxge_mBIT(35)
2788#define VXGE_HW_ORP_ERR_REG_ORP_OUTQEM_SHADOW_ERR vxge_mBIT(39)
2789#define VXGE_HW_ORP_ERR_REG_ORP_OUTFRM_SHADOW_ERR vxge_mBIT(43)
2790#define VXGE_HW_ORP_ERR_REG_ORP_OPTPRS_SHADOW_ERR vxge_mBIT(47)
2791/*0x05818*/ u64 orp_err_mask;
2792/*0x05820*/ u64 orp_err_alarm;
2793/*0x05828*/ u64 ptm_alarm_reg;
2794#define VXGE_HW_PTM_ALARM_REG_PTM_RDCTRL_SYNC_ERR vxge_mBIT(3)
2795#define VXGE_HW_PTM_ALARM_REG_PTM_RDCTRL_FIFO_ERR vxge_mBIT(7)
2796#define VXGE_HW_PTM_ALARM_REG_XFMD_RD_FIFO_ERR vxge_mBIT(11)
2797#define VXGE_HW_PTM_ALARM_REG_WDE2MSR_WR_FIFO_ERR vxge_mBIT(15)
2798#define VXGE_HW_PTM_ALARM_REG_PTM_FRMM_ECC_DB_ERR(val) vxge_vBIT(val, 18, 2)
2799#define VXGE_HW_PTM_ALARM_REG_PTM_FRMM_ECC_SG_ERR(val) vxge_vBIT(val, 22, 2)
2800/*0x05830*/ u64 ptm_alarm_mask;
2801/*0x05838*/ u64 ptm_alarm_alarm;
2802/*0x05840*/ u64 tpa_error_reg;
2803#define VXGE_HW_TPA_ERROR_REG_TPA_FSM_ERR_ALARM vxge_mBIT(3)
2804#define VXGE_HW_TPA_ERROR_REG_TPA_TPA_DA_LKUP_PRT0_DB_ERR vxge_mBIT(7)
2805#define VXGE_HW_TPA_ERROR_REG_TPA_TPA_DA_LKUP_PRT0_SG_ERR vxge_mBIT(11)
2806/*0x05848*/ u64 tpa_error_mask;
2807/*0x05850*/ u64 tpa_error_alarm;
2808/*0x05858*/ u64 tpa_global_cfg;
2809#define VXGE_HW_TPA_GLOBAL_CFG_SUPPORT_SNAP_AB_N vxge_mBIT(7)
2810#define VXGE_HW_TPA_GLOBAL_CFG_ECC_ENABLE_N vxge_mBIT(35)
2811 u8 unused05868[0x05870-0x05860];
2812
2813/*0x05870*/ u64 ptm_ecc_cfg;
2814#define VXGE_HW_PTM_ECC_CFG_PTM_FRMM_ECC_EN_N vxge_mBIT(3)
2815/*0x05878*/ u64 ptm_phase_cfg;
2816#define VXGE_HW_PTM_PHASE_CFG_FRMM_WR_PHASE_EN vxge_mBIT(3)
2817#define VXGE_HW_PTM_PHASE_CFG_FRMM_RD_PHASE_EN vxge_mBIT(7)
2818 u8 unused05898[0x05898-0x05880];
2819
2820/*0x05898*/ u64 dbg_stats_tpa_tx_path;
2821#define VXGE_HW_DBG_STATS_TPA_TX_PATH_TX_PERMITTED_FRMS(val) \
2822 vxge_vBIT(val, 32, 32)
2823 u8 unused05900[0x05900-0x058a0];
2824
2825/*0x05900*/ u64 tmac_int_status;
2826#define VXGE_HW_TMAC_INT_STATUS_TXMAC_GEN_ERR_TXMAC_GEN_INT vxge_mBIT(3)
2827#define VXGE_HW_TMAC_INT_STATUS_TXMAC_ECC_ERR_TXMAC_ECC_INT vxge_mBIT(7)
2828/*0x05908*/ u64 tmac_int_mask;
2829/*0x05910*/ u64 txmac_gen_err_reg;
2830#define VXGE_HW_TXMAC_GEN_ERR_REG_TMACJ_PERMANENT_STOP vxge_mBIT(3)
2831#define VXGE_HW_TXMAC_GEN_ERR_REG_TMACJ_NO_VALID_VSPORT vxge_mBIT(7)
2832/*0x05918*/ u64 txmac_gen_err_mask;
2833/*0x05920*/ u64 txmac_gen_err_alarm;
2834/*0x05928*/ u64 txmac_ecc_err_reg;
2835#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2MAC_SG_ERR vxge_mBIT(3)
2836#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2MAC_DB_ERR vxge_mBIT(7)
2837#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_SB_SG_ERR vxge_mBIT(11)
2838#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_SB_DB_ERR vxge_mBIT(15)
2839#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_DA_SG_ERR vxge_mBIT(19)
2840#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_DA_DB_ERR vxge_mBIT(23)
2841#define VXGE_HW_TXMAC_ECC_ERR_REG_TMAC_TMAC_PORT0_FSM_ERR vxge_mBIT(27)
2842#define VXGE_HW_TXMAC_ECC_ERR_REG_TMAC_TMAC_PORT1_FSM_ERR vxge_mBIT(31)
2843#define VXGE_HW_TXMAC_ECC_ERR_REG_TMAC_TMAC_PORT2_FSM_ERR vxge_mBIT(35)
2844#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMACJ_FSM_ERR vxge_mBIT(39)
2845/*0x05930*/ u64 txmac_ecc_err_mask;
2846/*0x05938*/ u64 txmac_ecc_err_alarm;
2847 u8 unused05978[0x05978-0x05940];
2848
2849/*0x05978*/ u64 dbg_stat_tx_any_frms;
2850#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_PORT0_TX_ANY_FRMS(val) vxge_vBIT(val, 0, 8)
2851#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_PORT1_TX_ANY_FRMS(val) vxge_vBIT(val, 8, 8)
2852#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_PORT2_TX_ANY_FRMS(val) \
2853 vxge_vBIT(val, 16, 8)
2854 u8 unused059a0[0x059a0-0x05980];
2855
2856/*0x059a0*/ u64 txmac_link_util_port[3];
2857#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_TMAC_UTILIZATION(val) \
2858 vxge_vBIT(val, 1, 7)
2859#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_UTIL_CFG(val) vxge_vBIT(val, 8, 4)
2860#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_TMAC_FRAC_UTIL(val) \
2861 vxge_vBIT(val, 12, 4)
2862#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_PKT_WEIGHT(val) vxge_vBIT(val, 16, 4)
2863#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_TMAC_SCALE_FACTOR vxge_mBIT(23)
2864/*0x059b8*/ u64 txmac_cfg0_port[3];
2865#define VXGE_HW_TXMAC_CFG0_PORT_TMAC_EN vxge_mBIT(3)
2866#define VXGE_HW_TXMAC_CFG0_PORT_APPEND_PAD vxge_mBIT(7)
2867#define VXGE_HW_TXMAC_CFG0_PORT_PAD_BYTE(val) vxge_vBIT(val, 8, 8)
2868/*0x059d0*/ u64 txmac_cfg1_port[3];
2869#define VXGE_HW_TXMAC_CFG1_PORT_AVG_IPG(val) vxge_vBIT(val, 40, 8)
2870/*0x059e8*/ u64 txmac_status_port[3];
2871#define VXGE_HW_TXMAC_STATUS_PORT_TMAC_TX_FRM_SENT vxge_mBIT(3)
2872 u8 unused05a20[0x05a20-0x05a00];
2873
2874/*0x05a20*/ u64 lag_distrib_dest;
2875#define VXGE_HW_LAG_DISTRIB_DEST_MAP_VPATH(n) vxge_mBIT(n)
2876/*0x05a28*/ u64 lag_marker_cfg;
2877#define VXGE_HW_LAG_MARKER_CFG_GEN_RCVR_EN vxge_mBIT(3)
2878#define VXGE_HW_LAG_MARKER_CFG_RESP_EN vxge_mBIT(7)
2879#define VXGE_HW_LAG_MARKER_CFG_RESP_TIMEOUT(val) vxge_vBIT(val, 16, 16)
2880#define VXGE_HW_LAG_MARKER_CFG_SLOW_PROTO_MRKR_MIN_INTERVAL(val) \
2881 vxge_vBIT(val, 32, 16)
2882#define VXGE_HW_LAG_MARKER_CFG_THROTTLE_MRKR_RESP vxge_mBIT(51)
2883/*0x05a30*/ u64 lag_tx_cfg;
2884#define VXGE_HW_LAG_TX_CFG_INCR_TX_AGGR_STATS vxge_mBIT(3)
2885#define VXGE_HW_LAG_TX_CFG_DISTRIB_ALG_SEL(val) vxge_vBIT(val, 6, 2)
2886#define VXGE_HW_LAG_TX_CFG_DISTRIB_REMAP_IF_FAIL vxge_mBIT(11)
2887#define VXGE_HW_LAG_TX_CFG_COLL_MAX_DELAY(val) vxge_vBIT(val, 16, 16)
2888/*0x05a38*/ u64 lag_tx_status;
2889#define VXGE_HW_LAG_TX_STATUS_TLAG_TIMER_VAL_EMPTIED_LINK(val) \
2890 vxge_vBIT(val, 0, 8)
2891#define VXGE_HW_LAG_TX_STATUS_TLAG_TIMER_VAL_SLOW_PROTO_MRKR(val) \
2892 vxge_vBIT(val, 8, 8)
2893#define VXGE_HW_LAG_TX_STATUS_TLAG_TIMER_VAL_SLOW_PROTO_MRKRRESP(val) \
2894 vxge_vBIT(val, 16, 8)
2895 u8 unused05d48[0x05d48-0x05a40];
2896
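Editorial note (not part of the diff): the field macros are meant to be OR-ed together when composing a register value, as in the lag_tx_cfg register above. A minimal sketch follows; the helper name and the chosen values are arbitrary illustrations, not recommended settings.

/* Illustrative only: compose a lag_tx_cfg value from its field macros.
 * The values are arbitrary examples, not recommended settings.
 */
#include <linux/types.h>

static inline u64 example_build_lag_tx_cfg(void)
{
	return VXGE_HW_LAG_TX_CFG_INCR_TX_AGGR_STATS |
	       VXGE_HW_LAG_TX_CFG_DISTRIB_ALG_SEL(1) |
	       VXGE_HW_LAG_TX_CFG_COLL_MAX_DELAY(0x40);
}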
2897/*0x05d48*/ u64 srpcim_to_mrpcim_vplane_rmsg[17];
2898#define \
2899VXGE_HAL_SRPCIM_TO_MRPCIM_VPLANE_RMSG_SWIF_SRPCIM_TO_MRPCIM_VPLANE_RMSG(val)\
2900 vxge_vBIT(val, 0, 64)
2901 u8 unused06420[0x06420-0x05dd0];
2902
2903/*0x06420*/ u64 mrpcim_to_srpcim_vplane_wmsg[17];
2904#define VXGE_HW_MRPCIM_TO_SRPCIM_VPLANE_WMSG_MRPCIM_TO_SRPCIM_VPLANE_WMSG(val) \
2905 vxge_vBIT(val, 0, 64)
2906/*0x064a8*/ u64 mrpcim_to_srpcim_vplane_wmsg_trig[17];
2907
2908/*0x06530*/ u64 debug_stats0;
2909#define VXGE_HW_DEBUG_STATS0_RSTDROP_MSG(val) vxge_vBIT(val, 0, 32)
2910#define VXGE_HW_DEBUG_STATS0_RSTDROP_CPL(val) vxge_vBIT(val, 32, 32)
2911/*0x06538*/ u64 debug_stats1;
2912#define VXGE_HW_DEBUG_STATS1_RSTDROP_CLIENT0(val) vxge_vBIT(val, 0, 32)
2913#define VXGE_HW_DEBUG_STATS1_RSTDROP_CLIENT1(val) vxge_vBIT(val, 32, 32)
2914/*0x06540*/ u64 debug_stats2;
2915#define VXGE_HW_DEBUG_STATS2_RSTDROP_CLIENT2(val) vxge_vBIT(val, 0, 32)
2916/*0x06548*/ u64 debug_stats3_vplane[17];
2917#define VXGE_HW_DEBUG_STATS3_VPLANE_DEPL_PH(val) vxge_vBIT(val, 0, 16)
2918#define VXGE_HW_DEBUG_STATS3_VPLANE_DEPL_NPH(val) vxge_vBIT(val, 16, 16)
2919#define VXGE_HW_DEBUG_STATS3_VPLANE_DEPL_CPLH(val) vxge_vBIT(val, 32, 16)
2920/*0x065d0*/ u64 debug_stats4_vplane[17];
2921#define VXGE_HW_DEBUG_STATS4_VPLANE_DEPL_PD(val) vxge_vBIT(val, 0, 16)
2922#define VXGE_HW_DEBUG_STATS4_VPLANE_DEPL_NPD(val) vxge_vBIT(val, 16, 16)
2923#define VXGE_HW_DEBUG_STATS4_VPLANE_DEPL_CPLD(val) vxge_vBIT(val, 32, 16)
2924
2925 u8 unused07000[0x07000-0x06658];
2926
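Editorial note (not part of the diff): the u8 unusedXXXX[end - start] members pad the structure so that each register lands at the offset named in the adjacent /*0x....*/ comment. A build-time check along the following lines can confirm that; this is a sketch only, it assumes the /*0x....*/ comments are offsets from the start of the enclosing mrpcim register block (struct vxge_hw_mrpcim_reg), and the check itself is not part of the driver.

/* Illustrative only: verify that padding keeps a register at its
 * documented offset. Struct name and include are assumptions.
 */
#include <linux/kernel.h>	/* BUILD_BUG_ON */
#include <linux/stddef.h>	/* offsetof */
#include "vxge-reg.h"		/* assumed: header defining this register block */

static inline void example_check_mrpcim_layout(void)
{
	/* mrpcim_general_int_status is documented at offset 0x07000 above. */
	BUILD_BUG_ON(offsetof(struct vxge_hw_mrpcim_reg,
			      mrpcim_general_int_status) != 0x07000);
}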
2927/*0x07000*/ u64 mrpcim_general_int_status;
2928#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PIC_INT vxge_mBIT(0)
2929#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCI_INT vxge_mBIT(1)
2930#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_RTDMA_INT vxge_mBIT(2)
2931#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_WRDMA_INT vxge_mBIT(3)
2932#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3CMCT_INT vxge_mBIT(4)
2933#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_GCMG1_INT vxge_mBIT(5)
2934#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_GCMG2_INT vxge_mBIT(6)
2935#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_GCMG3_INT vxge_mBIT(7)
2936#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3CMIFL_INT vxge_mBIT(8)
2937#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3CMIFU_INT vxge_mBIT(9)
2938#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCMG1_INT vxge_mBIT(10)
2939#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCMG2_INT vxge_mBIT(11)
2940#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCMG3_INT vxge_mBIT(12)
2941#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_XMAC_INT vxge_mBIT(13)
2942#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_RXMAC_INT vxge_mBIT(14)
2943#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_TMAC_INT vxge_mBIT(15)
2944#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3FBIF_INT vxge_mBIT(16)
2945#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_FBMC_INT vxge_mBIT(17)
2946#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3FBCT_INT vxge_mBIT(18)
2947#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_TPA_INT vxge_mBIT(19)
2948#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_DRBELL_INT vxge_mBIT(20)
2949#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_ONE_INT vxge_mBIT(21)
2950#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_MSG_INT vxge_mBIT(22)
2951/*0x07008*/ u64 mrpcim_general_int_mask;
2952#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PIC_INT vxge_mBIT(0)
2953#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCI_INT vxge_mBIT(1)
2954#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_RTDMA_INT vxge_mBIT(2)
2955#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_WRDMA_INT vxge_mBIT(3)
2956#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3CMCT_INT vxge_mBIT(4)
2957#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_GCMG1_INT vxge_mBIT(5)
2958#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_GCMG2_INT vxge_mBIT(6)
2959#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_GCMG3_INT vxge_mBIT(7)
2960#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3CMIFL_INT vxge_mBIT(8)
2961#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3CMIFU_INT vxge_mBIT(9)
2962#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCMG1_INT vxge_mBIT(10)
2963#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCMG2_INT vxge_mBIT(11)
2964#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCMG3_INT vxge_mBIT(12)
2965#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_XMAC_INT vxge_mBIT(13)
2966#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_RXMAC_INT vxge_mBIT(14)
2967#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_TMAC_INT vxge_mBIT(15)
2968#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3FBIF_INT vxge_mBIT(16)
2969#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_FBMC_INT vxge_mBIT(17)
2970#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3FBCT_INT vxge_mBIT(18)
2971#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_TPA_INT vxge_mBIT(19)
2972#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_DRBELL_INT vxge_mBIT(20)
2973#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_ONE_INT vxge_mBIT(21)
2974#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_MSG_INT vxge_mBIT(22)
2975/*0x07010*/ u64 mrpcim_ppif_int_status;
2976#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_INI_ERRORS_INI_INT vxge_mBIT(3)
2977#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_DMA_ERRORS_DMA_INT vxge_mBIT(7)
2978#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_TGT_ERRORS_TGT_INT vxge_mBIT(11)
2979#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CONFIG_ERRORS_CONFIG_INT vxge_mBIT(15)
2980#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_CRDT_INT vxge_mBIT(19)
2981#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_PLL_ERRORS_PLL_INT vxge_mBIT(27)
2982#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE0_CRD_INT_VPLANE0_INT\
2983 vxge_mBIT(31)
2984#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE1_CRD_INT_VPLANE1_INT\
2985 vxge_mBIT(32)
2986#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE2_CRD_INT_VPLANE2_INT\
2987 vxge_mBIT(33)
2988#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE3_CRD_INT_VPLANE3_INT\
2989 vxge_mBIT(34)
2990#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE4_CRD_INT_VPLANE4_INT\
2991 vxge_mBIT(35)
2992#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE5_CRD_INT_VPLANE5_INT\
2993 vxge_mBIT(36)
2994#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE6_CRD_INT_VPLANE6_INT\
2995 vxge_mBIT(37)
2996#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE7_CRD_INT_VPLANE7_INT\
2997 vxge_mBIT(38)
2998#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE8_CRD_INT_VPLANE8_INT\
2999 vxge_mBIT(39)
3000#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE9_CRD_INT_VPLANE9_INT\
3001 vxge_mBIT(40)
3002#define \
3003VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE10_CRD_INT_VPLANE10_INT \
3004 vxge_mBIT(41)
3005#define \
3006VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE11_CRD_INT_VPLANE11_INT \
3007 vxge_mBIT(42)
3008#define \
3009VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE12_CRD_INT_VPLANE12_INT \
3010 vxge_mBIT(43)
3011#define \
3012VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE13_CRD_INT_VPLANE13_INT \
3013 vxge_mBIT(44)
3014#define \
3015VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE14_CRD_INT_VPLANE14_INT \
3016 vxge_mBIT(45)
3017#define \
3018VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE15_CRD_INT_VPLANE15_INT \
3019 vxge_mBIT(46)
3020#define \
3021VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE16_CRD_INT_VPLANE16_INT \
3022 vxge_mBIT(47)
3023#define \
3024VXGE_HW_MRPCIM_PPIF_INT_STATUS_VPATH_TO_MRPCIM_ALARM_VPATH_TO_MRPCIM_ALARM_INT \
3025 vxge_mBIT(55)
3026/*0x07018*/ u64 mrpcim_ppif_int_mask;
3027 u8 unused07028[0x07028-0x07020];
3028
3029/*0x07028*/ u64 ini_errors_reg;
3030#define VXGE_HW_INI_ERRORS_REG_SCPL_CPL_TIMEOUT_UNUSED_TAG vxge_mBIT(3)
3031#define VXGE_HW_INI_ERRORS_REG_SCPL_CPL_TIMEOUT vxge_mBIT(7)
3032#define VXGE_HW_INI_ERRORS_REG_DCPL_FSM_ERR vxge_mBIT(11)
3033#define VXGE_HW_INI_ERRORS_REG_DCPL_POISON vxge_mBIT(12)
3034#define VXGE_HW_INI_ERRORS_REG_DCPL_UNSUPPORTED vxge_mBIT(15)
3035#define VXGE_HW_INI_ERRORS_REG_DCPL_ABORT vxge_mBIT(19)
3036#define VXGE_HW_INI_ERRORS_REG_INI_TLP_ABORT vxge_mBIT(23)
3037#define VXGE_HW_INI_ERRORS_REG_INI_DLLP_ABORT vxge_mBIT(27)
3038#define VXGE_HW_INI_ERRORS_REG_INI_ECRC_ERR vxge_mBIT(31)
3039#define VXGE_HW_INI_ERRORS_REG_INI_BUF_DB_ERR vxge_mBIT(35)
3040#define VXGE_HW_INI_ERRORS_REG_INI_BUF_SG_ERR vxge_mBIT(39)
3041#define VXGE_HW_INI_ERRORS_REG_INI_DATA_OVERFLOW vxge_mBIT(43)
3042#define VXGE_HW_INI_ERRORS_REG_INI_HDR_OVERFLOW vxge_mBIT(47)
3043#define VXGE_HW_INI_ERRORS_REG_INI_MRD_SYS_DROP vxge_mBIT(51)
3044#define VXGE_HW_INI_ERRORS_REG_INI_MWR_SYS_DROP vxge_mBIT(55)
3045#define VXGE_HW_INI_ERRORS_REG_INI_MRD_CLIENT_DROP vxge_mBIT(59)
3046#define VXGE_HW_INI_ERRORS_REG_INI_MWR_CLIENT_DROP vxge_mBIT(63)
3047/*0x07030*/ u64 ini_errors_mask;
3048/*0x07038*/ u64 ini_errors_alarm;
3049/*0x07040*/ u64 dma_errors_reg;
3050#define VXGE_HW_DMA_ERRORS_REG_RDARB_FSM_ERR vxge_mBIT(3)
3051#define VXGE_HW_DMA_ERRORS_REG_WRARB_FSM_ERR vxge_mBIT(7)
3052#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_HDR_OVERFLOW vxge_mBIT(8)
3053#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_HDR_UNDERFLOW vxge_mBIT(9)
3054#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_DATA_OVERFLOW vxge_mBIT(10)
3055#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_DATA_UNDERFLOW vxge_mBIT(11)
3056#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_HDR_OVERFLOW vxge_mBIT(12)
3057#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_HDR_UNDERFLOW vxge_mBIT(13)
3058#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_DATA_OVERFLOW vxge_mBIT(14)
3059#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_DATA_UNDERFLOW vxge_mBIT(15)
3060#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_HDR_OVERFLOW vxge_mBIT(16)
3061#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_HDR_UNDERFLOW vxge_mBIT(17)
3062#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_DATA_OVERFLOW vxge_mBIT(18)
3063#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_DATA_UNDERFLOW vxge_mBIT(19)
3064#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_HDR_OVERFLOW vxge_mBIT(20)
3065#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_HDR_UNDERFLOW vxge_mBIT(21)
3066#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_DATA_OVERFLOW vxge_mBIT(22)
3067#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_DATA_UNDERFLOW vxge_mBIT(23)
3068#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_RD_HDR_OVERFLOW vxge_mBIT(24)
3069#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_RD_HDR_UNDERFLOW vxge_mBIT(25)
3070#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_RD_HDR_OVERFLOW vxge_mBIT(28)
3071#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_RD_HDR_UNDERFLOW vxge_mBIT(29)
3072#define VXGE_HW_DMA_ERRORS_REG_DBLGEN_FSM_ERR vxge_mBIT(32)
3073#define VXGE_HW_DMA_ERRORS_REG_DBLGEN_CREDIT_FSM_ERR vxge_mBIT(33)
3074#define VXGE_HW_DMA_ERRORS_REG_DBLGEN_DMA_WRR_SM_ERR vxge_mBIT(34)
3075/*0x07048*/ u64 dma_errors_mask;
3076/*0x07050*/ u64 dma_errors_alarm;
3077/*0x07058*/ u64 tgt_errors_reg;
3078#define VXGE_HW_TGT_ERRORS_REG_TGT_VENDOR_MSG vxge_mBIT(0)
3079#define VXGE_HW_TGT_ERRORS_REG_TGT_MSG_UNLOCK vxge_mBIT(1)
3080#define VXGE_HW_TGT_ERRORS_REG_TGT_ILLEGAL_TLP_BE vxge_mBIT(2)
3081#define VXGE_HW_TGT_ERRORS_REG_TGT_BOOT_WRITE vxge_mBIT(3)
3082#define VXGE_HW_TGT_ERRORS_REG_TGT_PIF_WR_CROSS_QWRANGE vxge_mBIT(4)
3083#define VXGE_HW_TGT_ERRORS_REG_TGT_PIF_READ_CROSS_QWRANGE vxge_mBIT(5)
3084#define VXGE_HW_TGT_ERRORS_REG_TGT_KDFC_READ vxge_mBIT(6)
3085#define VXGE_HW_TGT_ERRORS_REG_TGT_USDC_READ vxge_mBIT(7)
3086#define VXGE_HW_TGT_ERRORS_REG_TGT_USDC_WR_CROSS_QWRANGE vxge_mBIT(8)
3087#define VXGE_HW_TGT_ERRORS_REG_TGT_MSIX_BEYOND_RANGE vxge_mBIT(9)
3088#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_KDFC_POISON vxge_mBIT(10)
3089#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_USDC_POISON vxge_mBIT(11)
3090#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_PIF_POISON vxge_mBIT(12)
3091#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_MSIX_POISON vxge_mBIT(13)
3092#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_MRIOV_POISON vxge_mBIT(14)
3093#define VXGE_HW_TGT_ERRORS_REG_TGT_NOT_MEM_TLP vxge_mBIT(15)
3094#define VXGE_HW_TGT_ERRORS_REG_TGT_UNKNOWN_MEM_TLP vxge_mBIT(16)
3095#define VXGE_HW_TGT_ERRORS_REG_TGT_REQ_FSM_ERR vxge_mBIT(17)
3096#define VXGE_HW_TGT_ERRORS_REG_TGT_CPL_FSM_ERR vxge_mBIT(18)
3097#define VXGE_HW_TGT_ERRORS_REG_TGT_KDFC_PROT_ERR vxge_mBIT(19)
3098#define VXGE_HW_TGT_ERRORS_REG_TGT_SWIF_PROT_ERR vxge_mBIT(20)
3099#define VXGE_HW_TGT_ERRORS_REG_TGT_MRIOV_MEM_MAP_CFG_ERR vxge_mBIT(21)
3100/*0x07060*/ u64 tgt_errors_mask;
3101/*0x07068*/ u64 tgt_errors_alarm;
3102/*0x07070*/ u64 config_errors_reg;
3103#define VXGE_HW_CONFIG_ERRORS_REG_I2C_ILLEGAL_STOP_COND vxge_mBIT(3)
3104#define VXGE_HW_CONFIG_ERRORS_REG_I2C_ILLEGAL_START_COND vxge_mBIT(7)
3105#define VXGE_HW_CONFIG_ERRORS_REG_I2C_EXP_RD_CNT vxge_mBIT(11)
3106#define VXGE_HW_CONFIG_ERRORS_REG_I2C_EXTRA_CYCLE vxge_mBIT(15)
3107#define VXGE_HW_CONFIG_ERRORS_REG_I2C_MAIN_FSM_ERR vxge_mBIT(19)
3108#define VXGE_HW_CONFIG_ERRORS_REG_I2C_REQ_COLLISION vxge_mBIT(23)
3109#define VXGE_HW_CONFIG_ERRORS_REG_I2C_REG_FSM_ERR vxge_mBIT(27)
3110#define VXGE_HW_CONFIG_ERRORS_REG_CFGM_I2C_TIMEOUT vxge_mBIT(31)
3111#define VXGE_HW_CONFIG_ERRORS_REG_RIC_I2C_TIMEOUT vxge_mBIT(35)
3112#define VXGE_HW_CONFIG_ERRORS_REG_CFGM_FSM_ERR vxge_mBIT(39)
3113#define VXGE_HW_CONFIG_ERRORS_REG_RIC_FSM_ERR vxge_mBIT(43)
3114#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_ILLEGAL_ACCESS vxge_mBIT(47)
3115#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_TIMEOUT vxge_mBIT(51)
3116#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_FSM_ERR vxge_mBIT(55)
3117#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_TO_FSM_ERR vxge_mBIT(59)
3118#define VXGE_HW_CONFIG_ERRORS_REG_RIC_RIC_RD_TIMEOUT vxge_mBIT(63)
3119/*0x07078*/ u64 config_errors_mask;
3120/*0x07080*/ u64 config_errors_alarm;
3121 u8 unused07090[0x07090-0x07088];
3122
3123/*0x07090*/ u64 crdt_errors_reg;
3124#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_FSM_ERR vxge_mBIT(11)
3125#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_INTCTL_ILLEGAL_CRD_DEAL \
3126 vxge_mBIT(15)
3127#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_PDA_ILLEGAL_CRD_DEAL vxge_mBIT(19)
3128#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_PCI_MSG_ILLEGAL_CRD_DEAL \
3129 vxge_mBIT(23)
3130#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_FSM_ERR vxge_mBIT(35)
3131#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_RDA_ILLEGAL_CRD_DEAL vxge_mBIT(39)
3132#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_PDA_ILLEGAL_CRD_DEAL vxge_mBIT(43)
3133#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_DBLGEN_ILLEGAL_CRD_DEAL \
3134 vxge_mBIT(47)
3135/*0x07098*/ u64 crdt_errors_mask;
3136/*0x070a0*/ u64 crdt_errors_alarm;
3137 u8 unused070b0[0x070b0-0x070a8];
3138
3139/*0x070b0*/ u64 mrpcim_general_errors_reg;
3140#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_STATSB_FSM_ERR vxge_mBIT(3)
3141#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_XGEN_FSM_ERR vxge_mBIT(7)
3142#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_XMEM_FSM_ERR vxge_mBIT(11)
3143#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_KDFCCTL_FSM_ERR vxge_mBIT(15)
3144#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_MRIOVCTL_FSM_ERR vxge_mBIT(19)
3145#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_SPI_FLSH_ERR vxge_mBIT(23)
3146#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_SPI_IIC_ACK_ERR vxge_mBIT(27)
3147#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_SPI_IIC_CHKSUM_ERR vxge_mBIT(31)
3148#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_INI_SERR_DET vxge_mBIT(35)
3149#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_INTCTL_MSIX_FSM_ERR vxge_mBIT(39)
3150#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_INTCTL_MSI_OVERFLOW vxge_mBIT(43)
3151#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_PPIF_PCI_NOT_FLUSH_DURING_SW_RESET \
3152 vxge_mBIT(47)
3153#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_PPIF_SW_RESET_FSM_ERR vxge_mBIT(51)
3154/*0x070b8*/ u64 mrpcim_general_errors_mask;
3155/*0x070c0*/ u64 mrpcim_general_errors_alarm;
3156 u8 unused070d0[0x070d0-0x070c8];
3157
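Editorial note (not part of the diff): nearly every error source here comes as a reg/mask/alarm triplet, e.g. mrpcim_general_errors_reg at 0x070b0 with its mask and alarm at 0x070b8/0x070c0. A common arrangement, assumed in the generic sketch below, is that the reg latches raw events, the mask gates their propagation, and latched bits are cleared by writing them back; this is not taken from the driver, and the function and its parameters are hypothetical.

/* Illustrative, generic reg/mask service routine (not the driver's handler).
 * Assumes write-back-to-clear semantics and mask bit == 1 means "masked".
 */
#include <linux/io.h>
#include <linux/types.h>

static u64 example_service_errors(void __iomem *err_reg,
				  void __iomem *err_mask)
{
	u64 events = readq(err_reg);	/* raw latched error bits */
	u64 masked = readq(err_mask);	/* assumed: 1 = source masked */
	u64 live = events & ~masked;

	if (events)
		writeq(events, err_reg);	/* assumed write-1-to-clear */

	return live;	/* bits the caller should act on */
}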
3158/*0x070d0*/ u64 pll_errors_reg;
3159#define VXGE_HW_PLL_ERRORS_REG_CORE_CMG_PLL_OOL vxge_mBIT(3)
3160#define VXGE_HW_PLL_ERRORS_REG_CORE_FB_PLL_OOL vxge_mBIT(7)
3161#define VXGE_HW_PLL_ERRORS_REG_CORE_X_PLL_OOL vxge_mBIT(11)
3162/*0x070d8*/ u64 pll_errors_mask;
3163/*0x070e0*/ u64 pll_errors_alarm;
3164/*0x070e8*/ u64 srpcim_to_mrpcim_alarm_reg;
3165#define VXGE_HW_SRPCIM_TO_MRPCIM_ALARM_REG_PPIF_SRPCIM_TO_MRPCIM_ALARM(val) \
3166 vxge_vBIT(val, 0, 17)
3167/*0x070f0*/ u64 srpcim_to_mrpcim_alarm_mask;
3168/*0x070f8*/ u64 srpcim_to_mrpcim_alarm_alarm;
3169/*0x07100*/ u64 vpath_to_mrpcim_alarm_reg;
3170#define VXGE_HW_VPATH_TO_MRPCIM_ALARM_REG_PPIF_VPATH_TO_MRPCIM_ALARM(val) \
3171 vxge_vBIT(val, 0, 17)
3172/*0x07108*/ u64 vpath_to_mrpcim_alarm_mask;
3173/*0x07110*/ u64 vpath_to_mrpcim_alarm_alarm;
3174 u8 unused07128[0x07128-0x07118];
3175
3176/*0x07128*/ u64 crdt_errors_vplane_reg[17];
3177#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_H_CONSUME_CRDT_ERR \
3178 vxge_mBIT(3)
3179#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_D_CONSUME_CRDT_ERR \
3180 vxge_mBIT(7)
3181#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_H_RETURN_CRDT_ERR \
3182 vxge_mBIT(11)
3183#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_D_RETURN_CRDT_ERR \
3184 vxge_mBIT(15)
3185#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_NP_H_CONSUME_CRDT_ERR \
3186 vxge_mBIT(19)
3187#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_NP_H_RETURN_CRDT_ERR \
3188 vxge_mBIT(23)
3189#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_TAG_CONSUME_TAG_ERR \
3190 vxge_mBIT(27)
3191#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_TAG_RETURN_TAG_ERR \
3192 vxge_mBIT(31)
3193/*0x07130*/ u64 crdt_errors_vplane_mask[17];
3194/*0x07138*/ u64 crdt_errors_vplane_alarm[17];
3195 u8 unused072f0[0x072f0-0x072c0];
3196
3197/*0x072f0*/ u64 mrpcim_rst_in_prog;
3198#define VXGE_HW_MRPCIM_RST_IN_PROG_MRPCIM_RST_IN_PROG vxge_mBIT(7)
3199/*0x072f8*/ u64 mrpcim_reg_modified;
3200#define VXGE_HW_MRPCIM_REG_MODIFIED_MRPCIM_REG_MODIFIED vxge_mBIT(7)
3201
3202 u8 unused07378[0x07378-0x07300];
3203
3204/*0x07378*/ u64 write_arb_pending;
3205#define VXGE_HW_WRITE_ARB_PENDING_WRARB_WRDMA vxge_mBIT(3)
3206#define VXGE_HW_WRITE_ARB_PENDING_WRARB_RTDMA vxge_mBIT(7)
3207#define VXGE_HW_WRITE_ARB_PENDING_WRARB_MSG vxge_mBIT(11)
3208#define VXGE_HW_WRITE_ARB_PENDING_WRARB_STATSB vxge_mBIT(15)
3209#define VXGE_HW_WRITE_ARB_PENDING_WRARB_INTCTL vxge_mBIT(19)
3210/*0x07380*/ u64 read_arb_pending;
3211#define VXGE_HW_READ_ARB_PENDING_RDARB_WRDMA vxge_mBIT(3)
3212#define VXGE_HW_READ_ARB_PENDING_RDARB_RTDMA vxge_mBIT(7)
3213#define VXGE_HW_READ_ARB_PENDING_RDARB_DBLGEN vxge_mBIT(11)
3214/*0x07388*/ u64 dmaif_dmadbl_pending;
3215#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_WRDMA_WR vxge_mBIT(0)
3216#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_WRDMA_RD vxge_mBIT(1)
3217#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_RTDMA_WR vxge_mBIT(2)
3218#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_RTDMA_RD vxge_mBIT(3)
3219#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_MSG_WR vxge_mBIT(4)
3220#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_STATS_WR vxge_mBIT(5)
3221#define VXGE_HW_DMAIF_DMADBL_PENDING_DBLGEN_IN_PROG(val) \
3222 vxge_vBIT(val, 13, 51)
3223/*0x07390*/ u64 wrcrdtarb_status0_vplane[17];
3224#define VXGE_HW_WRCRDTARB_STATUS0_VPLANE_WRCRDTARB_ABS_AVAIL_P_H(val) \
3225 vxge_vBIT(val, 0, 8)
3226/*0x07418*/ u64 wrcrdtarb_status1_vplane[17];
3227#define VXGE_HW_WRCRDTARB_STATUS1_VPLANE_WRCRDTARB_ABS_AVAIL_P_D(val) \
3228 vxge_vBIT(val, 4, 12)
3229 u8 unused07500[0x07500-0x074a0];
3230
3231/*0x07500*/ u64 mrpcim_general_cfg1;
3232#define VXGE_HW_MRPCIM_GENERAL_CFG1_CLEAR_SERR vxge_mBIT(7)
3233/*0x07508*/ u64 mrpcim_general_cfg2;
3234#define VXGE_HW_MRPCIM_GENERAL_CFG2_INS_TX_WR_TD vxge_mBIT(3)
3235#define VXGE_HW_MRPCIM_GENERAL_CFG2_INS_TX_RD_TD vxge_mBIT(7)
3236#define VXGE_HW_MRPCIM_GENERAL_CFG2_INS_TX_CPL_TD vxge_mBIT(11)
3237#define VXGE_HW_MRPCIM_GENERAL_CFG2_INI_TIMEOUT_EN_MWR vxge_mBIT(15)
3238#define VXGE_HW_MRPCIM_GENERAL_CFG2_INI_TIMEOUT_EN_MRD vxge_mBIT(19)
3239#define VXGE_HW_MRPCIM_GENERAL_CFG2_IGNORE_VPATH_RST_FOR_MSIX vxge_mBIT(23)
3240#define VXGE_HW_MRPCIM_GENERAL_CFG2_FLASH_READ_MSB vxge_mBIT(27)
3241#define VXGE_HW_MRPCIM_GENERAL_CFG2_DIS_HOST_PIPELINE_WR vxge_mBIT(31)
3242#define VXGE_HW_MRPCIM_GENERAL_CFG2_MRPCIM_STATS_ENABLE vxge_mBIT(43)
3243#define VXGE_HW_MRPCIM_GENERAL_CFG2_MRPCIM_STATS_MAP_TO_VPATH(val) \
3244 vxge_vBIT(val, 47, 5)
3245#define VXGE_HW_MRPCIM_GENERAL_CFG2_EN_BLOCK_MSIX_DUE_TO_SERR vxge_mBIT(55)
3246#define VXGE_HW_MRPCIM_GENERAL_CFG2_FORCE_SENDING_INTA vxge_mBIT(59)
3247#define VXGE_HW_MRPCIM_GENERAL_CFG2_DIS_SWIF_PROT_ON_RDS vxge_mBIT(63)
3248/*0x07510*/ u64 mrpcim_general_cfg3;
3249#define VXGE_HW_MRPCIM_GENERAL_CFG3_PROTECTION_CA_OR_UNSUPN vxge_mBIT(0)
3250#define VXGE_HW_MRPCIM_GENERAL_CFG3_ILLEGAL_RD_CA_OR_UNSUPN vxge_mBIT(3)
3251#define VXGE_HW_MRPCIM_GENERAL_CFG3_RD_BYTE_SWAPEN vxge_mBIT(7)
3252#define VXGE_HW_MRPCIM_GENERAL_CFG3_RD_BIT_FLIPEN vxge_mBIT(11)
3253#define VXGE_HW_MRPCIM_GENERAL_CFG3_WR_BYTE_SWAPEN vxge_mBIT(15)
3254#define VXGE_HW_MRPCIM_GENERAL_CFG3_WR_BIT_FLIPEN vxge_mBIT(19)
3255#define VXGE_HW_MRPCIM_GENERAL_CFG3_MR_MAX_MVFS(val) vxge_vBIT(val, 20, 16)
3256#define VXGE_HW_MRPCIM_GENERAL_CFG3_MR_MVF_TBL_SIZE(val) \
3257 vxge_vBIT(val, 36, 16)
3258#define VXGE_HW_MRPCIM_GENERAL_CFG3_PF0_SW_RESET_EN vxge_mBIT(55)
3259#define VXGE_HW_MRPCIM_GENERAL_CFG3_REG_MODIFIED_CFG(val) vxge_vBIT(val, 56, 2)
3260#define VXGE_HW_MRPCIM_GENERAL_CFG3_CPL_ECC_ENABLE_N vxge_mBIT(59)
3261#define VXGE_HW_MRPCIM_GENERAL_CFG3_BYPASS_DAISY_CHAIN vxge_mBIT(63)
3262/*0x07518*/ u64 mrpcim_stats_start_host_addr;
3263#define VXGE_HW_MRPCIM_STATS_START_HOST_ADDR_MRPCIM_STATS_START_HOST_ADDR(val)\
3264 vxge_vBIT(val, 0, 57)
3265
3266 u8 unused07950[0x07950-0x07520];
3267
3268/*0x07950*/ u64 rdcrdtarb_cfg0;
3269#define VXGE_HW_RDCRDTARB_CFG0_RDA_MAX_OUTSTANDING_RDS(val) \
3270 vxge_vBIT(val, 18, 6)
3271#define VXGE_HW_RDCRDTARB_CFG0_PDA_MAX_OUTSTANDING_RDS(val) \
3272 vxge_vBIT(val, 26, 6)
3273#define VXGE_HW_RDCRDTARB_CFG0_DBLGEN_MAX_OUTSTANDING_RDS(val) \
3274 vxge_vBIT(val, 34, 6)
3275#define VXGE_HW_RDCRDTARB_CFG0_WAIT_CNT(val) vxge_vBIT(val, 48, 4)
3276#define VXGE_HW_RDCRDTARB_CFG0_MAX_OUTSTANDING_RDS(val) vxge_vBIT(val, 54, 6)
3277#define VXGE_HW_RDCRDTARB_CFG0_EN_XON vxge_mBIT(63)
3278 u8 unused07be8[0x07be8-0x07958];
3279
3280/*0x07be8*/ u64 bf_sw_reset;
3281#define VXGE_HW_BF_SW_RESET_BF_SW_RESET(val) vxge_vBIT(val, 0, 8)
3282/*0x07bf0*/ u64 sw_reset_status;
3283#define VXGE_HW_SW_RESET_STATUS_RESET_CMPLT vxge_mBIT(7)
3284#define VXGE_HW_SW_RESET_STATUS_INIT_CMPLT vxge_mBIT(15)
3285 u8 unused07d30[0x07d30-0x07bf8];
3286
3287/*0x07d30*/ u64 mrpcim_debug_stats0;
3288#define VXGE_HW_MRPCIM_DEBUG_STATS0_INI_WR_DROP(val) vxge_vBIT(val, 0, 32)
3289#define VXGE_HW_MRPCIM_DEBUG_STATS0_INI_RD_DROP(val) vxge_vBIT(val, 32, 32)
3290/*0x07d38*/ u64 mrpcim_debug_stats1_vplane[17];
3291#define VXGE_HW_MRPCIM_DEBUG_STATS1_VPLANE_WRCRDTARB_PH_CRDT_DEPLETED(val) \
3292 vxge_vBIT(val, 32, 32)
3293/*0x07dc0*/ u64 mrpcim_debug_stats2_vplane[17];
3294#define VXGE_HW_MRPCIM_DEBUG_STATS2_VPLANE_WRCRDTARB_PD_CRDT_DEPLETED(val) \
3295 vxge_vBIT(val, 32, 32)
3296/*0x07e48*/ u64 mrpcim_debug_stats3_vplane[17];
3297#define VXGE_HW_MRPCIM_DEBUG_STATS3_VPLANE_RDCRDTARB_NPH_CRDT_DEPLETED(val) \
3298 vxge_vBIT(val, 32, 32)
3299/*0x07ed0*/ u64 mrpcim_debug_stats4;
3300#define VXGE_HW_MRPCIM_DEBUG_STATS4_INI_WR_VPIN_DROP(val) vxge_vBIT(val, 0, 32)
3301#define VXGE_HW_MRPCIM_DEBUG_STATS4_INI_RD_VPIN_DROP(val) \
3302 vxge_vBIT(val, 32, 32)
3303/*0x07ed8*/ u64 genstats_count01;
3304#define VXGE_HW_GENSTATS_COUNT01_GENSTATS_COUNT1(val) vxge_vBIT(val, 0, 32)
3305#define VXGE_HW_GENSTATS_COUNT01_GENSTATS_COUNT0(val) vxge_vBIT(val, 32, 32)
3306/*0x07ee0*/ u64 genstats_count23;
3307#define VXGE_HW_GENSTATS_COUNT23_GENSTATS_COUNT3(val) vxge_vBIT(val, 0, 32)
3308#define VXGE_HW_GENSTATS_COUNT23_GENSTATS_COUNT2(val) vxge_vBIT(val, 32, 32)
3309/*0x07ee8*/ u64 genstats_count4;
3310#define VXGE_HW_GENSTATS_COUNT4_GENSTATS_COUNT4(val) vxge_vBIT(val, 32, 32)
3311/*0x07ef0*/ u64 genstats_count5;
3312#define VXGE_HW_GENSTATS_COUNT5_GENSTATS_COUNT5(val) vxge_vBIT(val, 32, 32)
3313
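Editorial note (not part of the diff): the genstats registers pack two 32-bit counters per 64-bit word; per the vBIT placements above, GENSTATS_COUNT1 occupies MSB-relative bits 0..31 (the upper half) and GENSTATS_COUNT0 bits 32..63 (the lower half). A minimal decoding sketch, with hypothetical helper names:

/* Illustrative only: split genstats_count01 into its two 32-bit counters,
 * following vxge_vBIT(val, 0, 32) / vxge_vBIT(val, 32, 32) above
 * (bit 0 = most significant bit of the u64).
 */
#include <linux/types.h>

static inline u32 example_genstats_count1(u64 reg)	/* bits 0..31  */
{
	return (u32)(reg >> 32);
}

static inline u32 example_genstats_count0(u64 reg)	/* bits 32..63 */
{
	return (u32)(reg & 0xffffffffULL);
}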
3314 u8 unused07f08[0x07f08-0x07ef8];
3315
3316/*0x07f08*/ u64 genstats_cfg[6];
3317#define VXGE_HW_GENSTATS_CFG_DTYPE_SEL(val) vxge_vBIT(val, 3, 5)
3318#define VXGE_HW_GENSTATS_CFG_CLIENT_NO_SEL(val) vxge_vBIT(val, 9, 3)
3319#define VXGE_HW_GENSTATS_CFG_WR_RD_CPL_SEL(val) vxge_vBIT(val, 14, 2)
3320#define VXGE_HW_GENSTATS_CFG_VPATH_SEL(val) vxge_vBIT(val, 31, 17)
3321/*0x07f38*/ u64 genstat_64bit_cfg;
3322#define VXGE_HW_GENSTAT_64BIT_CFG_EN_FOR_GENSTATS0 vxge_mBIT(3)
3323#define VXGE_HW_GENSTAT_64BIT_CFG_EN_FOR_GENSTATS2 vxge_mBIT(7)
3324 u8 unused08000[0x08000-0x07f40];
3325/*0x08000*/ u64 gcmg3_int_status;
3326#define VXGE_HW_GCMG3_INT_STATUS_GSTC_ERR0_GSTC0_INT vxge_mBIT(0)
3327#define VXGE_HW_GCMG3_INT_STATUS_GSTC_ERR1_GSTC1_INT vxge_mBIT(1)
3328#define VXGE_HW_GCMG3_INT_STATUS_GH2L_ERR0_GH2L0_INT vxge_mBIT(2)
3329#define VXGE_HW_GCMG3_INT_STATUS_GHSQ_ERR_GH2L1_INT vxge_mBIT(3)
3330#define VXGE_HW_GCMG3_INT_STATUS_GHSQ_ERR2_GH2L2_INT vxge_mBIT(4)
3331#define VXGE_HW_GCMG3_INT_STATUS_GH2L_SMERR0_GH2L3_INT vxge_mBIT(5)
3332#define VXGE_HW_GCMG3_INT_STATUS_GHSQ_ERR3_GH2L4_INT vxge_mBIT(6)
3333/*0x08008*/ u64 gcmg3_int_mask;
3334 u8 unused09000[0x09000-0x8010];
3335
3336/*0x09000*/ u64 g3ifcmd_fb_int_status;
3337#define VXGE_HW_G3IFCMD_FB_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
3338/*0x09008*/ u64 g3ifcmd_fb_int_mask;
3339/*0x09010*/ u64 g3ifcmd_fb_err_reg;
3340#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_CK_DLL_LOCK vxge_mBIT(6)
3341#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_SM_ERR vxge_mBIT(7)
3342#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_RWDQS_DLL_LOCK(val) \
3343 vxge_vBIT(val, 24, 8)
3344#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_IOCAL_FAULT vxge_mBIT(55)
3345/*0x09018*/ u64 g3ifcmd_fb_err_mask;
3346/*0x09020*/ u64 g3ifcmd_fb_err_alarm;
3347
3348 u8 unused09400[0x09400-0x09028];
3349
3350/*0x09400*/ u64 g3ifcmd_cmu_int_status;
3351#define VXGE_HW_G3IFCMD_CMU_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
3352/*0x09408*/ u64 g3ifcmd_cmu_int_mask;
3353/*0x09410*/ u64 g3ifcmd_cmu_err_reg;
3354#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_CK_DLL_LOCK vxge_mBIT(6)
3355#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_SM_ERR vxge_mBIT(7)
3356#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_RWDQS_DLL_LOCK(val) \
3357 vxge_vBIT(val, 24, 8)
3358#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_IOCAL_FAULT vxge_mBIT(55)
3359/*0x09418*/ u64 g3ifcmd_cmu_err_mask;
3360/*0x09420*/ u64 g3ifcmd_cmu_err_alarm;
3361
3362 u8 unused09800[0x09800-0x09428];
3363
3364/*0x09800*/ u64 g3ifcmd_cml_int_status;
3365#define VXGE_HW_G3IFCMD_CML_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
3366/*0x09808*/ u64 g3ifcmd_cml_int_mask;
3367/*0x09810*/ u64 g3ifcmd_cml_err_reg;
3368#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_CK_DLL_LOCK vxge_mBIT(6)
3369#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_SM_ERR vxge_mBIT(7)
3370#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_RWDQS_DLL_LOCK(val) \
3371 vxge_vBIT(val, 24, 8)
3372#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_IOCAL_FAULT vxge_mBIT(55)
3373/*0x09818*/ u64 g3ifcmd_cml_err_mask;
3374/*0x09820*/ u64 g3ifcmd_cml_err_alarm;
3375 u8 unused09b00[0x09b00-0x09828];
3376
3377/*0x09b00*/ u64 vpath_to_vplane_map[17];
3378#define VXGE_HW_VPATH_TO_VPLANE_MAP_VPATH_TO_VPLANE_MAP(val) \
3379 vxge_vBIT(val, 3, 5)
3380 u8 unused09c30[0x09c30-0x09b88];
3381
3382/*0x09c30*/ u64 xgxs_cfg_port[2];
3383#define VXGE_HW_XGXS_CFG_PORT_SIG_DETECT_FORCE_LOS(val) vxge_vBIT(val, 16, 4)
3384#define VXGE_HW_XGXS_CFG_PORT_SIG_DETECT_FORCE_VALID(val) vxge_vBIT(val, 20, 4)
3385#define VXGE_HW_XGXS_CFG_PORT_SEL_INFO_0 vxge_mBIT(27)
3386#define VXGE_HW_XGXS_CFG_PORT_SEL_INFO_1(val) vxge_vBIT(val, 29, 3)
3387#define VXGE_HW_XGXS_CFG_PORT_TX_LANE0_SKEW(val) vxge_vBIT(val, 32, 4)
3388#define VXGE_HW_XGXS_CFG_PORT_TX_LANE1_SKEW(val) vxge_vBIT(val, 36, 4)
3389#define VXGE_HW_XGXS_CFG_PORT_TX_LANE2_SKEW(val) vxge_vBIT(val, 40, 4)
3390#define VXGE_HW_XGXS_CFG_PORT_TX_LANE3_SKEW(val) vxge_vBIT(val, 44, 4)
3391/*0x09c40*/ u64 xgxs_rxber_cfg_port[2];
3392#define VXGE_HW_XGXS_RXBER_CFG_PORT_INTERVAL_DUR(val) vxge_vBIT(val, 0, 4)
3393#define VXGE_HW_XGXS_RXBER_CFG_PORT_RXGXS_INTERVAL_CNT(val) \
3394 vxge_vBIT(val, 16, 48)
3395/*0x09c50*/ u64 xgxs_rxber_status_port[2];
3396#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_A_ERR_CNT(val) \
3397 vxge_vBIT(val, 0, 16)
3398#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_B_ERR_CNT(val) \
3399 vxge_vBIT(val, 16, 16)
3400#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_C_ERR_CNT(val) \
3401 vxge_vBIT(val, 32, 16)
3402#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_D_ERR_CNT(val) \
3403 vxge_vBIT(val, 48, 16)
3404/*0x09c60*/ u64 xgxs_status_port[2];
3405#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_TX_ACTIVITY(val) vxge_vBIT(val, 0, 4)
3406#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_RX_ACTIVITY(val) vxge_vBIT(val, 4, 4)
3407#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_CTC_FIFO_ERR vxge_mBIT(11)
3408#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_BYTE_SYNC_LOST(val) \
3409 vxge_vBIT(val, 12, 4)
3410#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_CTC_ERR(val) vxge_vBIT(val, 16, 4)
3411#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_ALIGNMENT_ERR vxge_mBIT(23)
3412#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_DEC_ERR(val) vxge_vBIT(val, 24, 8)
3413#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_SKIP_INS_REQ(val) \
3414 vxge_vBIT(val, 32, 4)
3415#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_SKIP_DEL_REQ(val) \
3416 vxge_vBIT(val, 36, 4)
3417/*0x09c70*/ u64 xgxs_pma_reset_port[2];
3418#define VXGE_HW_XGXS_PMA_RESET_PORT_SERDES_RESET(val) vxge_vBIT(val, 0, 8)
3419 u8 unused09c90[0x09c90-0x09c80];
3420
3421/*0x09c90*/ u64 xgxs_static_cfg_port[2];
3422#define VXGE_HW_XGXS_STATIC_CFG_PORT_FW_CTRL_SERDES vxge_mBIT(3)
3423 u8 unused09d40[0x09d40-0x09ca0];
3424
3425/*0x09d40*/ u64 xgxs_info_port[2];
3426#define VXGE_HW_XGXS_INFO_PORT_XMACJ_INFO_0(val) vxge_vBIT(val, 0, 32)
3427#define VXGE_HW_XGXS_INFO_PORT_XMACJ_INFO_1(val) vxge_vBIT(val, 32, 32)
3428/*0x09d50*/ u64 ratemgmt_cfg_port[2];
3429#define VXGE_HW_RATEMGMT_CFG_PORT_MODE(val) vxge_vBIT(val, 2, 2)
3430#define VXGE_HW_RATEMGMT_CFG_PORT_RATE vxge_mBIT(7)
3431#define VXGE_HW_RATEMGMT_CFG_PORT_FIXED_USE_FSM vxge_mBIT(11)
3432#define VXGE_HW_RATEMGMT_CFG_PORT_ANTP_USE_FSM vxge_mBIT(15)
3433#define VXGE_HW_RATEMGMT_CFG_PORT_ANBE_USE_FSM vxge_mBIT(19)
3434/*0x09d60*/ u64 ratemgmt_status_port[2];
3435#define VXGE_HW_RATEMGMT_STATUS_PORT_RATEMGMT_COMPLETE vxge_mBIT(3)
3436#define VXGE_HW_RATEMGMT_STATUS_PORT_RATEMGMT_RATE vxge_mBIT(7)
3437#define VXGE_HW_RATEMGMT_STATUS_PORT_RATEMGMT_MAC_MATCHES_PHY vxge_mBIT(11)
3438 u8 unused09d80[0x09d80-0x09d70];
3439
3440/*0x09d80*/ u64 ratemgmt_fixed_cfg_port[2];
3441#define VXGE_HW_RATEMGMT_FIXED_CFG_PORT_RESTART vxge_mBIT(7)
3442/*0x09d90*/ u64 ratemgmt_antp_cfg_port[2];
3443#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_RESTART vxge_mBIT(7)
3444#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_USE_PREAMBLE_EXT_PHY vxge_mBIT(11)
3445#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_USE_ACT_SEL vxge_mBIT(15)
3446#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_T_RETRY_PHY_QUERY(val) \
3447 vxge_vBIT(val, 16, 4)
3448#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_T_WAIT_MDIO_RESPONSE(val) \
3449 vxge_vBIT(val, 20, 4)
3450#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_T_LDOWN_REAUTO_RESPONSE(val) \
3451 vxge_vBIT(val, 24, 4)
3452#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_ADVERTISE_10G vxge_mBIT(31)
3453#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_ADVERTISE_1G vxge_mBIT(35)
3454/*0x09da0*/ u64 ratemgmt_anbe_cfg_port[2];
3455#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_RESTART vxge_mBIT(7)
3456#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_PARALLEL_DETECT_10G_KX4_ENABLE \
3457 vxge_mBIT(11)
3458#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_PARALLEL_DETECT_1G_KX_ENABLE \
3459 vxge_mBIT(15)
3460#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_T_SYNC_10G_KX4(val) vxge_vBIT(val, 16, 4)
3461#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_T_SYNC_1G_KX(val) vxge_vBIT(val, 20, 4)
3462#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_T_DME_EXCHANGE(val) vxge_vBIT(val, 24, 4)
3463#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_ADVERTISE_10G_KX4 vxge_mBIT(31)
3464#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_ADVERTISE_1G_KX vxge_mBIT(35)
3465/*0x09db0*/ u64 anbe_cfg_port[2];
3466#define VXGE_HW_ANBE_CFG_PORT_RESET_CFG_REGS(val) vxge_vBIT(val, 0, 8)
3467#define VXGE_HW_ANBE_CFG_PORT_ALIGN_10G_KX4_OVERRIDE(val) vxge_vBIT(val, 10, 2)
3468#define VXGE_HW_ANBE_CFG_PORT_SYNC_1G_KX_OVERRIDE(val) vxge_vBIT(val, 14, 2)
3469/*0x09dc0*/ u64 anbe_mgr_ctrl_port[2];
3470#define VXGE_HW_ANBE_MGR_CTRL_PORT_WE vxge_mBIT(3)
3471#define VXGE_HW_ANBE_MGR_CTRL_PORT_STROBE vxge_mBIT(7)
3472#define VXGE_HW_ANBE_MGR_CTRL_PORT_ADDR(val) vxge_vBIT(val, 15, 9)
3473#define VXGE_HW_ANBE_MGR_CTRL_PORT_DATA(val) vxge_vBIT(val, 32, 32)
3474 u8 unused09de0[0x09de0-0x09dd0];
3475
3476/*0x09de0*/ u64 anbe_fw_mstr_port[2];
3477#define VXGE_HW_ANBE_FW_MSTR_PORT_CONNECT_BEAN_TO_SERDES vxge_mBIT(3)
3478#define VXGE_HW_ANBE_FW_MSTR_PORT_TX_ZEROES_TO_SERDES vxge_mBIT(7)
3479/*0x09df0*/ u64 anbe_hwfsm_gen_status_port[2];
3480#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_10G_KX4_USING_PD \
3481 vxge_mBIT(3)
3482#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_10G_KX4_USING_DME \
3483 vxge_mBIT(7)
3484#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_1G_KX_USING_PD \
3485 vxge_mBIT(11)
3486#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_1G_KX_USING_DME \
3487 vxge_mBIT(15)
3488#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_ANBEFSM_STATE(val) \
3489 vxge_vBIT(val, 18, 6)
3490#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_BEAN_NEXT_PAGE_RECEIVED \
3491 vxge_mBIT(27)
3492#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_BEAN_BASE_PAGE_RECEIVED \
3493 vxge_mBIT(35)
3494#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_BEAN_AUTONEG_COMPLETE \
3495 vxge_mBIT(39)
3496#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_NP_BEFORE_BP \
3497 vxge_mBIT(43)
3498#define \
3499VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_AN_COMPLETE_BEFORE_BP \
3500 vxge_mBIT(47)
3501#define \
3502VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_AN_COMPLETE_BEFORE_NP \
3503 vxge_mBIT(51)
3504#define \
3505VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_MODE_WHEN_AN_COMPLETE \
3506 vxge_mBIT(55)
3507#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_COUNT_BP(val) \
3508 vxge_vBIT(val, 56, 4)
3509#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_COUNT_NP(val) \
3510 vxge_vBIT(val, 60, 4)
3511/*0x09e00*/ u64 anbe_hwfsm_bp_status_port[2];
3512#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_FEC_ENABLE \
3513 vxge_mBIT(32)
3514#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_FEC_ABILITY \
3515 vxge_mBIT(33)
3516#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_10G_KR_CAPABLE \
3517 vxge_mBIT(40)
3518#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_10G_KX4_CAPABLE \
3519 vxge_mBIT(41)
3520#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_1G_KX_CAPABLE \
3521 vxge_mBIT(42)
3522#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_TX_NONCE(val) \
3523 vxge_vBIT(val, 43, 5)
3524#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_NP vxge_mBIT(48)
3525#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ACK vxge_mBIT(49)
3526#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_REMOTE_FAULT \
3527 vxge_mBIT(50)
3528#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ASM_DIR vxge_mBIT(51)
3529#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_PAUSE vxge_mBIT(53)
3530#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ECHOED_NONCE(val) \
3531 vxge_vBIT(val, 54, 5)
3532#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_SELECTOR_FIELD(val) \
3533 vxge_vBIT(val, 59, 5)
3534/*0x09e10*/ u64 anbe_hwfsm_np_status_port[2];
3535#define VXGE_HW_ANBE_HWFSM_NP_STATUS_PORT_RATEMGMT_NP_BITS_47_TO_32(val) \
3536 vxge_vBIT(val, 16, 16)
3537#define VXGE_HW_ANBE_HWFSM_NP_STATUS_PORT_RATEMGMT_NP_BITS_31_TO_0(val) \
3538 vxge_vBIT(val, 32, 32)
3539 u8 unused09e30[0x09e30-0x09e20];
3540
3541/*0x09e30*/ u64 antp_gen_cfg_port[2];
3542/*0x09e40*/ u64 antp_hwfsm_gen_status_port[2];
3543#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_10G vxge_mBIT(3)
3544#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_1G vxge_mBIT(7)
3545#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_ANTPFSM_STATE(val) \
3546 vxge_vBIT(val, 10, 6)
3547#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_AUTONEG_COMPLETE \
3548 vxge_mBIT(23)
3549#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_NO_LP_XNP \
3550 vxge_mBIT(27)
3551#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_GOT_LP_XNP vxge_mBIT(31)
3552#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_MESSAGE_CODE \
3553 vxge_mBIT(35)
3554#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_NO_HCD \
3555 vxge_mBIT(43)
3556#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_FOUND_HCD vxge_mBIT(47)
3557#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_INVALID_RATE \
3558 vxge_mBIT(51)
3559#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_VALID_RATE vxge_mBIT(55)
3560#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_PERSISTENT_LDOWN \
3561 vxge_mBIT(59)
3562/*0x09e50*/ u64 antp_hwfsm_bp_status_port[2];
3563#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_NP vxge_mBIT(0)
3564#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ACK vxge_mBIT(1)
3565#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_RF vxge_mBIT(2)
3566#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_XNP vxge_mBIT(3)
3567#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ABILITY_FIELD(val) \
3568 vxge_vBIT(val, 4, 7)
3569#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_SELECTOR_FIELD(val) \
3570 vxge_vBIT(val, 11, 5)
3571/*0x09e60*/ u64 antp_hwfsm_xnp_status_port[2];
3572#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_NP vxge_mBIT(0)
3573#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_ACK vxge_mBIT(1)
3574#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_MP vxge_mBIT(2)
3575#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_ACK2 vxge_mBIT(3)
3576#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_TOGGLE vxge_mBIT(4)
3577#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_MESSAGE_CODE(val) \
3578 vxge_vBIT(val, 5, 11)
3579#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_UNF_CODE_FIELD1(val) \
3580 vxge_vBIT(val, 16, 16)
3581#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_UNF_CODE_FIELD2(val) \
3582 vxge_vBIT(val, 32, 16)
3583/*0x09e70*/ u64 mdio_mgr_access_port[2];
3584#define VXGE_HW_MDIO_MGR_ACCESS_PORT_STROBE_ONE BIT(3)
3585#define VXGE_HW_MDIO_MGR_ACCESS_PORT_OP_TYPE(val) vxge_vBIT(val, 5, 3)
3586#define VXGE_HW_MDIO_MGR_ACCESS_PORT_DEVAD(val) vxge_vBIT(val, 11, 5)
3587#define VXGE_HW_MDIO_MGR_ACCESS_PORT_ADDR(val) vxge_vBIT(val, 16, 16)
3588#define VXGE_HW_MDIO_MGR_ACCESS_PORT_DATA(val) vxge_vBIT(val, 32, 16)
3589#define VXGE_HW_MDIO_MGR_ACCESS_PORT_ST_PATTERN(val) vxge_vBIT(val, 49, 2)
3590#define VXGE_HW_MDIO_MGR_ACCESS_PORT_PREAMBLE vxge_mBIT(51)
3591#define VXGE_HW_MDIO_MGR_ACCESS_PORT_PRTAD(val) vxge_vBIT(val, 55, 5)
3592#define VXGE_HW_MDIO_MGR_ACCESS_PORT_STROBE_TWO vxge_mBIT(63)
3593 u8 unused0a200[0x0a200-0x09e80];
3594/*0x0a200*/ u64 xmac_vsport_choices_vh[17];
3595#define VXGE_HW_XMAC_VSPORT_CHOICES_VH_VSPORT_VECTOR(val) vxge_vBIT(val, 0, 17)
3596 u8 unused0a400[0x0a400-0x0a288];
3597
3598/*0x0a400*/ u64 rx_thresh_cfg_vp[17];
3599#define VXGE_HW_RX_THRESH_CFG_VP_PAUSE_LOW_THR(val) vxge_vBIT(val, 0, 8)
3600#define VXGE_HW_RX_THRESH_CFG_VP_PAUSE_HIGH_THR(val) vxge_vBIT(val, 8, 8)
3601#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_0(val) vxge_vBIT(val, 16, 8)
3602#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_1(val) vxge_vBIT(val, 24, 8)
3603#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_2(val) vxge_vBIT(val, 32, 8)
3604#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_3(val) vxge_vBIT(val, 40, 8)
3605 u8 unused0ac90[0x0ac90-0x0a488];
3606} __packed;
3607
3608/*VXGE_HW_SRPCIM_REGS_H*/
3609struct vxge_hw_srpcim_reg {
3610
3611/*0x00000*/ u64 tim_mr2sr_resource_assignment_vh;
3612#define VXGE_HW_TIM_MR2SR_RESOURCE_ASSIGNMENT_VH_BMAP_ROOT(val) \
3613 vxge_vBIT(val, 0, 32)
3614 u8 unused00100[0x00100-0x00008];
3615
3616/*0x00100*/ u64 srpcim_pcipif_int_status;
3617#define VXGE_HW_SRPCIM_PCIPIF_INT_STATUS_MRPCIM_MSG_MRPCIM_MSG_INT BIT(3)
3618#define VXGE_HW_SRPCIM_PCIPIF_INT_STATUS_VPATH_MSG_VPATH_MSG_INT BIT(7)
3619#define VXGE_HW_SRPCIM_PCIPIF_INT_STATUS_SRPCIM_SPARE_R1_SRPCIM_SPARE_R1_INT \
3620 BIT(11)
3621/*0x00108*/ u64 srpcim_pcipif_int_mask;
3622/*0x00110*/ u64 mrpcim_msg_reg;
3623#define VXGE_HW_MRPCIM_MSG_REG_SWIF_MRPCIM_TO_SRPCIM_RMSG_INT BIT(3)
3624/*0x00118*/ u64 mrpcim_msg_mask;
3625/*0x00120*/ u64 mrpcim_msg_alarm;
3626/*0x00128*/ u64 vpath_msg_reg;
3627#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH0_TO_SRPCIM_RMSG_INT BIT(0)
3628#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH1_TO_SRPCIM_RMSG_INT BIT(1)
3629#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH2_TO_SRPCIM_RMSG_INT BIT(2)
3630#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH3_TO_SRPCIM_RMSG_INT BIT(3)
3631#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH4_TO_SRPCIM_RMSG_INT BIT(4)
3632#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH5_TO_SRPCIM_RMSG_INT BIT(5)
3633#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH6_TO_SRPCIM_RMSG_INT BIT(6)
3634#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH7_TO_SRPCIM_RMSG_INT BIT(7)
3635#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH8_TO_SRPCIM_RMSG_INT BIT(8)
3636#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH9_TO_SRPCIM_RMSG_INT BIT(9)
3637#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH10_TO_SRPCIM_RMSG_INT BIT(10)
3638#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH11_TO_SRPCIM_RMSG_INT BIT(11)
3639#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH12_TO_SRPCIM_RMSG_INT BIT(12)
3640#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH13_TO_SRPCIM_RMSG_INT BIT(13)
3641#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH14_TO_SRPCIM_RMSG_INT BIT(14)
3642#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH15_TO_SRPCIM_RMSG_INT BIT(15)
3643#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH16_TO_SRPCIM_RMSG_INT BIT(16)
3644/*0x00130*/ u64 vpath_msg_mask;
3645/*0x00138*/ u64 vpath_msg_alarm;
3646 u8 unused00160[0x00160-0x00140];
3647
3648/*0x00160*/ u64 srpcim_to_mrpcim_wmsg;
3649#define VXGE_HW_SRPCIM_TO_MRPCIM_WMSG_SRPCIM_TO_MRPCIM_WMSG(val) \
3650 vxge_vBIT(val, 0, 64)
3651/*0x00168*/ u64 srpcim_to_mrpcim_wmsg_trig;
3652#define VXGE_HW_SRPCIM_TO_MRPCIM_WMSG_TRIG_SRPCIM_TO_MRPCIM_WMSG_TRIG BIT(0)
3653/*0x00170*/ u64 mrpcim_to_srpcim_rmsg;
3654#define VXGE_HW_MRPCIM_TO_SRPCIM_RMSG_SWIF_MRPCIM_TO_SRPCIM_RMSG(val) \
3655 vxge_vBIT(val, 0, 64)
3656/*0x00178*/ u64 vpath_to_srpcim_rmsg_sel;
3657#define VXGE_HW_VPATH_TO_SRPCIM_RMSG_SEL_VPATH_TO_SRPCIM_RMSG_SEL(val) \
3658 vxge_vBIT(val, 0, 5)
3659/*0x00180*/ u64 vpath_to_srpcim_rmsg;
3660#define VXGE_HW_VPATH_TO_SRPCIM_RMSG_SWIF_VPATH_TO_SRPCIM_RMSG(val) \
3661 vxge_vBIT(val, 0, 64)
3662 u8 unused00200[0x00200-0x00188];
3663
3664/*0x00200*/ u64 srpcim_general_int_status;
3665#define VXGE_HW_SRPCIM_GENERAL_INT_STATUS_PIC_INT BIT(0)
3666#define VXGE_HW_SRPCIM_GENERAL_INT_STATUS_PCI_INT BIT(3)
3667#define VXGE_HW_SRPCIM_GENERAL_INT_STATUS_XMAC_INT BIT(7)
3668 u8 unused00210[0x00210-0x00208];
3669
3670/*0x00210*/ u64 srpcim_general_int_mask;
3671#define VXGE_HW_SRPCIM_GENERAL_INT_MASK_PIC_INT BIT(0)
3672#define VXGE_HW_SRPCIM_GENERAL_INT_MASK_PCI_INT BIT(3)
3673#define VXGE_HW_SRPCIM_GENERAL_INT_MASK_XMAC_INT BIT(7)
3674 u8 unused00220[0x00220-0x00218];
3675
3676/*0x00220*/ u64 srpcim_ppif_int_status;
3677
3678/*0x00228*/ u64 srpcim_ppif_int_mask;
3679/*0x00230*/ u64 srpcim_gen_errors_reg;
3680#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_PCICONFIG_PF_STATUS_ERR BIT(3)
3681#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_PCICONFIG_PF_UNCOR_ERR BIT(7)
3682#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_PCICONFIG_PF_COR_ERR BIT(11)
3683#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_INTCTRL_SCHED_INT BIT(15)
3684#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_INI_SERR_DET BIT(19)
3685#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_TGT_PF_ILLEGAL_ACCESS BIT(23)
3686/*0x00238*/ u64 srpcim_gen_errors_mask;
3687/*0x00240*/ u64 srpcim_gen_errors_alarm;
3688/*0x00248*/ u64 mrpcim_to_srpcim_alarm_reg;
3689#define VXGE_HW_MRPCIM_TO_SRPCIM_ALARM_REG_PPIF_MRPCIM_TO_SRPCIM_ALARM BIT(3)
3690/*0x00250*/ u64 mrpcim_to_srpcim_alarm_mask;
3691/*0x00258*/ u64 mrpcim_to_srpcim_alarm_alarm;
3692/*0x00260*/ u64 vpath_to_srpcim_alarm_reg;
3693
3694/*0x00268*/ u64 vpath_to_srpcim_alarm_mask;
3695/*0x00270*/ u64 vpath_to_srpcim_alarm_alarm;
3696 u8 unused00280[0x00280-0x00278];
3697
3698/*0x00280*/ u64 pf_sw_reset;
3699#define VXGE_HW_PF_SW_RESET_PF_SW_RESET(val) vxge_vBIT(val, 0, 8)
3700/*0x00288*/ u64 srpcim_general_cfg1;
3701#define VXGE_HW_SRPCIM_GENERAL_CFG1_BOOT_BYTE_SWAPEN BIT(19)
3702#define VXGE_HW_SRPCIM_GENERAL_CFG1_BOOT_BIT_FLIPEN BIT(23)
3703#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_ADDR_SWAPEN BIT(27)
3704#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_ADDR_FLIPEN BIT(31)
3705#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_DATA_SWAPEN BIT(35)
3706#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_DATA_FLIPEN BIT(39)
3707/*0x00290*/ u64 srpcim_interrupt_cfg1;
3708#define VXGE_HW_SRPCIM_INTERRUPT_CFG1_ALARM_MAP_TO_MSG(val) vxge_vBIT(val, 1, 7)
3709#define VXGE_HW_SRPCIM_INTERRUPT_CFG1_TRAFFIC_CLASS(val) vxge_vBIT(val, 9, 3)
3710 u8 unused002a8[0x002a8-0x00298];
3711
3712/*0x002a8*/ u64 srpcim_clear_msix_mask;
3713#define VXGE_HW_SRPCIM_CLEAR_MSIX_MASK_SRPCIM_CLEAR_MSIX_MASK BIT(0)
3714/*0x002b0*/ u64 srpcim_set_msix_mask;
3715#define VXGE_HW_SRPCIM_SET_MSIX_MASK_SRPCIM_SET_MSIX_MASK BIT(0)
3716/*0x002b8*/ u64 srpcim_clr_msix_one_shot;
3717#define VXGE_HW_SRPCIM_CLR_MSIX_ONE_SHOT_SRPCIM_CLR_MSIX_ONE_SHOT BIT(0)
3718/*0x002c0*/ u64 srpcim_rst_in_prog;
3719#define VXGE_HW_SRPCIM_RST_IN_PROG_SRPCIM_RST_IN_PROG BIT(7)
3720/*0x002c8*/ u64 srpcim_reg_modified;
3721#define VXGE_HW_SRPCIM_REG_MODIFIED_SRPCIM_REG_MODIFIED BIT(7)
3722/*0x002d0*/ u64 tgt_pf_illegal_access;
3723#define VXGE_HW_TGT_PF_ILLEGAL_ACCESS_SWIF_REGION(val) vxge_vBIT(val, 1, 7)
3724/*0x002d8*/ u64 srpcim_msix_status;
3725#define VXGE_HW_SRPCIM_MSIX_STATUS_INTCTL_SRPCIM_MSIX_MASK BIT(3)
3726#define VXGE_HW_SRPCIM_MSIX_STATUS_INTCTL_SRPCIM_MSIX_PENDING_VECTOR BIT(7)
3727 u8 unused00880[0x00880-0x002e0];
3728
3729/*0x00880*/ u64 xgmac_sr_int_status;
3730#define VXGE_HW_XGMAC_SR_INT_STATUS_ASIC_NTWK_SR_ERR_ASIC_NTWK_SR_INT BIT(3)
3731/*0x00888*/ u64 xgmac_sr_int_mask;
3732/*0x00890*/ u64 asic_ntwk_sr_err_reg;
3733#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_FAULT BIT(3)
3734#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_OK BIT(7)
3735#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_FAULT_OCCURRED \
3736 BIT(11)
3737#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_OK_OCCURRED BIT(15)
3738/*0x00898*/ u64 asic_ntwk_sr_err_mask;
3739/*0x008a0*/ u64 asic_ntwk_sr_err_alarm;
3740 u8 unused008c0[0x008c0-0x008a8];
3741
3742/*0x008c0*/ u64 xmac_vsport_choices_sr_clone;
3743#define VXGE_HW_XMAC_VSPORT_CHOICES_SR_CLONE_VSPORT_VECTOR(val) \
3744 vxge_vBIT(val, 0, 17)
3745 u8 unused00900[0x00900-0x008c8];
3746
3747/*0x00900*/ u64 mr_rqa_top_prty_for_vh;
3748#define VXGE_HW_MR_RQA_TOP_PRTY_FOR_VH_RQA_TOP_PRTY_FOR_VH(val) \
3749 vxge_vBIT(val, 59, 5)
3750/*0x00908*/ u64 umq_vh_data_list_empty;
3751#define VXGE_HW_UMQ_VH_DATA_LIST_EMPTY_ROCRC_UMQ_VH_DATA_LIST_EMPTY \
3752 BIT(0)
3753/*0x00910*/ u64 wde_cfg;
3754#define VXGE_HW_WDE_CFG_NS0_FORCE_MWB_START BIT(0)
3755#define VXGE_HW_WDE_CFG_NS0_FORCE_MWB_END BIT(1)
3756#define VXGE_HW_WDE_CFG_NS0_FORCE_QB_START BIT(2)
3757#define VXGE_HW_WDE_CFG_NS0_FORCE_QB_END BIT(3)
3758#define VXGE_HW_WDE_CFG_NS0_FORCE_MPSB_START BIT(4)
3759#define VXGE_HW_WDE_CFG_NS0_FORCE_MPSB_END BIT(5)
3760#define VXGE_HW_WDE_CFG_NS0_MWB_OPT_EN BIT(6)
3761#define VXGE_HW_WDE_CFG_NS0_QB_OPT_EN BIT(7)
3762#define VXGE_HW_WDE_CFG_NS0_MPSB_OPT_EN BIT(8)
3763#define VXGE_HW_WDE_CFG_NS1_FORCE_MWB_START BIT(9)
3764#define VXGE_HW_WDE_CFG_NS1_FORCE_MWB_END BIT(10)
3765#define VXGE_HW_WDE_CFG_NS1_FORCE_QB_START BIT(11)
3766#define VXGE_HW_WDE_CFG_NS1_FORCE_QB_END BIT(12)
3767#define VXGE_HW_WDE_CFG_NS1_FORCE_MPSB_START BIT(13)
3768#define VXGE_HW_WDE_CFG_NS1_FORCE_MPSB_END BIT(14)
3769#define VXGE_HW_WDE_CFG_NS1_MWB_OPT_EN BIT(15)
3770#define VXGE_HW_WDE_CFG_NS1_QB_OPT_EN BIT(16)
3771#define VXGE_HW_WDE_CFG_NS1_MPSB_OPT_EN BIT(17)
3772#define VXGE_HW_WDE_CFG_DISABLE_QPAD_FOR_UNALIGNED_ADDR BIT(19)
3773#define VXGE_HW_WDE_CFG_ALIGNMENT_PREFERENCE(val) vxge_vBIT(val, 30, 2)
3774#define VXGE_HW_WDE_CFG_MEM_WORD_SIZE(val) vxge_vBIT(val, 46, 2)
3775
3776} __packed;
3777
3778/*VXGE_HW_VPMGMT_REGS_H*/
3779struct vxge_hw_vpmgmt_reg {
3780
3781 u8 unused00040[0x00040-0x00000];
3782
3783/*0x00040*/ u64 vpath_to_func_map_cfg1;
3784#define VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_VPATH_TO_FUNC_MAP_CFG1(val) \
3785 vxge_vBIT(val, 3, 5)
3786/*0x00048*/ u64 vpath_is_first;
3787#define VXGE_HW_VPATH_IS_FIRST_VPATH_IS_FIRST vxge_mBIT(3)
3788/*0x00050*/ u64 srpcim_to_vpath_wmsg;
3789#define VXGE_HW_SRPCIM_TO_VPATH_WMSG_SRPCIM_TO_VPATH_WMSG(val) \
3790 vxge_vBIT(val, 0, 64)
3791/*0x00058*/ u64 srpcim_to_vpath_wmsg_trig;
3792#define VXGE_HW_SRPCIM_TO_VPATH_WMSG_TRIG_SRPCIM_TO_VPATH_WMSG_TRIG \
3793 vxge_mBIT(0)
3794 u8 unused00100[0x00100-0x00060];
3795
3796/*0x00100*/ u64 tim_vpath_assignment;
3797#define VXGE_HW_TIM_VPATH_ASSIGNMENT_BMAP_ROOT(val) vxge_vBIT(val, 0, 32)
3798 u8 unused00140[0x00140-0x00108];
3799
3800/*0x00140*/ u64 rqa_top_prty_for_vp;
3801#define VXGE_HW_RQA_TOP_PRTY_FOR_VP_RQA_TOP_PRTY_FOR_VP(val) \
3802 vxge_vBIT(val, 59, 5)
3803 u8 unused001c0[0x001c0-0x00148];
3804
3805/*0x001c0*/ u64 rxmac_rx_pa_cfg0_vpmgmt_clone;
3806#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_IGNORE_FRAME_ERR vxge_mBIT(3)
3807#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SUPPORT_SNAP_AB_N vxge_mBIT(7)
3808#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SEARCH_FOR_HAO vxge_mBIT(18)
3809#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SUPPORT_MOBILE_IPV6_HDRS \
3810 vxge_mBIT(19)
3811#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_IPV6_STOP_SEARCHING \
3812 vxge_mBIT(23)
3813#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_NO_PS_IF_UNKNOWN vxge_mBIT(27)
3814#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SEARCH_FOR_ETYPE vxge_mBIT(35)
3815#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_ANY_FRM_IF_L3_CSUM_ERR \
3816 vxge_mBIT(39)
3817#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_OFFLD_FRM_IF_L3_CSUM_ERR \
3818 vxge_mBIT(43)
3819#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_ANY_FRM_IF_L4_CSUM_ERR \
3820 vxge_mBIT(47)
3821#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_OFFLD_FRM_IF_L4_CSUM_ERR \
3822 vxge_mBIT(51)
3823#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_ANY_FRM_IF_RPA_ERR \
3824 vxge_mBIT(55)
3825#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_OFFLD_FRM_IF_RPA_ERR \
3826 vxge_mBIT(59)
3827#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_JUMBO_SNAP_EN vxge_mBIT(63)
3828/*0x001c8*/ u64 rts_mgr_cfg0_vpmgmt_clone;
3829#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_RTS_DP_SP_PRIORITY vxge_mBIT(3)
3830#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_FLEX_L4PRTCL_VALUE(val) \
3831 vxge_vBIT(val, 24, 8)
3832#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_ICMP_TRASH vxge_mBIT(35)
3833#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_TCPSYN_TRASH vxge_mBIT(39)
3834#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_ZL4PYLD_TRASH vxge_mBIT(43)
3835#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_L4PRTCL_TCP_TRASH vxge_mBIT(47)
3836#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_L4PRTCL_UDP_TRASH vxge_mBIT(51)
3837#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_L4PRTCL_FLEX_TRASH vxge_mBIT(55)
3838#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_IPFRAG_TRASH vxge_mBIT(59)
3839/*0x001d0*/ u64 rts_mgr_criteria_priority_vpmgmt_clone;
3840#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_ETYPE(val) \
3841 vxge_vBIT(val, 5, 3)
3842#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_ICMP_TCPSYN(val) \
3843 vxge_vBIT(val, 9, 3)
3844#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_L4PN(val) \
3845 vxge_vBIT(val, 13, 3)
3846#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_RANGE_L4PN(val) \
3847 vxge_vBIT(val, 17, 3)
3848#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_RTH_IT(val) \
3849 vxge_vBIT(val, 21, 3)
3850#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_DS(val) \
3851 vxge_vBIT(val, 25, 3)
3852#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_QOS(val) \
3853 vxge_vBIT(val, 29, 3)
3854#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_ZL4PYLD(val) \
3855 vxge_vBIT(val, 33, 3)
3856#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_L4PRTCL(val) \
3857 vxge_vBIT(val, 37, 3)
3858/*0x001d8*/ u64 rxmac_cfg0_port_vpmgmt_clone[3];
3859#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_RMAC_EN vxge_mBIT(3)
3860#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS vxge_mBIT(7)
3861#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_DISCARD_PFRM vxge_mBIT(11)
3862#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_FCS_ERR vxge_mBIT(15)
3863#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_LONG_ERR vxge_mBIT(19)
3864#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_USIZED_ERR vxge_mBIT(23)
3865#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_LEN_MISMATCH \
3866 vxge_mBIT(27)
3867#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_MAX_PYLD_LEN(val) \
3868 vxge_vBIT(val, 50, 14)
3869/*0x001f0*/ u64 rxmac_pause_cfg_port_vpmgmt_clone[3];
3870#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_GEN_EN vxge_mBIT(3)
3871#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_RCV_EN vxge_mBIT(7)
3872#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_ACCEL_SEND(val) \
3873 vxge_vBIT(val, 9, 3)
3874#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_DUAL_THR vxge_mBIT(15)
3875#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_HIGH_PTIME(val) \
3876 vxge_vBIT(val, 20, 16)
3877#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_IGNORE_PF_FCS_ERR \
3878 vxge_mBIT(39)
3879#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_IGNORE_PF_LEN_ERR \
3880 vxge_mBIT(43)
3881#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_LIMITER_EN vxge_mBIT(47)
3882#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_MAX_LIMIT(val) \
3883 vxge_vBIT(val, 48, 8)
3884#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_PERMIT_RATEMGMT_CTRL \
3885 vxge_mBIT(59)
3886 u8 unused00240[0x00240-0x00208];
3887
3888/*0x00240*/ u64 xmac_vsport_choices_vp;
3889#define VXGE_HW_XMAC_VSPORT_CHOICES_VP_VSPORT_VECTOR(val) vxge_vBIT(val, 0, 17)
3890 u8 unused00260[0x00260-0x00248];
3891
3892/*0x00260*/ u64 xgmac_gen_status_vpmgmt_clone;
3893#define VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK vxge_mBIT(3)
3894#define VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_DATA_RATE \
3895 vxge_mBIT(11)
3896/*0x00268*/ u64 xgmac_status_port_vpmgmt_clone[2];
3897#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_RMAC_REMOTE_FAULT \
3898 vxge_mBIT(3)
3899#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_RMAC_LOCAL_FAULT vxge_mBIT(7)
3900#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_XMACJ_MAC_PHY_LAYER_AVAIL \
3901 vxge_mBIT(11)
3902#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_XMACJ_PORT_OK vxge_mBIT(15)
3903/*0x00278*/ u64 xmac_gen_cfg_vpmgmt_clone;
3904#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_RATEMGMT_MAC_RATE_SEL(val) \
3905 vxge_vBIT(val, 2, 2)
3906#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_TX_HEAD_DROP_WHEN_FAULT \
3907 vxge_mBIT(7)
3908#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_FAULT_BEHAVIOUR vxge_mBIT(27)
3909#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_PERIOD_NTWK_UP(val) \
3910 vxge_vBIT(val, 28, 4)
3911#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_PERIOD_NTWK_DOWN(val) \
3912 vxge_vBIT(val, 32, 4)
3913/*0x00280*/ u64 xmac_timestamp_vpmgmt_clone;
3914#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_EN vxge_mBIT(3)
3915#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_USE_LINK_ID(val) \
3916 vxge_vBIT(val, 6, 2)
3917#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_INTERVAL(val) vxge_vBIT(val, 12, 4)
3918#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_TIMER_RESTART vxge_mBIT(19)
3919#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_XMACJ_ROLLOVER_CNT(val) \
3920 vxge_vBIT(val, 32, 16)
3921/*0x00288*/ u64 xmac_stats_gen_cfg_vpmgmt_clone;
3922#define VXGE_HW_XMAC_STATS_GEN_CFG_VPMGMT_CLONE_PRTAGGR_CUM_TIMER(val) \
3923 vxge_vBIT(val, 4, 4)
3924#define VXGE_HW_XMAC_STATS_GEN_CFG_VPMGMT_CLONE_VPATH_CUM_TIMER(val) \
3925 vxge_vBIT(val, 8, 4)
3926#define VXGE_HW_XMAC_STATS_GEN_CFG_VPMGMT_CLONE_VLAN_HANDLING vxge_mBIT(15)
3927/*0x00290*/ u64 xmac_cfg_port_vpmgmt_clone[3];
3928#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_LOOPBACK vxge_mBIT(3)
3929#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_REVERSE_LOOPBACK \
3930 vxge_mBIT(7)
3931#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_TX_BEHAV vxge_mBIT(11)
3932#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_RX_BEHAV vxge_mBIT(15)
3933 u8 unused002c0[0x002c0-0x002a8];
3934
3935/*0x002c0*/ u64 txmac_gen_cfg0_vpmgmt_clone;
3936#define VXGE_HW_TXMAC_GEN_CFG0_VPMGMT_CLONE_CHOSEN_TX_PORT vxge_mBIT(7)
3937/*0x002c8*/ u64 txmac_cfg0_port_vpmgmt_clone[3];
3938#define VXGE_HW_TXMAC_CFG0_PORT_VPMGMT_CLONE_TMAC_EN vxge_mBIT(3)
3939#define VXGE_HW_TXMAC_CFG0_PORT_VPMGMT_CLONE_APPEND_PAD vxge_mBIT(7)
3940#define VXGE_HW_TXMAC_CFG0_PORT_VPMGMT_CLONE_PAD_BYTE(val) vxge_vBIT(val, 8, 8)
3941 u8 unused00300[0x00300-0x002e0];
3942
3943/*0x00300*/ u64 wol_mp_crc;
3944#define VXGE_HW_WOL_MP_CRC_CRC(val) vxge_vBIT(val, 0, 32)
3945#define VXGE_HW_WOL_MP_CRC_RC_EN vxge_mBIT(63)
3946/*0x00308*/ u64 wol_mp_mask_a;
3947#define VXGE_HW_WOL_MP_MASK_A_MASK(val) vxge_vBIT(val, 0, 64)
3948/*0x00310*/ u64 wol_mp_mask_b;
3949#define VXGE_HW_WOL_MP_MASK_B_MASK(val) vxge_vBIT(val, 0, 64)
3950 u8 unused00360[0x00360-0x00318];
3951
3952/*0x00360*/ u64 fau_pa_cfg_vpmgmt_clone;
3953#define VXGE_HW_FAU_PA_CFG_VPMGMT_CLONE_REPL_L4_COMP_CSUM vxge_mBIT(3)
3954#define VXGE_HW_FAU_PA_CFG_VPMGMT_CLONE_REPL_L3_INCL_CF vxge_mBIT(7)
3955#define VXGE_HW_FAU_PA_CFG_VPMGMT_CLONE_REPL_L3_COMP_CSUM vxge_mBIT(11)
3956/*0x00368*/ u64 rx_datapath_util_vp_clone;
3957#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_FAU_RX_UTILIZATION(val) \
3958 vxge_vBIT(val, 7, 9)
3959#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_RX_UTIL_CFG(val) \
3960 vxge_vBIT(val, 16, 4)
3961#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_FAU_RX_FRAC_UTIL(val) \
3962 vxge_vBIT(val, 20, 4)
3963#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_RX_PKT_WEIGHT(val) \
3964 vxge_vBIT(val, 24, 4)
3965 u8 unused00380[0x00380-0x00370];
3966
3967/*0x00380*/ u64 tx_datapath_util_vp_clone;
3968#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TPA_TX_UTILIZATION(val) \
3969 vxge_vBIT(val, 7, 9)
3970#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TX_UTIL_CFG(val) \
3971 vxge_vBIT(val, 16, 4)
3972#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TPA_TX_FRAC_UTIL(val) \
3973 vxge_vBIT(val, 20, 4)
3974#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TX_PKT_WEIGHT(val) \
3975 vxge_vBIT(val, 24, 4)
3976
3977} __packed;
3978
3979struct vxge_hw_vpath_reg {
3980
3981 u8 unused00300[0x00300];
3982
3983/*0x00300*/ u64 usdc_vpath;
3984#define VXGE_HW_USDC_VPATH_SGRP_ASSIGN(val) vxge_vBIT(val, 0, 32)
3985 u8 unused00a00[0x00a00-0x00308];
3986
3987/*0x00a00*/ u64 wrdma_alarm_status;
3988#define VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT vxge_mBIT(1)
3989/*0x00a08*/ u64 wrdma_alarm_mask;
3990 u8 unused00a30[0x00a30-0x00a10];
3991
3992/*0x00a30*/ u64 prc_alarm_reg;
3993#define VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP vxge_mBIT(0)
3994#define VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR vxge_mBIT(1)
3995#define VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT vxge_mBIT(2)
3996#define VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR vxge_mBIT(3)
3997/*0x00a38*/ u64 prc_alarm_mask;
3998/*0x00a40*/ u64 prc_alarm_alarm;
3999/*0x00a48*/ u64 prc_cfg1;
4000#define VXGE_HW_PRC_CFG1_RX_TIMER_VAL(val) vxge_vBIT(val, 3, 29)
4001#define VXGE_HW_PRC_CFG1_TIM_RING_BUMP_INT_ENABLE vxge_mBIT(34)
4002#define VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE vxge_mBIT(35)
4003#define VXGE_HW_PRC_CFG1_GREEDY_RETURN vxge_mBIT(36)
4004#define VXGE_HW_PRC_CFG1_QUICK_SHOT vxge_mBIT(37)
4005#define VXGE_HW_PRC_CFG1_RX_TIMER_CI vxge_mBIT(39)
4006#define VXGE_HW_PRC_CFG1_RESET_TIMER_ON_RXD_RET(val) vxge_vBIT(val, 40, 2)
4007 u8 unused00a60[0x00a60-0x00a50];
4008
4009/*0x00a60*/ u64 prc_cfg4;
4010#define VXGE_HW_PRC_CFG4_IN_SVC vxge_mBIT(7)
4011#define VXGE_HW_PRC_CFG4_RING_MODE(val) vxge_vBIT(val, 14, 2)
4012#define VXGE_HW_PRC_CFG4_RXD_NO_SNOOP vxge_mBIT(22)
4013#define VXGE_HW_PRC_CFG4_FRM_NO_SNOOP vxge_mBIT(23)
4014#define VXGE_HW_PRC_CFG4_RTH_DISABLE vxge_mBIT(31)
4015#define VXGE_HW_PRC_CFG4_IGNORE_OWNERSHIP vxge_mBIT(32)
4016#define VXGE_HW_PRC_CFG4_SIGNAL_BENIGN_OVFLW vxge_mBIT(36)
4017#define VXGE_HW_PRC_CFG4_BIMODAL_INTERRUPT vxge_mBIT(37)
4018#define VXGE_HW_PRC_CFG4_BACKOFF_INTERVAL(val) vxge_vBIT(val, 40, 24)
4019/*0x00a68*/ u64 prc_cfg5;
4020#define VXGE_HW_PRC_CFG5_RXD0_ADD(val) vxge_vBIT(val, 0, 61)
4021/*0x00a70*/ u64 prc_cfg6;
4022#define VXGE_HW_PRC_CFG6_FRM_PAD_EN vxge_mBIT(0)
4023#define VXGE_HW_PRC_CFG6_QSIZE_ALIGNED_RXD vxge_mBIT(2)
4024#define VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN vxge_mBIT(5)
4025#define VXGE_HW_PRC_CFG6_L3_CPC_TRSFR_CODE_EN vxge_mBIT(8)
4026#define VXGE_HW_PRC_CFG6_L4_CPC_TRSFR_CODE_EN vxge_mBIT(9)
4027#define VXGE_HW_PRC_CFG6_RXD_CRXDT(val) vxge_vBIT(val, 23, 9)
4028#define VXGE_HW_PRC_CFG6_RXD_SPAT(val) vxge_vBIT(val, 36, 9)
4029#define VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val) vxge_bVALn(val, 36, 9)
4030/*0x00a78*/ u64 prc_cfg7;
4031#define VXGE_HW_PRC_CFG7_SCATTER_MODE(val) vxge_vBIT(val, 6, 2)
4032#define VXGE_HW_PRC_CFG7_SMART_SCAT_EN vxge_mBIT(11)
4033#define VXGE_HW_PRC_CFG7_RXD_NS_CHG_EN vxge_mBIT(12)
4034#define VXGE_HW_PRC_CFG7_NO_HDR_SEPARATION vxge_mBIT(14)
4035#define VXGE_HW_PRC_CFG7_RXD_BUFF_SIZE_MASK(val) vxge_vBIT(val, 20, 4)
4036#define VXGE_HW_PRC_CFG7_BUFF_SIZE0_MASK(val) vxge_vBIT(val, 27, 5)
4037/*0x00a80*/ u64 tim_dest_addr;
4038#define VXGE_HW_TIM_DEST_ADDR_TIM_DEST_ADDR(val) vxge_vBIT(val, 0, 64)
4039/*0x00a88*/ u64 prc_rxd_doorbell;
4040#define VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val) vxge_vBIT(val, 48, 16)
4041/*0x00a90*/ u64 rqa_prty_for_vp;
4042#define VXGE_HW_RQA_PRTY_FOR_VP_RQA_PRTY_FOR_VP(val) vxge_vBIT(val, 59, 5)
4043/*0x00a98*/ u64 rxdmem_size;
4044#define VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(val) vxge_vBIT(val, 51, 13)
4045/*0x00aa0*/ u64 frm_in_progress_cnt;
4046#define VXGE_HW_FRM_IN_PROGRESS_CNT_PRC_FRM_IN_PROGRESS_CNT(val) \
4047 vxge_vBIT(val, 59, 5)
4048/*0x00aa8*/ u64 rx_multi_cast_stats;
4049#define VXGE_HW_RX_MULTI_CAST_STATS_FRAME_DISCARD(val) vxge_vBIT(val, 48, 16)
4050/*0x00ab0*/ u64 rx_frm_transferred;
4051#define VXGE_HW_RX_FRM_TRANSFERRED_RX_FRM_TRANSFERRED(val) \
4052 vxge_vBIT(val, 32, 32)
4053/*0x00ab8*/ u64 rxd_returned;
4054#define VXGE_HW_RXD_RETURNED_RXD_RETURNED(val) vxge_vBIT(val, 48, 16)
4055 u8 unused00c00[0x00c00-0x00ac0];
4056
4057/*0x00c00*/ u64 kdfc_fifo_trpl_partition;
4058#define VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(val) vxge_vBIT(val, 17, 15)
4059#define VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_1(val) vxge_vBIT(val, 33, 15)
4060#define VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_2(val) vxge_vBIT(val, 49, 15)
4061/*0x00c08*/ u64 kdfc_fifo_trpl_ctrl;
4062#define VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE vxge_mBIT(7)
4063/*0x00c10*/ u64 kdfc_trpl_fifo_0_ctrl;
4064#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(val) vxge_vBIT(val, 14, 2)
4065#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_FLIP_EN vxge_mBIT(22)
4066#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN vxge_mBIT(23)
4067#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_INT_CTRL(val) vxge_vBIT(val, 26, 2)
4068#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_CTRL_STRUC vxge_mBIT(28)
4069#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_ADD_PAD vxge_mBIT(29)
4070#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_NO_SNOOP vxge_mBIT(30)
4071#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_RLX_ORD vxge_mBIT(31)
4072#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(val) vxge_vBIT(val, 32, 8)
4073#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_INT_NO(val) vxge_vBIT(val, 41, 7)
4074#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_BIT_MAP(val) vxge_vBIT(val, 48, 16)
4075/*0x00c18*/ u64 kdfc_trpl_fifo_1_ctrl;
4076#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_MODE(val) vxge_vBIT(val, 14, 2)
4077#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_FLIP_EN vxge_mBIT(22)
4078#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_SWAP_EN vxge_mBIT(23)
4079#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_INT_CTRL(val) vxge_vBIT(val, 26, 2)
4080#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_CTRL_STRUC vxge_mBIT(28)
4081#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_ADD_PAD vxge_mBIT(29)
4082#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_NO_SNOOP vxge_mBIT(30)
4083#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_RLX_ORD vxge_mBIT(31)
4084#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_SELECT(val) vxge_vBIT(val, 32, 8)
4085#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_INT_NO(val) vxge_vBIT(val, 41, 7)
4086#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_BIT_MAP(val) vxge_vBIT(val, 48, 16)
4087/*0x00c20*/ u64 kdfc_trpl_fifo_2_ctrl;
4088#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_FLIP_EN vxge_mBIT(22)
4089#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_SWAP_EN vxge_mBIT(23)
4090#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_INT_CTRL(val) vxge_vBIT(val, 26, 2)
4091#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_CTRL_STRUC vxge_mBIT(28)
4092#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_ADD_PAD vxge_mBIT(29)
4093#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_NO_SNOOP vxge_mBIT(30)
4094#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_RLX_ORD vxge_mBIT(31)
4095#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_SELECT(val) vxge_vBIT(val, 32, 8)
4096#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_INT_NO(val) vxge_vBIT(val, 41, 7)
4097#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_BIT_MAP(val) vxge_vBIT(val, 48, 16)
4098/*0x00c28*/ u64 kdfc_trpl_fifo_0_wb_address;
4099#define VXGE_HW_KDFC_TRPL_FIFO_0_WB_ADDRESS_ADD(val) vxge_vBIT(val, 0, 64)
4100/*0x00c30*/ u64 kdfc_trpl_fifo_1_wb_address;
4101#define VXGE_HW_KDFC_TRPL_FIFO_1_WB_ADDRESS_ADD(val) vxge_vBIT(val, 0, 64)
4102/*0x00c38*/ u64 kdfc_trpl_fifo_2_wb_address;
4103#define VXGE_HW_KDFC_TRPL_FIFO_2_WB_ADDRESS_ADD(val) vxge_vBIT(val, 0, 64)
4104/*0x00c40*/ u64 kdfc_trpl_fifo_offset;
4105#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_RCTR0(val) vxge_vBIT(val, 1, 15)
4106#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_RCTR1(val) vxge_vBIT(val, 17, 15)
4107#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_RCTR2(val) vxge_vBIT(val, 33, 15)
4108/*0x00c48*/ u64 kdfc_drbl_triplet_total;
4109#define VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_KDFC_MAX_SIZE(val) \
4110 vxge_vBIT(val, 17, 15)
4111 u8 unused00c60[0x00c60-0x00c50];
4112
4113/*0x00c60*/ u64 usdc_drbl_ctrl;
4114#define VXGE_HW_USDC_DRBL_CTRL_FLIP_EN vxge_mBIT(22)
4115#define VXGE_HW_USDC_DRBL_CTRL_SWAP_EN vxge_mBIT(23)
4116/*0x00c68*/ u64 usdc_vp_ready;
4117#define VXGE_HW_USDC_VP_READY_USDC_HTN_READY vxge_mBIT(7)
4118#define VXGE_HW_USDC_VP_READY_USDC_SRQ_READY vxge_mBIT(15)
4119#define VXGE_HW_USDC_VP_READY_USDC_CQRQ_READY vxge_mBIT(23)
4120/*0x00c70*/ u64 kdfc_status;
4121#define VXGE_HW_KDFC_STATUS_KDFC_WRR_0_READY vxge_mBIT(0)
4122#define VXGE_HW_KDFC_STATUS_KDFC_WRR_1_READY vxge_mBIT(1)
4123#define VXGE_HW_KDFC_STATUS_KDFC_WRR_2_READY vxge_mBIT(2)
4124 u8 unused00c80[0x00c80-0x00c78];
4125
4126/*0x00c80*/ u64 xmac_rpa_vcfg;
4127#define VXGE_HW_XMAC_RPA_VCFG_IPV4_TCP_INCL_PH vxge_mBIT(3)
4128#define VXGE_HW_XMAC_RPA_VCFG_IPV6_TCP_INCL_PH vxge_mBIT(7)
4129#define VXGE_HW_XMAC_RPA_VCFG_IPV4_UDP_INCL_PH vxge_mBIT(11)
4130#define VXGE_HW_XMAC_RPA_VCFG_IPV6_UDP_INCL_PH vxge_mBIT(15)
4131#define VXGE_HW_XMAC_RPA_VCFG_L4_INCL_CF vxge_mBIT(19)
4132#define VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG vxge_mBIT(23)
4133/*0x00c88*/ u64 rxmac_vcfg0;
4134#define VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(val) vxge_vBIT(val, 2, 14)
4135#define VXGE_HW_RXMAC_VCFG0_RTS_USE_MIN_LEN vxge_mBIT(19)
4136#define VXGE_HW_RXMAC_VCFG0_RTS_MIN_FRM_LEN(val) vxge_vBIT(val, 26, 14)
4137#define VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN vxge_mBIT(43)
4138#define VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN vxge_mBIT(47)
4139#define VXGE_HW_RXMAC_VCFG0_BCAST_EN vxge_mBIT(51)
4140#define VXGE_HW_RXMAC_VCFG0_ALL_VID_EN vxge_mBIT(55)
4141/*0x00c90*/ u64 rxmac_vcfg1;
4142#define VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(val) vxge_vBIT(val, 42, 2)
4143#define VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE vxge_mBIT(47)
4144#define VXGE_HW_RXMAC_VCFG1_CONTRIB_L2_FLOW vxge_mBIT(51)
4145/*0x00c98*/ u64 rts_access_steer_ctrl;
4146#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(val) vxge_vBIT(val, 1, 7)
4147#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(val) vxge_vBIT(val, 8, 4)
4148#define VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE vxge_mBIT(15)
4149#define VXGE_HW_RTS_ACCESS_STEER_CTRL_BEHAV_TBL_SEL vxge_mBIT(23)
4150#define VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL vxge_mBIT(27)
4151#define VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS vxge_mBIT(0)
4152#define VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(val) vxge_vBIT(val, 40, 8)
4153/*0x00ca0*/ u64 rts_access_steer_data0;
4154#define VXGE_HW_RTS_ACCESS_STEER_DATA0_DATA(val) vxge_vBIT(val, 0, 64)
4155/*0x00ca8*/ u64 rts_access_steer_data1;
4156#define VXGE_HW_RTS_ACCESS_STEER_DATA1_DATA(val) vxge_vBIT(val, 0, 64)
4157 u8 unused00d00[0x00d00-0x00cb0];
4158
4159/*0x00d00*/ u64 xmac_vsport_choice;
4160#define VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(val) vxge_vBIT(val, 3, 5)
4161/*0x00d08*/ u64 xmac_stats_cfg;
4162/*0x00d10*/ u64 xmac_stats_access_cmd;
4163#define VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(val) vxge_vBIT(val, 6, 2)
4164#define VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE vxge_mBIT(15)
4165#define VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(val) vxge_vBIT(val, 32, 8)
4166/*0x00d18*/ u64 xmac_stats_access_data;
4167#define VXGE_HW_XMAC_STATS_ACCESS_DATA_XSMGR_DATA(val) vxge_vBIT(val, 0, 64)
4168/*0x00d20*/ u64 asic_ntwk_vp_ctrl;
4169#define VXGE_HW_ASIC_NTWK_VP_CTRL_REQ_TEST_NTWK vxge_mBIT(3)
4170#define VXGE_HW_ASIC_NTWK_VP_CTRL_XMACJ_SHOW_PORT_INFO vxge_mBIT(55)
4171#define VXGE_HW_ASIC_NTWK_VP_CTRL_XMACJ_PORT_NUM vxge_mBIT(63)
4172 u8 unused00d30[0x00d30-0x00d28];
4173
4174/*0x00d30*/ u64 xgmac_vp_int_status;
4175#define VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT \
4176 vxge_mBIT(3)
4177/*0x00d38*/ u64 xgmac_vp_int_mask;
4178/*0x00d40*/ u64 asic_ntwk_vp_err_reg;
4179#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT vxge_mBIT(3)
4180#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK vxge_mBIT(7)
4181#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR \
4182 vxge_mBIT(11)
4183#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR \
4184 vxge_mBIT(15)
4185#define VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT \
4186 vxge_mBIT(19)
4187#define VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK vxge_mBIT(23)
4188/*0x00d48*/ u64 asic_ntwk_vp_err_mask;
4189/*0x00d50*/ u64 asic_ntwk_vp_err_alarm;
4190 u8 unused00d80[0x00d80-0x00d58];
4191
4192/*0x00d80*/ u64 rtdma_bw_ctrl;
4193#define VXGE_HW_RTDMA_BW_CTRL_BW_CTRL_EN vxge_mBIT(39)
4194#define VXGE_HW_RTDMA_BW_CTRL_DESIRED_BW(val) vxge_vBIT(val, 46, 18)
4195/*0x00d88*/ u64 rtdma_rd_optimization_ctrl;
4196#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_GEN_INT_AFTER_ABORT vxge_mBIT(3)
4197#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_PAD_MODE(val) vxge_vBIT(val, 6, 2)
4198#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_PAD_PATTERN(val) vxge_vBIT(val, 8, 8)
4199#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE vxge_mBIT(19)
4200#define VXGE_HW_PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size */
4201#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val) \
4202 vxge_vBIT(val, 21, 3)
4203#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_PYLD_WMARK_EN vxge_mBIT(28)
4204#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_PYLD_WMARK(val) \
4205 vxge_vBIT(val, 29, 3)
4206#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN vxge_mBIT(35)
4207#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(val) \
4208 vxge_vBIT(val, 37, 3)
4209#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_WAIT_FOR_SPACE vxge_mBIT(43)
4210#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_FILL_THRESH(val) \
4211 vxge_vBIT(val, 51, 5)
4212#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_ADDR_BDRY_EN vxge_mBIT(59)
4213#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_ADDR_BDRY(val) \
4214 vxge_vBIT(val, 61, 3)
4215/*0x00d90*/ u64 pda_pcc_job_monitor;
4216#define VXGE_HW_PDA_PCC_JOB_MONITOR_PDA_PCC_JOB_STATUS vxge_mBIT(7)
4217/*0x00d98*/ u64 tx_protocol_assist_cfg;
4218#define VXGE_HW_TX_PROTOCOL_ASSIST_CFG_LSOV2_EN vxge_mBIT(6)
4219#define VXGE_HW_TX_PROTOCOL_ASSIST_CFG_IPV6_KEEP_SEARCHING vxge_mBIT(7)
4220 u8 unused01000[0x01000-0x00da0];
4221
4222/*0x01000*/ u64 tim_cfg1_int_num[4];
4223#define VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(val) vxge_vBIT(val, 6, 26)
4224#define VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN vxge_mBIT(35)
4225#define VXGE_HW_TIM_CFG1_INT_NUM_TXFRM_CNT_EN vxge_mBIT(36)
4226#define VXGE_HW_TIM_CFG1_INT_NUM_TXD_CNT_EN vxge_mBIT(37)
4227#define VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC vxge_mBIT(38)
4228#define VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI vxge_mBIT(39)
4229#define VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(val) vxge_vBIT(val, 41, 7)
4230#define VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(val) vxge_vBIT(val, 49, 7)
4231#define VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(val) vxge_vBIT(val, 57, 7)
4232/*0x01020*/ u64 tim_cfg2_int_num[4];
4233#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(val) vxge_vBIT(val, 0, 16)
4234#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(val) vxge_vBIT(val, 16, 16)
4235#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(val) vxge_vBIT(val, 32, 16)
4236#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(val) vxge_vBIT(val, 48, 16)
4237/*0x01040*/ u64 tim_cfg3_int_num[4];
4238#define VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI vxge_mBIT(0)
4239#define VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(val) vxge_vBIT(val, 1, 4)
4240#define VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(val) vxge_vBIT(val, 6, 26)
4241#define VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(val) vxge_vBIT(val, 32, 6)
4242#define VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(val) vxge_vBIT(val, 38, 26)
4243/*0x01060*/ u64 tim_wrkld_clc;
4244#define VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(val) vxge_vBIT(val, 0, 32)
4245#define VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(val) vxge_vBIT(val, 35, 5)
4246#define VXGE_HW_TIM_WRKLD_CLC_CNT_FRM_BYTE vxge_mBIT(40)
4247#define VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(val) vxge_vBIT(val, 41, 2)
4248#define VXGE_HW_TIM_WRKLD_CLC_CNT_LNK_EN vxge_mBIT(43)
4249#define VXGE_HW_TIM_WRKLD_CLC_HOST_UTIL(val) vxge_vBIT(val, 57, 7)
4250/*0x01068*/ u64 tim_bitmap;
4251#define VXGE_HW_TIM_BITMAP_MASK(val) vxge_vBIT(val, 0, 32)
4252#define VXGE_HW_TIM_BITMAP_LLROOT_RXD_EN vxge_mBIT(32)
4253#define VXGE_HW_TIM_BITMAP_LLROOT_TXD_EN vxge_mBIT(33)
4254/*0x01070*/ u64 tim_ring_assn;
4255#define VXGE_HW_TIM_RING_ASSN_INT_NUM(val) vxge_vBIT(val, 6, 2)
4256/*0x01078*/ u64 tim_remap;
4257#define VXGE_HW_TIM_REMAP_TX_EN vxge_mBIT(5)
4258#define VXGE_HW_TIM_REMAP_RX_EN vxge_mBIT(6)
4259#define VXGE_HW_TIM_REMAP_OFFLOAD_EN vxge_mBIT(7)
4260#define VXGE_HW_TIM_REMAP_TO_VPATH_NUM(val) vxge_vBIT(val, 11, 5)
4261/*0x01080*/ u64 tim_vpath_map;
4262#define VXGE_HW_TIM_VPATH_MAP_BMAP_ROOT(val) vxge_vBIT(val, 0, 32)
4263/*0x01088*/ u64 tim_pci_cfg;
4264#define VXGE_HW_TIM_PCI_CFG_ADD_PAD vxge_mBIT(7)
4265#define VXGE_HW_TIM_PCI_CFG_NO_SNOOP vxge_mBIT(15)
4266#define VXGE_HW_TIM_PCI_CFG_RELAXED vxge_mBIT(23)
4267#define VXGE_HW_TIM_PCI_CFG_CTL_STR vxge_mBIT(31)
4268 u8 unused01100[0x01100-0x01090];
4269
4270/*0x01100*/ u64 sgrp_assign;
4271#define VXGE_HW_SGRP_ASSIGN_SGRP_ASSIGN(val) vxge_vBIT(val, 0, 64)
4272/*0x01108*/ u64 sgrp_aoa_and_result;
4273#define VXGE_HW_SGRP_AOA_AND_RESULT_PET_SGRP_AOA_AND_RESULT(val) \
4274 vxge_vBIT(val, 0, 64)
4275/*0x01110*/ u64 rpe_pci_cfg;
4276#define VXGE_HW_RPE_PCI_CFG_PAD_LRO_DATA_ENABLE vxge_mBIT(7)
4277#define VXGE_HW_RPE_PCI_CFG_PAD_LRO_HDR_ENABLE vxge_mBIT(8)
4278#define VXGE_HW_RPE_PCI_CFG_PAD_LRO_CQE_ENABLE vxge_mBIT(9)
4279#define VXGE_HW_RPE_PCI_CFG_PAD_NONLL_CQE_ENABLE vxge_mBIT(10)
4280#define VXGE_HW_RPE_PCI_CFG_PAD_BASE_LL_CQE_ENABLE vxge_mBIT(11)
4281#define VXGE_HW_RPE_PCI_CFG_PAD_LL_CQE_IDATA_ENABLE vxge_mBIT(12)
4282#define VXGE_HW_RPE_PCI_CFG_PAD_CQRQ_IR_ENABLE vxge_mBIT(13)
4283#define VXGE_HW_RPE_PCI_CFG_PAD_CQSQ_IR_ENABLE vxge_mBIT(14)
4284#define VXGE_HW_RPE_PCI_CFG_PAD_CQRR_IR_ENABLE vxge_mBIT(15)
4285#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_DATA vxge_mBIT(18)
4286#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_NONLL_CQE vxge_mBIT(19)
4287#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_LL_CQE vxge_mBIT(20)
4288#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_CQRQ_IR vxge_mBIT(21)
4289#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_CQSQ_IR vxge_mBIT(22)
4290#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_CQRR_IR vxge_mBIT(23)
4291#define VXGE_HW_RPE_PCI_CFG_RELAXED_DATA vxge_mBIT(26)
4292#define VXGE_HW_RPE_PCI_CFG_RELAXED_NONLL_CQE vxge_mBIT(27)
4293#define VXGE_HW_RPE_PCI_CFG_RELAXED_LL_CQE vxge_mBIT(28)
4294#define VXGE_HW_RPE_PCI_CFG_RELAXED_CQRQ_IR vxge_mBIT(29)
4295#define VXGE_HW_RPE_PCI_CFG_RELAXED_CQSQ_IR vxge_mBIT(30)
4296#define VXGE_HW_RPE_PCI_CFG_RELAXED_CQRR_IR vxge_mBIT(31)
4297/*0x01118*/ u64 rpe_lro_cfg;
4298#define VXGE_HW_RPE_LRO_CFG_SUPPRESS_LRO_ETH_TRLR vxge_mBIT(7)
4299#define VXGE_HW_RPE_LRO_CFG_ALLOW_LRO_SNAP_SNAPJUMBO_MRG vxge_mBIT(11)
4300#define VXGE_HW_RPE_LRO_CFG_ALLOW_LRO_LLC_LLCJUMBO_MRG vxge_mBIT(15)
4301#define VXGE_HW_RPE_LRO_CFG_INCL_ACK_CNT_IN_CQE vxge_mBIT(23)
4302/*0x01120*/ u64 pe_mr2vp_ack_blk_limit;
4303#define VXGE_HW_PE_MR2VP_ACK_BLK_LIMIT_BLK_LIMIT(val) vxge_vBIT(val, 32, 32)
4304/*0x01128*/ u64 pe_mr2vp_rirr_lirr_blk_limit;
4305#define VXGE_HW_PE_MR2VP_RIRR_LIRR_BLK_LIMIT_RIRR_BLK_LIMIT(val) \
4306 vxge_vBIT(val, 0, 32)
4307#define VXGE_HW_PE_MR2VP_RIRR_LIRR_BLK_LIMIT_LIRR_BLK_LIMIT(val) \
4308 vxge_vBIT(val, 32, 32)
4309/*0x01130*/ u64 txpe_pci_nce_cfg;
4310#define VXGE_HW_TXPE_PCI_NCE_CFG_NCE_THRESH(val) vxge_vBIT(val, 0, 32)
4311#define VXGE_HW_TXPE_PCI_NCE_CFG_PAD_TOWI_ENABLE vxge_mBIT(55)
4312#define VXGE_HW_TXPE_PCI_NCE_CFG_NOSNOOP_TOWI vxge_mBIT(63)
4313 u8 unused01180[0x01180-0x01138];
4314
4315/*0x01180*/ u64 msg_qpad_en_cfg;
4316#define VXGE_HW_MSG_QPAD_EN_CFG_UMQ_BWR_READ vxge_mBIT(3)
4317#define VXGE_HW_MSG_QPAD_EN_CFG_DMQ_BWR_READ vxge_mBIT(7)
4318#define VXGE_HW_MSG_QPAD_EN_CFG_MXP_GENDMA_READ vxge_mBIT(11)
4319#define VXGE_HW_MSG_QPAD_EN_CFG_UXP_GENDMA_READ vxge_mBIT(15)
4320#define VXGE_HW_MSG_QPAD_EN_CFG_UMQ_MSG_WRITE vxge_mBIT(19)
4321#define VXGE_HW_MSG_QPAD_EN_CFG_UMQDMQ_IR_WRITE vxge_mBIT(23)
4322#define VXGE_HW_MSG_QPAD_EN_CFG_MXP_GENDMA_WRITE vxge_mBIT(27)
4323#define VXGE_HW_MSG_QPAD_EN_CFG_UXP_GENDMA_WRITE vxge_mBIT(31)
4324/*0x01188*/ u64 msg_pci_cfg;
4325#define VXGE_HW_MSG_PCI_CFG_GENDMA_NO_SNOOP vxge_mBIT(3)
4326#define VXGE_HW_MSG_PCI_CFG_UMQDMQ_IR_NO_SNOOP vxge_mBIT(7)
4327#define VXGE_HW_MSG_PCI_CFG_UMQ_NO_SNOOP vxge_mBIT(11)
4328#define VXGE_HW_MSG_PCI_CFG_DMQ_NO_SNOOP vxge_mBIT(15)
4329/*0x01190*/ u64 umqdmq_ir_init;
4330#define VXGE_HW_UMQDMQ_IR_INIT_HOST_WRITE_ADD(val) vxge_vBIT(val, 0, 64)
4331/*0x01198*/ u64 dmq_ir_int;
4332#define VXGE_HW_DMQ_IR_INT_IMMED_ENABLE vxge_mBIT(6)
4333#define VXGE_HW_DMQ_IR_INT_EVENT_ENABLE vxge_mBIT(7)
4334#define VXGE_HW_DMQ_IR_INT_NUMBER(val) vxge_vBIT(val, 9, 7)
4335#define VXGE_HW_DMQ_IR_INT_BITMAP(val) vxge_vBIT(val, 16, 16)
4336/*0x011a0*/ u64 dmq_bwr_init_add;
4337#define VXGE_HW_DMQ_BWR_INIT_ADD_HOST(val) vxge_vBIT(val, 0, 64)
4338/*0x011a8*/ u64 dmq_bwr_init_byte;
4339#define VXGE_HW_DMQ_BWR_INIT_BYTE_COUNT(val) vxge_vBIT(val, 0, 32)
4340/*0x011b0*/ u64 dmq_ir;
4341#define VXGE_HW_DMQ_IR_POLICY(val) vxge_vBIT(val, 0, 8)
4342/*0x011b8*/ u64 umq_int;
4343#define VXGE_HW_UMQ_INT_IMMED_ENABLE vxge_mBIT(6)
4344#define VXGE_HW_UMQ_INT_EVENT_ENABLE vxge_mBIT(7)
4345#define VXGE_HW_UMQ_INT_NUMBER(val) vxge_vBIT(val, 9, 7)
4346#define VXGE_HW_UMQ_INT_BITMAP(val) vxge_vBIT(val, 16, 16)
4347/*0x011c0*/ u64 umq_mr2vp_bwr_pfch_init;
4348#define VXGE_HW_UMQ_MR2VP_BWR_PFCH_INIT_NUMBER(val) vxge_vBIT(val, 0, 8)
4349/*0x011c8*/ u64 umq_bwr_pfch_ctrl;
4350#define VXGE_HW_UMQ_BWR_PFCH_CTRL_POLL_EN vxge_mBIT(3)
4351/*0x011d0*/ u64 umq_mr2vp_bwr_eol;
4352#define VXGE_HW_UMQ_MR2VP_BWR_EOL_POLL_LATENCY(val) vxge_vBIT(val, 32, 32)
4353/*0x011d8*/ u64 umq_bwr_init_add;
4354#define VXGE_HW_UMQ_BWR_INIT_ADD_HOST(val) vxge_vBIT(val, 0, 64)
4355/*0x011e0*/ u64 umq_bwr_init_byte;
4356#define VXGE_HW_UMQ_BWR_INIT_BYTE_COUNT(val) vxge_vBIT(val, 0, 32)
4357/*0x011e8*/ u64 gendma_int;
4358/*0x011f0*/ u64 umqdmq_ir_init_notify;
4359#define VXGE_HW_UMQDMQ_IR_INIT_NOTIFY_PULSE vxge_mBIT(3)
4360/*0x011f8*/ u64 dmq_init_notify;
4361#define VXGE_HW_DMQ_INIT_NOTIFY_PULSE vxge_mBIT(3)
4362/*0x01200*/ u64 umq_init_notify;
4363#define VXGE_HW_UMQ_INIT_NOTIFY_PULSE vxge_mBIT(3)
4364 u8 unused01380[0x01380-0x01208];
4365
4366/*0x01380*/ u64 tpa_cfg;
4367#define VXGE_HW_TPA_CFG_IGNORE_FRAME_ERR vxge_mBIT(3)
4368#define VXGE_HW_TPA_CFG_IPV6_STOP_SEARCHING vxge_mBIT(7)
4369#define VXGE_HW_TPA_CFG_L4_PSHDR_PRESENT vxge_mBIT(11)
4370#define VXGE_HW_TPA_CFG_SUPPORT_MOBILE_IPV6_HDRS vxge_mBIT(15)
4371 u8 unused01400[0x01400-0x01388];
4372
4373/*0x01400*/ u64 tx_vp_reset_discarded_frms;
4374#define VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_TX_VP_RESET_DISCARDED_FRMS(val) \
4375 vxge_vBIT(val, 48, 16)
4376 u8 unused01480[0x01480-0x01408];
4377
4378/*0x01480*/ u64 fau_rpa_vcfg;
4379#define VXGE_HW_FAU_RPA_VCFG_L4_COMP_CSUM vxge_mBIT(7)
4380#define VXGE_HW_FAU_RPA_VCFG_L3_INCL_CF vxge_mBIT(11)
4381#define VXGE_HW_FAU_RPA_VCFG_L3_COMP_CSUM vxge_mBIT(15)
4382 u8 unused014d0[0x014d0-0x01488];
4383
4384/*0x014d0*/ u64 dbg_stats_rx_mpa;
4385#define VXGE_HW_DBG_STATS_RX_MPA_CRC_FAIL_FRMS(val) vxge_vBIT(val, 0, 16)
4386#define VXGE_HW_DBG_STATS_RX_MPA_MRK_FAIL_FRMS(val) vxge_vBIT(val, 16, 16)
4387#define VXGE_HW_DBG_STATS_RX_MPA_LEN_FAIL_FRMS(val) vxge_vBIT(val, 32, 16)
4388/*0x014d8*/ u64 dbg_stats_rx_fau;
4389#define VXGE_HW_DBG_STATS_RX_FAU_RX_WOL_FRMS(val) vxge_vBIT(val, 0, 16)
4390#define VXGE_HW_DBG_STATS_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val) \
4391 vxge_vBIT(val, 16, 16)
4392#define VXGE_HW_DBG_STATS_RX_FAU_RX_PERMITTED_FRMS(val) \
4393 vxge_vBIT(val, 32, 32)
4394 u8 unused014f0[0x014f0-0x014e0];
4395
4396/*0x014f0*/ u64 fbmc_vp_rdy;
4397#define VXGE_HW_FBMC_VP_RDY_QUEUE_SPAV_FM vxge_mBIT(0)
4398 u8 unused01e00[0x01e00-0x014f8];
4399
4400/*0x01e00*/ u64 vpath_pcipif_int_status;
4401#define \
4402VXGE_HW_VPATH_PCIPIF_INT_STATUS_SRPCIM_MSG_TO_VPATH_SRPCIM_MSG_TO_VPATH_INT \
4403 vxge_mBIT(3)
4404#define VXGE_HW_VPATH_PCIPIF_INT_STATUS_VPATH_SPARE_R1_VPATH_SPARE_R1_INT \
4405 vxge_mBIT(7)
4406/*0x01e08*/ u64 vpath_pcipif_int_mask;
4407 u8 unused01e20[0x01e20-0x01e10];
4408
4409/*0x01e20*/ u64 srpcim_msg_to_vpath_reg;
4410#define VXGE_HW_SRPCIM_MSG_TO_VPATH_REG_SWIF_SRPCIM_TO_VPATH_RMSG_INT \
4411 vxge_mBIT(3)
4412/*0x01e28*/ u64 srpcim_msg_to_vpath_mask;
4413/*0x01e30*/ u64 srpcim_msg_to_vpath_alarm;
4414 u8 unused01ea0[0x01ea0-0x01e38];
4415
4416/*0x01ea0*/ u64 vpath_to_srpcim_wmsg;
4417#define VXGE_HW_VPATH_TO_SRPCIM_WMSG_VPATH_TO_SRPCIM_WMSG(val) \
4418 vxge_vBIT(val, 0, 64)
4419/*0x01ea8*/ u64 vpath_to_srpcim_wmsg_trig;
4420#define VXGE_HW_VPATH_TO_SRPCIM_WMSG_TRIG_VPATH_TO_SRPCIM_WMSG_TRIG \
4421 vxge_mBIT(0)
4422 u8 unused02000[0x02000-0x01eb0];
4423
4424/*0x02000*/ u64 vpath_general_int_status;
4425#define VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT vxge_mBIT(3)
4426#define VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT vxge_mBIT(7)
4427#define VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT vxge_mBIT(15)
4428#define VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT vxge_mBIT(19)
4429/*0x02008*/ u64 vpath_general_int_mask;
4430#define VXGE_HW_VPATH_GENERAL_INT_MASK_PIC_INT vxge_mBIT(3)
4431#define VXGE_HW_VPATH_GENERAL_INT_MASK_PCI_INT vxge_mBIT(7)
4432#define VXGE_HW_VPATH_GENERAL_INT_MASK_WRDMA_INT vxge_mBIT(15)
4433#define VXGE_HW_VPATH_GENERAL_INT_MASK_XMAC_INT vxge_mBIT(19)
4434/*0x02010*/ u64 vpath_ppif_int_status;
4435#define VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT \
4436 vxge_mBIT(3)
4437#define VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT \
4438 vxge_mBIT(7)
4439#define VXGE_HW_VPATH_PPIF_INT_STATUS_PCI_CONFIG_ERRORS_PCI_CONFIG_INT \
4440 vxge_mBIT(11)
4441#define \
4442VXGE_HW_VPATH_PPIF_INT_STATUS_MRPCIM_TO_VPATH_ALARM_MRPCIM_TO_VPATH_ALARM_INT \
4443 vxge_mBIT(15)
4444#define \
4445VXGE_HW_VPATH_PPIF_INT_STATUS_SRPCIM_TO_VPATH_ALARM_SRPCIM_TO_VPATH_ALARM_INT \
4446 vxge_mBIT(19)
4447/*0x02018*/ u64 vpath_ppif_int_mask;
4448/*0x02020*/ u64 kdfcctl_errors_reg;
4449#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR vxge_mBIT(3)
4450#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR vxge_mBIT(7)
4451#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR vxge_mBIT(11)
4452#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON vxge_mBIT(15)
4453#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON vxge_mBIT(19)
4454#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON vxge_mBIT(23)
4455#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR vxge_mBIT(31)
4456#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR vxge_mBIT(35)
4457#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR vxge_mBIT(39)
4458/*0x02028*/ u64 kdfcctl_errors_mask;
4459/*0x02030*/ u64 kdfcctl_errors_alarm;
4460 u8 unused02040[0x02040-0x02038];
4461
4462/*0x02040*/ u64 general_errors_reg;
4463#define VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW vxge_mBIT(3)
4464#define VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW vxge_mBIT(7)
4465#define VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW vxge_mBIT(11)
4466#define VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR vxge_mBIT(15)
4467#define VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ vxge_mBIT(19)
4468#define VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS vxge_mBIT(27)
4469#define VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET vxge_mBIT(31)
4470/*0x02048*/ u64 general_errors_mask;
4471/*0x02050*/ u64 general_errors_alarm;
4472/*0x02058*/ u64 pci_config_errors_reg;
4473#define VXGE_HW_PCI_CONFIG_ERRORS_REG_PCICONFIG_STATUS_ERR vxge_mBIT(3)
4474#define VXGE_HW_PCI_CONFIG_ERRORS_REG_PCICONFIG_UNCOR_ERR vxge_mBIT(7)
4475#define VXGE_HW_PCI_CONFIG_ERRORS_REG_PCICONFIG_COR_ERR vxge_mBIT(11)
4476/*0x02060*/ u64 pci_config_errors_mask;
4477/*0x02068*/ u64 pci_config_errors_alarm;
4478/*0x02070*/ u64 mrpcim_to_vpath_alarm_reg;
4479#define VXGE_HW_MRPCIM_TO_VPATH_ALARM_REG_PPIF_MRPCIM_TO_VPATH_ALARM \
4480 vxge_mBIT(3)
4481/*0x02078*/ u64 mrpcim_to_vpath_alarm_mask;
4482/*0x02080*/ u64 mrpcim_to_vpath_alarm_alarm;
4483/*0x02088*/ u64 srpcim_to_vpath_alarm_reg;
4484#define VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_PPIF_SRPCIM_TO_VPATH_ALARM(val) \
4485 vxge_vBIT(val, 0, 17)
4486/*0x02090*/ u64 srpcim_to_vpath_alarm_mask;
4487/*0x02098*/ u64 srpcim_to_vpath_alarm_alarm;
4488 u8 unused02108[0x02108-0x020a0];
4489
4490/*0x02108*/ u64 kdfcctl_status;
4491#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO0_PRES(val) vxge_vBIT(val, 0, 8)
4492#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO1_PRES(val) vxge_vBIT(val, 8, 8)
4493#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO2_PRES(val) vxge_vBIT(val, 16, 8)
4494#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO0_OVRWR(val) vxge_vBIT(val, 24, 8)
4495#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO1_OVRWR(val) vxge_vBIT(val, 32, 8)
4496#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO2_OVRWR(val) vxge_vBIT(val, 40, 8)
4497/*0x02110*/ u64 rsthdlr_status;
4498#define VXGE_HW_RSTHDLR_STATUS_RSTHDLR_CURRENT_RESET vxge_mBIT(3)
4499#define VXGE_HW_RSTHDLR_STATUS_RSTHDLR_CURRENT_VPIN(val) vxge_vBIT(val, 6, 2)
4500/*0x02118*/ u64 fifo0_status;
4501#define VXGE_HW_FIFO0_STATUS_DBLGEN_FIFO0_RDIDX(val) vxge_vBIT(val, 0, 12)
4502/*0x02120*/ u64 fifo1_status;
4503#define VXGE_HW_FIFO1_STATUS_DBLGEN_FIFO1_RDIDX(val) vxge_vBIT(val, 0, 12)
4504/*0x02128*/ u64 fifo2_status;
4505#define VXGE_HW_FIFO2_STATUS_DBLGEN_FIFO2_RDIDX(val) vxge_vBIT(val, 0, 12)
4506 u8 unused02158[0x02158-0x02130];
4507
4508/*0x02158*/ u64 tgt_illegal_access;
4509#define VXGE_HW_TGT_ILLEGAL_ACCESS_SWIF_REGION(val) vxge_vBIT(val, 1, 7)
4510 u8 unused02200[0x02200-0x02160];
4511
4512/*0x02200*/ u64 vpath_general_cfg1;
4513#define VXGE_HW_VPATH_GENERAL_CFG1_TC_VALUE(val) vxge_vBIT(val, 1, 3)
4514#define VXGE_HW_VPATH_GENERAL_CFG1_DATA_BYTE_SWAPEN vxge_mBIT(7)
4515#define VXGE_HW_VPATH_GENERAL_CFG1_DATA_FLIPEN vxge_mBIT(11)
4516#define VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN vxge_mBIT(15)
4517#define VXGE_HW_VPATH_GENERAL_CFG1_CTL_FLIPEN vxge_mBIT(23)
4518#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_ADDR_SWAPEN vxge_mBIT(51)
4519#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_ADDR_FLIPEN vxge_mBIT(55)
4520#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_DATA_SWAPEN vxge_mBIT(59)
4521#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_DATA_FLIPEN vxge_mBIT(63)
4522/*0x02208*/ u64 vpath_general_cfg2;
4523#define VXGE_HW_VPATH_GENERAL_CFG2_SIZE_QUANTUM(val) vxge_vBIT(val, 1, 3)
4524/*0x02210*/ u64 vpath_general_cfg3;
4525#define VXGE_HW_VPATH_GENERAL_CFG3_IGNORE_VPATH_RST_FOR_INTA vxge_mBIT(3)
4526 u8 unused02220[0x02220-0x02218];
4527
4528/*0x02220*/ u64 kdfcctl_cfg0;
4529#define VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 vxge_mBIT(1)
4530#define VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1 vxge_mBIT(2)
4531#define VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2 vxge_mBIT(3)
4532#define VXGE_HW_KDFCCTL_CFG0_BIT_FLIPEN_FIFO0 vxge_mBIT(5)
4533#define VXGE_HW_KDFCCTL_CFG0_BIT_FLIPEN_FIFO1 vxge_mBIT(6)
4534#define VXGE_HW_KDFCCTL_CFG0_BIT_FLIPEN_FIFO2 vxge_mBIT(7)
4535#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE0_FIFO0 vxge_mBIT(9)
4536#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE0_FIFO1 vxge_mBIT(10)
4537#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE0_FIFO2 vxge_mBIT(11)
4538#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE1_FIFO0 vxge_mBIT(13)
4539#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE1_FIFO1 vxge_mBIT(14)
4540#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE1_FIFO2 vxge_mBIT(15)
4541#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE2_FIFO0 vxge_mBIT(17)
4542#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE2_FIFO1 vxge_mBIT(18)
4543#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE2_FIFO2 vxge_mBIT(19)
4544#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE3_FIFO0 vxge_mBIT(21)
4545#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE3_FIFO1 vxge_mBIT(22)
4546#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE3_FIFO2 vxge_mBIT(23)
4547#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE4_FIFO0 vxge_mBIT(25)
4548#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE4_FIFO1 vxge_mBIT(26)
4549#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE4_FIFO2 vxge_mBIT(27)
4550#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE5_FIFO0 vxge_mBIT(29)
4551#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE5_FIFO1 vxge_mBIT(30)
4552#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE5_FIFO2 vxge_mBIT(31)
4553#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE6_FIFO0 vxge_mBIT(33)
4554#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE6_FIFO1 vxge_mBIT(34)
4555#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE6_FIFO2 vxge_mBIT(35)
4556#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE7_FIFO0 vxge_mBIT(37)
4557#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE7_FIFO1 vxge_mBIT(38)
4558#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE7_FIFO2 vxge_mBIT(39)
4559
4560 u8 unused02268[0x02268-0x02228];
4561
4562/*0x02268*/ u64 stats_cfg;
4563#define VXGE_HW_STATS_CFG_START_HOST_ADDR(val) vxge_vBIT(val, 0, 57)
4564/*0x02270*/ u64 interrupt_cfg0;
4565#define VXGE_HW_INTERRUPT_CFG0_MSIX_FOR_RXTI(val) vxge_vBIT(val, 1, 7)
4566#define VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(val) vxge_vBIT(val, 9, 7)
4567#define VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(val) vxge_vBIT(val, 17, 7)
4568#define VXGE_HW_INTERRUPT_CFG0_GROUP2_MSIX_FOR_TXTI(val) vxge_vBIT(val, 25, 7)
4569#define VXGE_HW_INTERRUPT_CFG0_GROUP3_MSIX_FOR_TXTI(val) vxge_vBIT(val, 33, 7)
4570 u8 unused02280[0x02280-0x02278];
4571
4572/*0x02280*/ u64 interrupt_cfg2;
4573#define VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(val) vxge_vBIT(val, 1, 7)
4574/*0x02288*/ u64 one_shot_vect0_en;
4575#define VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN vxge_mBIT(3)
4576/*0x02290*/ u64 one_shot_vect1_en;
4577#define VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN vxge_mBIT(3)
4578/*0x02298*/ u64 one_shot_vect2_en;
4579#define VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN vxge_mBIT(3)
4580/*0x022a0*/ u64 one_shot_vect3_en;
4581#define VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN vxge_mBIT(3)
4582 u8 unused022b0[0x022b0-0x022a8];
4583
4584/*0x022b0*/ u64 pci_config_access_cfg1;
4585#define VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(val) vxge_vBIT(val, 0, 12)
4586#define VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0 vxge_mBIT(15)
4587/*0x022b8*/ u64 pci_config_access_cfg2;
4588#define VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ vxge_mBIT(0)
4589/*0x022c0*/ u64 pci_config_access_status;
4590#define VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR vxge_mBIT(0)
4591#define VXGE_HW_PCI_CONFIG_ACCESS_STATUS_DATA(val) vxge_vBIT(val, 32, 32)
4592 u8 unused02300[0x02300-0x022c8];
4593
4594/*0x02300*/ u64 vpath_debug_stats0;
4595#define VXGE_HW_VPATH_DEBUG_STATS0_INI_NUM_MWR_SENT(val) vxge_vBIT(val, 0, 32)
4596/*0x02308*/ u64 vpath_debug_stats1;
4597#define VXGE_HW_VPATH_DEBUG_STATS1_INI_NUM_MRD_SENT(val) vxge_vBIT(val, 0, 32)
4598/*0x02310*/ u64 vpath_debug_stats2;
4599#define VXGE_HW_VPATH_DEBUG_STATS2_INI_NUM_CPL_RCVD(val) vxge_vBIT(val, 0, 32)
4600/*0x02318*/ u64 vpath_debug_stats3;
4601#define VXGE_HW_VPATH_DEBUG_STATS3_INI_NUM_MWR_BYTE_SENT(val) \
4602 vxge_vBIT(val, 0, 64)
4603/*0x02320*/ u64 vpath_debug_stats4;
4604#define VXGE_HW_VPATH_DEBUG_STATS4_INI_NUM_CPL_BYTE_RCVD(val) \
4605 vxge_vBIT(val, 0, 64)
4606/*0x02328*/ u64 vpath_debug_stats5;
4607#define VXGE_HW_VPATH_DEBUG_STATS5_WRCRDTARB_XOFF(val) vxge_vBIT(val, 32, 32)
4608/*0x02330*/ u64 vpath_debug_stats6;
4609#define VXGE_HW_VPATH_DEBUG_STATS6_RDCRDTARB_XOFF(val) vxge_vBIT(val, 32, 32)
4610/*0x02338*/ u64 vpath_genstats_count01;
4611#define VXGE_HW_VPATH_GENSTATS_COUNT01_PPIF_VPATH_GENSTATS_COUNT1(val) \
4612 vxge_vBIT(val, 0, 32)
4613#define VXGE_HW_VPATH_GENSTATS_COUNT01_PPIF_VPATH_GENSTATS_COUNT0(val) \
4614 vxge_vBIT(val, 32, 32)
4615/*0x02340*/ u64 vpath_genstats_count23;
4616#define VXGE_HW_VPATH_GENSTATS_COUNT23_PPIF_VPATH_GENSTATS_COUNT3(val) \
4617 vxge_vBIT(val, 0, 32)
4618#define VXGE_HW_VPATH_GENSTATS_COUNT23_PPIF_VPATH_GENSTATS_COUNT2(val) \
4619 vxge_vBIT(val, 32, 32)
4620/*0x02348*/ u64 vpath_genstats_count4;
4621#define VXGE_HW_VPATH_GENSTATS_COUNT4_PPIF_VPATH_GENSTATS_COUNT4(val) \
4622 vxge_vBIT(val, 32, 32)
4623/*0x02350*/ u64 vpath_genstats_count5;
4624#define VXGE_HW_VPATH_GENSTATS_COUNT5_PPIF_VPATH_GENSTATS_COUNT5(val) \
4625 vxge_vBIT(val, 32, 32)
4626 u8 unused02648[0x02648-0x02358];
4627} __packed;
4628
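/*
 * Usage sketch (illustrative): each field macro above packs a value into a
 * fixed bit position of the corresponding 64-bit register, so a register
 * image is built by OR-ing fields together before a single writeq(), e.g.
 *
 *	u64 val64 = VXGE_HW_VPATH_GENERAL_CFG1_TC_VALUE(1) |
 *		    VXGE_HW_VPATH_GENERAL_CFG1_DATA_BYTE_SWAPEN;
 *	writeq(val64, &vp_reg->vpath_general_cfg1);
 *
 * where vp_reg points at the per-vpath register block defined above.
 */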
4629#define VXGE_HW_EEPROM_SIZE (0x01 << 11)
4630
4631/* Capability lists */
4632#define VXGE_HW_PCI_EXP_LNKCAP_LNK_SPEED 0xf /* Supported Link speeds */
4633#define VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH 0x3f0 /* Supported Link width. */
4634#define VXGE_HW_PCI_EXP_LNKCAP_LW_RES 0x0 /* Reserved. */
4635
4636#endif
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
new file mode 100644
index 000000000000..ad64ce0afe3f
--- /dev/null
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
@@ -0,0 +1,2514 @@
1/******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
9 *
10 * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/
14#include <linux/etherdevice.h>
15#include <linux/prefetch.h>
16
17#include "vxge-traffic.h"
18#include "vxge-config.h"
19#include "vxge-main.h"
20
21/*
22 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
23 * @vp: Virtual Path handle.
24 *
25 * Enable vpath interrupts. The function is to be executed last in the
26 * vpath initialization sequence.
27 *
28 * See also: vxge_hw_vpath_intr_disable()
29 */
30enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
31{
32 u64 val64;
33
34 struct __vxge_hw_virtualpath *vpath;
35 struct vxge_hw_vpath_reg __iomem *vp_reg;
36 enum vxge_hw_status status = VXGE_HW_OK;
37 if (vp == NULL) {
38 status = VXGE_HW_ERR_INVALID_HANDLE;
39 goto exit;
40 }
41
42 vpath = vp->vpath;
43
44 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
45 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
46 goto exit;
47 }
48
49 vp_reg = vpath->vp_reg;
50
51 writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
52
53 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
54 &vp_reg->general_errors_reg);
55
56 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
57 &vp_reg->pci_config_errors_reg);
58
59 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
60 &vp_reg->mrpcim_to_vpath_alarm_reg);
61
62 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
63 &vp_reg->srpcim_to_vpath_alarm_reg);
64
65 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
66 &vp_reg->vpath_ppif_int_status);
67
68 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
69 &vp_reg->srpcim_msg_to_vpath_reg);
70
71 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
72 &vp_reg->vpath_pcipif_int_status);
73
74 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
75 &vp_reg->prc_alarm_reg);
76
77 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
78 &vp_reg->wrdma_alarm_status);
79
80 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
81 &vp_reg->asic_ntwk_vp_err_reg);
82
83 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
84 &vp_reg->xgmac_vp_int_status);
85
86 val64 = readq(&vp_reg->vpath_general_int_status);
87
88 /* Mask unwanted interrupts */
89
90 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
91 &vp_reg->vpath_pcipif_int_mask);
92
93 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
94 &vp_reg->srpcim_msg_to_vpath_mask);
95
96 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
97 &vp_reg->srpcim_to_vpath_alarm_mask);
98
99 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
100 &vp_reg->mrpcim_to_vpath_alarm_mask);
101
102 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
103 &vp_reg->pci_config_errors_mask);
104
105 /* Unmask the individual interrupts */
106
107 writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
108 VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
109 VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
110 VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
111 &vp_reg->general_errors_mask);
112
113 __vxge_hw_pio_mem_write32_upper(
114 (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
115 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
116 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
117 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
118 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
119 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
120 &vp_reg->kdfcctl_errors_mask);
121
122 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
123
124 __vxge_hw_pio_mem_write32_upper(
125 (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
126 &vp_reg->prc_alarm_mask);
127
128 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
129 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);
130
131 if (vpath->hldev->first_vp_id != vpath->vp_id)
132 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
133 &vp_reg->asic_ntwk_vp_err_mask);
134 else
135 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
136 VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
137 VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
138 &vp_reg->asic_ntwk_vp_err_mask);
139
140 __vxge_hw_pio_mem_write32_upper(0,
141 &vp_reg->vpath_general_int_mask);
142exit:
143 return status;
144
145}
146
147/*
148 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
149 * @vp: Virtual Path handle.
150 *
151 * Disable vpath interrupts. The function is to be executed as part of
152 * the vpath teardown sequence.
153 *
154 * See also: vxge_hw_vpath_intr_enable()
155 */
156enum vxge_hw_status vxge_hw_vpath_intr_disable(
157 struct __vxge_hw_vpath_handle *vp)
158{
159 u64 val64;
160
161 struct __vxge_hw_virtualpath *vpath;
162 enum vxge_hw_status status = VXGE_HW_OK;
163 struct vxge_hw_vpath_reg __iomem *vp_reg;
164 if (vp == NULL) {
165 status = VXGE_HW_ERR_INVALID_HANDLE;
166 goto exit;
167 }
168
169 vpath = vp->vpath;
170
171 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
172 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
173 goto exit;
174 }
175 vp_reg = vpath->vp_reg;
176
177 __vxge_hw_pio_mem_write32_upper(
178 (u32)VXGE_HW_INTR_MASK_ALL,
179 &vp_reg->vpath_general_int_mask);
180
181 val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));
182
183 writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
184
185 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
186 &vp_reg->general_errors_mask);
187
188 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
189 &vp_reg->pci_config_errors_mask);
190
191 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
192 &vp_reg->mrpcim_to_vpath_alarm_mask);
193
194 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
195 &vp_reg->srpcim_to_vpath_alarm_mask);
196
197 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
198 &vp_reg->vpath_ppif_int_mask);
199
200 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
201 &vp_reg->srpcim_msg_to_vpath_mask);
202
203 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
204 &vp_reg->vpath_pcipif_int_mask);
205
206 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
207 &vp_reg->wrdma_alarm_mask);
208
209 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
210 &vp_reg->prc_alarm_mask);
211
212 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
213 &vp_reg->xgmac_vp_int_mask);
214
215 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
216 &vp_reg->asic_ntwk_vp_err_mask);
217
218exit:
219 return status;
220}
221
222void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
223{
224 struct vxge_hw_vpath_reg __iomem *vp_reg;
225 struct vxge_hw_vp_config *config;
226 u64 val64;
227
228 if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
229 return;
230
231 vp_reg = fifo->vp_reg;
232 config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
233
234 if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
235 config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
236 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
237 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
238 fifo->tim_tti_cfg1_saved = val64;
239 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
240 }
241}
242
243void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
244{
245 u64 val64 = ring->tim_rti_cfg1_saved;
246
247 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
248 ring->tim_rti_cfg1_saved = val64;
249 writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
250}
251
252void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
253{
254 u64 val64 = fifo->tim_tti_cfg3_saved;
255 u64 timer = (fifo->rtimer * 1000) / 272;
256
257 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
258 if (timer)
259 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
260 VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
261
262 writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
263 /* tti_cfg3_saved is not updated again because it is
264 * initialized at one place only - init time.
265 */
266}
267
268void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
269{
270 u64 val64 = ring->tim_rti_cfg3_saved;
271 u64 timer = (ring->rtimer * 1000) / 272;
272
273 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
274 if (timer)
275 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
276 VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
277
278 writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
279 /* rti_cfg3_saved is not updated again because it is
280 * initialized at one place only - init time.
281 */
282}
283
284/**
285 * vxge_hw_channel_msix_mask - Mask MSIX Vector.
286 * @channel: Channel for rx or tx handle
287 * @msix_id: MSIX ID
288 *
289 * The function masks the msix interrupt for the given msix_id
290 *
291 * Returns: void
292 */
293void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
294{
295
296 __vxge_hw_pio_mem_write32_upper(
297 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
298 &channel->common_reg->set_msix_mask_vect[msix_id%4]);
299}
300
301/**
302 * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
303 * @channel: Channel for rx or tx handle
304 * @msix_id: MSIX ID
305 *
306 * The function unmasks the msix interrupt for the given msix_id
307 *
308 * Returns: void
309 */
310void
311vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
312{
313
314 __vxge_hw_pio_mem_write32_upper(
315 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
316 &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
317}
318
319/**
320 * vxge_hw_channel_msix_clear - Unmask the MSIX Vector.
321 * @channel: Channel for rx or tx handle
322 * @msix_id: MSI ID
323 *
324 * The function unmasks the msix interrupt for the given msix_id
325 * if configured in MSIX oneshot mode
326 *
327 * Returns: void
328 */
329void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
330{
331 __vxge_hw_pio_mem_write32_upper(
332 (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
333 &channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
334}
335
336/**
337 * vxge_hw_device_set_intr_type - Updates the configuration
338 * with new interrupt type.
339 * @hldev: HW device handle.
340 * @intr_mode: New interrupt type
341 */
342u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
343{
344
345 if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
346 (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
347 (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
348 (intr_mode != VXGE_HW_INTR_MODE_DEF))
349 intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
350
351 hldev->config.intr_mode = intr_mode;
352 return intr_mode;
353}
354
355/**
356 * vxge_hw_device_intr_enable - Enable interrupts.
357 * @hldev: HW device handle.
360 *
361 * Enable Titan interrupts. The function is to be executed last in the
362 * Titan initialization sequence.
363 *
364 * See also: vxge_hw_device_intr_disable()
365 */
366void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
367{
368 u32 i;
369 u64 val64;
370 u32 val32;
371
372 vxge_hw_device_mask_all(hldev);
373
374 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
375
376 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
377 continue;
378
379 vxge_hw_vpath_intr_enable(
380 VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
381 }
382
383 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
384 val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
385 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
386
387 if (val64 != 0) {
388 writeq(val64, &hldev->common_reg->tim_int_status0);
389
390 writeq(~val64, &hldev->common_reg->tim_int_mask0);
391 }
392
393 val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
394 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
395
396 if (val32 != 0) {
397 __vxge_hw_pio_mem_write32_upper(val32,
398 &hldev->common_reg->tim_int_status1);
399
400 __vxge_hw_pio_mem_write32_upper(~val32,
401 &hldev->common_reg->tim_int_mask1);
402 }
403 }
404
405 val64 = readq(&hldev->common_reg->titan_general_int_status);
406
407 vxge_hw_device_unmask_all(hldev);
408}
409
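/*
 * Usage sketch (illustrative, assuming @hldev was obtained from device
 * initialization): a driver typically selects the interrupt mode once at
 * probe time and enables interrupts as the very last step of bringing the
 * device up.
 */
static void vxge_example_intr_setup(struct __vxge_hw_device *hldev)
{
	/* An invalid mode silently falls back to INTA (IRQLINE) */
	vxge_hw_device_set_intr_type(hldev, VXGE_HW_INTR_MODE_MSIX);

	/* Enables interrupts on every deployed vpath, then unmasks */
	vxge_hw_device_intr_enable(hldev);
}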
410/**
411 * vxge_hw_device_intr_disable - Disable Titan interrupts.
412 * @hldev: HW device handle.
415 *
416 * Disable Titan interrupts.
417 *
418 * See also: vxge_hw_device_intr_enable()
419 */
420void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
421{
422 u32 i;
423
424 vxge_hw_device_mask_all(hldev);
425
426 /* mask all the tim interrupts */
427 writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
428 __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
429 &hldev->common_reg->tim_int_mask1);
430
431 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
432
433 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
434 continue;
435
436 vxge_hw_vpath_intr_disable(
437 VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
438 }
439}
440
441/**
442 * vxge_hw_device_mask_all - Mask all device interrupts.
443 * @hldev: HW device handle.
444 *
445 * Mask all device interrupts.
446 *
447 * See also: vxge_hw_device_unmask_all()
448 */
449void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
450{
451 u64 val64;
452
453 val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
454 VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
455
456 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
457 &hldev->common_reg->titan_mask_all_int);
458}
459
460/**
461 * vxge_hw_device_unmask_all - Unmask all device interrupts.
462 * @hldev: HW device handle.
463 *
464 * Unmask all device interrupts.
465 *
466 * See also: vxge_hw_device_mask_all()
467 */
468void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
469{
470 u64 val64 = 0;
471
472 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
473 val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
474
475 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
476 &hldev->common_reg->titan_mask_all_int);
477}
478
479/**
480 * vxge_hw_device_flush_io - Flush io writes.
481 * @hldev: HW device handle.
482 *
483 * The function performs a read operation to flush io writes.
484 *
485 * Returns: void
486 */
487void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
488{
489 u32 val32;
490
491 val32 = readl(&hldev->common_reg->titan_general_int_status);
492}
493
494/**
495 * __vxge_hw_device_handle_error - Handle error
496 * @hldev: HW device
497 * @vp_id: Vpath Id
498 * @type: Error type. Please see enum vxge_hw_event{}
499 *
500 * Handle error.
501 */
502static enum vxge_hw_status
503__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
504 enum vxge_hw_event type)
505{
506 switch (type) {
507 case VXGE_HW_EVENT_UNKNOWN:
508 break;
509 case VXGE_HW_EVENT_RESET_START:
510 case VXGE_HW_EVENT_RESET_COMPLETE:
511 case VXGE_HW_EVENT_LINK_DOWN:
512 case VXGE_HW_EVENT_LINK_UP:
513 goto out;
514 case VXGE_HW_EVENT_ALARM_CLEARED:
515 goto out;
516 case VXGE_HW_EVENT_ECCERR:
517 case VXGE_HW_EVENT_MRPCIM_ECCERR:
518 goto out;
519 case VXGE_HW_EVENT_FIFO_ERR:
520 case VXGE_HW_EVENT_VPATH_ERR:
521 case VXGE_HW_EVENT_CRITICAL_ERR:
522 case VXGE_HW_EVENT_SERR:
523 break;
524 case VXGE_HW_EVENT_SRPCIM_SERR:
525 case VXGE_HW_EVENT_MRPCIM_SERR:
526 goto out;
527 case VXGE_HW_EVENT_SLOT_FREEZE:
528 break;
529 default:
530 vxge_assert(0);
531 goto out;
532 }
533
534 /* notify driver */
535 if (hldev->uld_callbacks.crit_err)
536 hldev->uld_callbacks.crit_err(
537 (struct __vxge_hw_device *)hldev,
538 type, vp_id);
539out:
540
541 return VXGE_HW_OK;
542}
543
544/*
545 * __vxge_hw_device_handle_link_down_ind
546 * @hldev: HW device handle.
547 *
548 * Link down indication handler. The function is invoked by HW when
549 * Titan indicates that the link is down.
550 */
551static enum vxge_hw_status
552__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
553{
554 /*
555 * If the link state is already down, return.
556 */
557 if (hldev->link_state == VXGE_HW_LINK_DOWN)
558 goto exit;
559
560 hldev->link_state = VXGE_HW_LINK_DOWN;
561
562 /* notify driver */
563 if (hldev->uld_callbacks.link_down)
564 hldev->uld_callbacks.link_down(hldev);
565exit:
566 return VXGE_HW_OK;
567}
568
569/*
570 * __vxge_hw_device_handle_link_up_ind
571 * @hldev: HW device handle.
572 *
573 * Link up indication handler. The function is invoked by HW when
574 * Titan indicates that the link is up for a programmable amount of time.
575 */
576static enum vxge_hw_status
577__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
578{
579 /*
580 * If the link state is already up, return.
581 */
582 if (hldev->link_state == VXGE_HW_LINK_UP)
583 goto exit;
584
585 hldev->link_state = VXGE_HW_LINK_UP;
586
587 /* notify driver */
588 if (hldev->uld_callbacks.link_up)
589 hldev->uld_callbacks.link_up(hldev);
590exit:
591 return VXGE_HW_OK;
592}
593
594/*
595 * __vxge_hw_vpath_alarm_process - Process Alarms.
596 * @vpath: Virtual Path.
597 * @skip_alarms: Do not clear the alarms
598 *
599 * Process vpath alarms.
600 *
601 */
602static enum vxge_hw_status
603__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
604 u32 skip_alarms)
605{
606 u64 val64;
607 u64 alarm_status;
608 u64 pic_status;
609 struct __vxge_hw_device *hldev = NULL;
610 enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
611 u64 mask64;
612 struct vxge_hw_vpath_stats_sw_info *sw_stats;
613 struct vxge_hw_vpath_reg __iomem *vp_reg;
614
615 if (vpath == NULL) {
616 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
617 alarm_event);
618 goto out2;
619 }
620
621 hldev = vpath->hldev;
622 vp_reg = vpath->vp_reg;
623 alarm_status = readq(&vp_reg->vpath_general_int_status);
624
625 if (alarm_status == VXGE_HW_ALL_FOXES) {
626 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
627 alarm_event);
628 goto out;
629 }
630
631 sw_stats = vpath->sw_stats;
632
633 if (alarm_status & ~(
634 VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
635 VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
636 VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
637 VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
638 sw_stats->error_stats.unknown_alarms++;
639
640 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
641 alarm_event);
642 goto out;
643 }
644
645 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
646
647 val64 = readq(&vp_reg->xgmac_vp_int_status);
648
649 if (val64 &
650 VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
651
652 val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
653
654 if (((val64 &
655 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
656 (!(val64 &
657 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
658 ((val64 &
659 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
660 (!(val64 &
661 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
662 ))) {
663 sw_stats->error_stats.network_sustained_fault++;
664
665 writeq(
666 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
667 &vp_reg->asic_ntwk_vp_err_mask);
668
669 __vxge_hw_device_handle_link_down_ind(hldev);
670 alarm_event = VXGE_HW_SET_LEVEL(
671 VXGE_HW_EVENT_LINK_DOWN, alarm_event);
672 }
673
674 if (((val64 &
675 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
676 (!(val64 &
677 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
678 ((val64 &
679 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
680 (!(val64 &
681 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
682 ))) {
683
684 sw_stats->error_stats.network_sustained_ok++;
685
686 writeq(
687 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
688 &vp_reg->asic_ntwk_vp_err_mask);
689
690 __vxge_hw_device_handle_link_up_ind(hldev);
691 alarm_event = VXGE_HW_SET_LEVEL(
692 VXGE_HW_EVENT_LINK_UP, alarm_event);
693 }
694
695 writeq(VXGE_HW_INTR_MASK_ALL,
696 &vp_reg->asic_ntwk_vp_err_reg);
697
698 alarm_event = VXGE_HW_SET_LEVEL(
699 VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
700
701 if (skip_alarms)
702 return VXGE_HW_OK;
703 }
704 }
705
706 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
707
708 pic_status = readq(&vp_reg->vpath_ppif_int_status);
709
710 if (pic_status &
711 VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
712
713 val64 = readq(&vp_reg->general_errors_reg);
714 mask64 = readq(&vp_reg->general_errors_mask);
715
716 if ((val64 &
717 VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
718 ~mask64) {
719 sw_stats->error_stats.ini_serr_det++;
720
721 alarm_event = VXGE_HW_SET_LEVEL(
722 VXGE_HW_EVENT_SERR, alarm_event);
723 }
724
725 if ((val64 &
726 VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
727 ~mask64) {
728 sw_stats->error_stats.dblgen_fifo0_overflow++;
729
730 alarm_event = VXGE_HW_SET_LEVEL(
731 VXGE_HW_EVENT_FIFO_ERR, alarm_event);
732 }
733
734 if ((val64 &
735 VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
736 ~mask64)
737 sw_stats->error_stats.statsb_pif_chain_error++;
738
739 if ((val64 &
740 VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
741 ~mask64)
742 sw_stats->error_stats.statsb_drop_timeout++;
743
744 if ((val64 &
745 VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
746 ~mask64)
747 sw_stats->error_stats.target_illegal_access++;
748
749 if (!skip_alarms) {
750 writeq(VXGE_HW_INTR_MASK_ALL,
751 &vp_reg->general_errors_reg);
752 alarm_event = VXGE_HW_SET_LEVEL(
753 VXGE_HW_EVENT_ALARM_CLEARED,
754 alarm_event);
755 }
756 }
757
758 if (pic_status &
759 VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
760
761 val64 = readq(&vp_reg->kdfcctl_errors_reg);
762 mask64 = readq(&vp_reg->kdfcctl_errors_mask);
763
764 if ((val64 &
765 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
766 ~mask64) {
767 sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
768
769 alarm_event = VXGE_HW_SET_LEVEL(
770 VXGE_HW_EVENT_FIFO_ERR,
771 alarm_event);
772 }
773
774 if ((val64 &
775 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
776 ~mask64) {
777 sw_stats->error_stats.kdfcctl_fifo0_poison++;
778
779 alarm_event = VXGE_HW_SET_LEVEL(
780 VXGE_HW_EVENT_FIFO_ERR,
781 alarm_event);
782 }
783
784 if ((val64 &
785 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
786 ~mask64) {
787 sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
788
789 alarm_event = VXGE_HW_SET_LEVEL(
790 VXGE_HW_EVENT_FIFO_ERR,
791 alarm_event);
792 }
793
794 if (!skip_alarms) {
795 writeq(VXGE_HW_INTR_MASK_ALL,
796 &vp_reg->kdfcctl_errors_reg);
797 alarm_event = VXGE_HW_SET_LEVEL(
798 VXGE_HW_EVENT_ALARM_CLEARED,
799 alarm_event);
800 }
801 }
802
803 }
804
805 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
806
807 val64 = readq(&vp_reg->wrdma_alarm_status);
808
809 if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
810
811 val64 = readq(&vp_reg->prc_alarm_reg);
812 mask64 = readq(&vp_reg->prc_alarm_mask);
813
814 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
815 ~mask64)
816 sw_stats->error_stats.prc_ring_bumps++;
817
818 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
819 ~mask64) {
820 sw_stats->error_stats.prc_rxdcm_sc_err++;
821
822 alarm_event = VXGE_HW_SET_LEVEL(
823 VXGE_HW_EVENT_VPATH_ERR,
824 alarm_event);
825 }
826
827 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
828 & ~mask64) {
829 sw_stats->error_stats.prc_rxdcm_sc_abort++;
830
831 alarm_event = VXGE_HW_SET_LEVEL(
832 VXGE_HW_EVENT_VPATH_ERR,
833 alarm_event);
834 }
835
836 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
837 & ~mask64) {
838 sw_stats->error_stats.prc_quanta_size_err++;
839
840 alarm_event = VXGE_HW_SET_LEVEL(
841 VXGE_HW_EVENT_VPATH_ERR,
842 alarm_event);
843 }
844
845 if (!skip_alarms) {
846 writeq(VXGE_HW_INTR_MASK_ALL,
847 &vp_reg->prc_alarm_reg);
848 alarm_event = VXGE_HW_SET_LEVEL(
849 VXGE_HW_EVENT_ALARM_CLEARED,
850 alarm_event);
851 }
852 }
853 }
854out:
855 hldev->stats.sw_dev_err_stats.vpath_alarms++;
856out2:
857 if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
858 (alarm_event == VXGE_HW_EVENT_UNKNOWN))
859 return VXGE_HW_OK;
860
861 __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
862
863 if (alarm_event == VXGE_HW_EVENT_SERR)
864 return VXGE_HW_ERR_CRITICAL;
865
866 return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
867 VXGE_HW_ERR_SLOT_FREEZE :
868 (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
869 VXGE_HW_ERR_VPATH;
870}
871
872/**
873 * vxge_hw_device_begin_irq - Begin IRQ processing.
874 * @hldev: HW device handle.
875 * @skip_alarms: Do not clear the alarms
876 * @reason: "Reason" for the interrupt, the value of Titan's
877 * general_int_status register.
878 *
879 * The function performs two actions: it first checks whether the interrupt
880 * was raised by the device (relevant for shared IRQs), then it masks the device interrupts.
881 *
882 * Note:
883 * vxge_hw_device_begin_irq() does not flush MMIO writes through the
884 * bridge. Therefore, two back-to-back interrupts are potentially possible.
885 *
886 * Returns: 0, if the interrupt is not "ours" (note that in this case the
887 * device remains enabled).
888 * Otherwise, vxge_hw_device_begin_irq() returns 64bit general adapter
889 * status.
890 */
891enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
892 u32 skip_alarms, u64 *reason)
893{
894 u32 i;
895 u64 val64;
896 u64 adapter_status;
897 u64 vpath_mask;
898 enum vxge_hw_status ret = VXGE_HW_OK;
899
900 val64 = readq(&hldev->common_reg->titan_general_int_status);
901
902 if (unlikely(!val64)) {
903 /* not Titan interrupt */
904 *reason = 0;
905 ret = VXGE_HW_ERR_WRONG_IRQ;
906 goto exit;
907 }
908
909 if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
910
911 adapter_status = readq(&hldev->common_reg->adapter_status);
912
913 if (adapter_status == VXGE_HW_ALL_FOXES) {
914
915 __vxge_hw_device_handle_error(hldev,
916 NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
917 *reason = 0;
918 ret = VXGE_HW_ERR_SLOT_FREEZE;
919 goto exit;
920 }
921 }
922
923 hldev->stats.sw_dev_info_stats.total_intr_cnt++;
924
925 *reason = val64;
926
927 vpath_mask = hldev->vpaths_deployed >>
928 (64 - VXGE_HW_MAX_VIRTUAL_PATHS);
929
930 if (val64 &
931 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
932 hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
933
934 return VXGE_HW_OK;
935 }
936
937 hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
938
939 if (unlikely(val64 &
940 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
941
942 enum vxge_hw_status error_level = VXGE_HW_OK;
943
944 hldev->stats.sw_dev_err_stats.vpath_alarms++;
945
946 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
947
948 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
949 continue;
950
951 ret = __vxge_hw_vpath_alarm_process(
952 &hldev->virtual_paths[i], skip_alarms);
953
954 error_level = VXGE_HW_SET_LEVEL(ret, error_level);
955
956 if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
957 (ret == VXGE_HW_ERR_SLOT_FREEZE)))
958 break;
959 }
960
961 ret = error_level;
962 }
963exit:
964 return ret;
965}
966
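/*
 * Usage sketch (illustrative, assuming the usual <linux/interrupt.h>
 * definitions): an INTA handler first asks vxge_hw_device_begin_irq()
 * whether the interrupt belongs to this adapter, then masks the device and
 * defers the real Rx/Tx work, unmasking again once polling completes.
 */
static irqreturn_t vxge_example_isr(int irq, void *dev_id)
{
	struct __vxge_hw_device *hldev = dev_id;
	u64 reason;

	if (vxge_hw_device_begin_irq(hldev, 0, &reason) ==
	    VXGE_HW_ERR_WRONG_IRQ)
		return IRQ_NONE;	/* shared line, not our interrupt */

	vxge_hw_device_mask_all(hldev);
	vxge_hw_device_clear_tx_rx(hldev);

	/* ... schedule NAPI / bottom-half processing here, and call
	 * vxge_hw_device_unmask_all() when that processing is done ... */

	return IRQ_HANDLED;
}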
967/**
968 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
969 * condition that has caused the Tx and Rx interrupt.
970 * @hldev: HW device.
971 *
972 * Acknowledge (that is, clear) the condition that has caused
973 * the Tx and Rx interrupt.
974 * See also: vxge_hw_device_begin_irq(),
975 * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
976 */
977void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
978{
979
980 if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
981 (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
982 writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
983 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
984 &hldev->common_reg->tim_int_status0);
985 }
986
987 if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
988 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
989 __vxge_hw_pio_mem_write32_upper(
990 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
991 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
992 &hldev->common_reg->tim_int_status1);
993 }
994}
995
996/*
997 * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
998 * @channel: Channel
999 * @dtrh: Buffer to return the DTR pointer
1000 *
1001 * Allocates a dtr from the reserve array. If the reserve array is empty,
1002 * it swaps the reserve and free arrays.
1003 *
1004 */
1005static enum vxge_hw_status
1006vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
1007{
1008 void **tmp_arr;
1009
1010 if (channel->reserve_ptr - channel->reserve_top > 0) {
1011_alloc_after_swap:
1012 *dtrh = channel->reserve_arr[--channel->reserve_ptr];
1013
1014 return VXGE_HW_OK;
1015 }
1016
1017 /* switch between empty and full arrays */
1018
1019 /* The idea behind this design: keeping the free and reserve arrays
1020 * separate also separates the irq and non-irq paths, i.e. no additional
1021 * locking is needed when a resource is freed. */
1022
1023 if (channel->length - channel->free_ptr > 0) {
1024
1025 tmp_arr = channel->reserve_arr;
1026 channel->reserve_arr = channel->free_arr;
1027 channel->free_arr = tmp_arr;
1028 channel->reserve_ptr = channel->length;
1029 channel->reserve_top = channel->free_ptr;
1030 channel->free_ptr = channel->length;
1031
1032 channel->stats->reserve_free_swaps_cnt++;
1033
1034 goto _alloc_after_swap;
1035 }
1036
1037 channel->stats->full_cnt++;
1038
1039 *dtrh = NULL;
1040 return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
1041}
1042
1043/*
1044 * vxge_hw_channel_dtr_post - Post a dtr to the channel
1045 * @channelh: Channel
1046 * @dtrh: DTR pointer
1047 *
1048 * Posts a dtr to work array.
1049 *
1050 */
1051static void
1052vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
1053{
1054 vxge_assert(channel->work_arr[channel->post_index] == NULL);
1055
1056 channel->work_arr[channel->post_index++] = dtrh;
1057
1058 /* wrap-around */
1059 if (channel->post_index == channel->length)
1060 channel->post_index = 0;
1061}
1062
1063/*
1064 * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
1065 * @channel: Channel
1066 * @dtr: Buffer to return the next completed DTR pointer
1067 *
1068 * Returns the next completed dtr without removing it from the work array.
1069 *
1070 */
1071void
1072vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
1073{
1074 vxge_assert(channel->compl_index < channel->length);
1075
1076 *dtrh = channel->work_arr[channel->compl_index];
1077 prefetch(*dtrh);
1078}
1079
1080/*
1081 * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
1082 * @channel: Channel handle
1083 *
1084 * Removes the next completed dtr from work array
1085 *
1086 */
1087void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
1088{
1089 channel->work_arr[channel->compl_index] = NULL;
1090
1091 /* wrap-around */
1092 if (++channel->compl_index == channel->length)
1093 channel->compl_index = 0;
1094
1095 channel->stats->total_compl_cnt++;
1096}
1097
1098/*
1099 * vxge_hw_channel_dtr_free - Frees a dtr
1100 * @channel: Channel handle
1101 * @dtr: DTR pointer
1102 *
1103 * Returns the dtr to the free array.
1104 *
1105 */
1106void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
1107{
1108 channel->free_arr[--channel->free_ptr] = dtrh;
1109}
1110
1111/*
1112 * vxge_hw_channel_dtr_count
1113 * @channel: Channel handle. Obtained via vxge_hw_channel_open().
1114 *
1115 * Retrieve the number of DTRs available. This function cannot be called
1116 * from the data path; ring_initial_replenish() is the only user.
1117 */
1118int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
1119{
1120 return (channel->reserve_ptr - channel->reserve_top) +
1121 (channel->length - channel->free_ptr);
1122}
1123
1124/**
1125 * vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
1126 * @ring: Handle to the ring object used for receive
1127 * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
1128 * with a valid handle.
1129 *
1130 * Reserve an Rx descriptor for the subsequent filling-in by the driver
1131 * and posting on the corresponding channel (@channelh)
1132 * via vxge_hw_ring_rxd_post().
1133 *
1134 * Returns: VXGE_HW_OK - success.
1135 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
1136 *
1137 */
1138enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
1139 void **rxdh)
1140{
1141 enum vxge_hw_status status;
1142 struct __vxge_hw_channel *channel;
1143
1144 channel = &ring->channel;
1145
1146 status = vxge_hw_channel_dtr_alloc(channel, rxdh);
1147
1148 if (status == VXGE_HW_OK) {
1149 struct vxge_hw_ring_rxd_1 *rxdp =
1150 (struct vxge_hw_ring_rxd_1 *)*rxdh;
1151
1152 rxdp->control_0 = rxdp->control_1 = 0;
1153 }
1154
1155 return status;
1156}
1157
1158/**
1159 * vxge_hw_ring_rxd_free - Free descriptor.
1160 * @ring: Handle to the ring object used for receive
1161 * @rxdh: Descriptor handle.
1162 *
1163 * Free the reserved descriptor. This operation is "symmetrical" to
1164 * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
1165 * lifecycle.
1166 *
1167 * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
1168 * be:
1169 *
1170 * - reserved (vxge_hw_ring_rxd_reserve);
1171 *
1172 * - posted (vxge_hw_ring_rxd_post);
1173 *
1174 * - completed (vxge_hw_ring_rxd_next_completed);
1175 *
1176 * - and recycled again (vxge_hw_ring_rxd_free).
1177 *
1178 * For alternative state transitions and more details please refer to
1179 * the design doc.
1180 *
1181 */
1182void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
1183{
1184 struct __vxge_hw_channel *channel;
1185
1186 channel = &ring->channel;
1187
1188 vxge_hw_channel_dtr_free(channel, rxdh);
1189
1190}
1191
1192/**
1193 * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
1194 * @ring: Handle to the ring object used for receive
1195 * @rxdh: Descriptor handle.
1196 *
1197 * This routine prepares an rxd and posts it.
1198 */
1199void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
1200{
1201 struct __vxge_hw_channel *channel;
1202
1203 channel = &ring->channel;
1204
1205 vxge_hw_channel_dtr_post(channel, rxdh);
1206}
1207
1208/**
1209 * vxge_hw_ring_rxd_post_post - Process rxd after post.
1210 * @ring: Handle to the ring object used for receive
1211 * @rxdh: Descriptor handle.
1212 *
1213 * Processes rxd after post
1214 */
1215void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
1216{
1217 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1218 struct __vxge_hw_channel *channel;
1219
1220 channel = &ring->channel;
1221
1222 rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1223
1224 if (ring->stats->common_stats.usage_cnt > 0)
1225 ring->stats->common_stats.usage_cnt--;
1226}
1227
1228/**
1229 * vxge_hw_ring_rxd_post - Post descriptor on the ring.
1230 * @ring: Handle to the ring object used for receive
1231 * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
1232 *
1233 * Post descriptor on the ring.
1234 * Prior to posting, the descriptor should be filled in accordance with
1235 * Host/Titan interface specification for a given service (LL, etc.).
1236 *
1237 */
1238void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
1239{
1240 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1241 struct __vxge_hw_channel *channel;
1242
1243 channel = &ring->channel;
1244
1245 wmb();
1246 rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1247
1248 vxge_hw_channel_dtr_post(channel, rxdh);
1249
1250 if (ring->stats->common_stats.usage_cnt > 0)
1251 ring->stats->common_stats.usage_cnt--;
1252}
1253
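/*
 * Usage sketch (illustrative): replenishing the receive ring is a
 * reserve / fill / post cycle; the buffer itself is attached with the
 * one-buffer helper declared in vxge-traffic.h.
 */
static enum vxge_hw_status
vxge_example_rx_replenish_one(struct __vxge_hw_ring *ring,
			      dma_addr_t dma, u32 size)
{
	enum vxge_hw_status status;
	void *rxdh;

	status = vxge_hw_ring_rxd_reserve(ring, &rxdh);
	if (status != VXGE_HW_OK)
		return status;			/* ring is full for now */

	vxge_hw_ring_rxd_1b_set(rxdh, dma, size);
	vxge_hw_ring_rxd_post(ring, rxdh);	/* hands ownership to the NIC */

	return VXGE_HW_OK;
}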
1254/**
1255 * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
1256 * @ring: Handle to the ring object used for receive
1257 * @rxdh: Descriptor handle.
1258 *
1259 * Processes rxd after post with memory barrier.
1260 */
1261void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
1262{
1263 wmb();
1264 vxge_hw_ring_rxd_post_post(ring, rxdh);
1265}
1266
1267/**
1268 * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
1269 * @ring: Handle to the ring object used for receive
1270 * @rxdh: Descriptor handle. Returned by HW.
1271 * @t_code: Transfer code, as per Titan User Guide,
1272 * Receive Descriptor Format. Returned by HW.
1273 *
1274 * Retrieve the _next_ completed descriptor.
1275 * HW uses the ring callback (*vxge_hw_ring_callback_f) to notify the
1276 * driver of new completed descriptors. After that
1277 * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest of the
1278 * completions (the very first completion is passed by HW via
1279 * vxge_hw_ring_callback_f).
1280 *
1281 * Implementation-wise, the driver is free to call
1282 * vxge_hw_ring_rxd_next_completed either immediately from inside the
1283 * ring callback, or in a deferred fashion and separate (from HW)
1284 * context.
1285 *
1286 * Non-zero @t_code means failure to fill-in receive buffer(s)
1287 * of the descriptor.
1288 * For instance, parity error detected during the data transfer.
1289 * In this case Titan will complete the descriptor and indicate
1290 * for the host that the received data is not to be used.
1291 * For details please refer to Titan User Guide.
1292 *
1293 * Returns: VXGE_HW_OK - success.
1294 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1295 * are currently available for processing.
1296 *
1297 * See also: vxge_hw_ring_callback_f{},
1298 * vxge_hw_fifo_rxd_next_completed(), enum vxge_hw_status{}.
1299 */
1300enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
1301 struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
1302{
1303 struct __vxge_hw_channel *channel;
1304 struct vxge_hw_ring_rxd_1 *rxdp;
1305 enum vxge_hw_status status = VXGE_HW_OK;
1306 u64 control_0, own;
1307
1308 channel = &ring->channel;
1309
1310 vxge_hw_channel_dtr_try_complete(channel, rxdh);
1311
1312 rxdp = *rxdh;
1313 if (rxdp == NULL) {
1314 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1315 goto exit;
1316 }
1317
1318 control_0 = rxdp->control_0;
1319 own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1320 *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
1321
1322 /* check whether it is not the end */
1323 if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {
1324
1325 vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
1326 0);
1327
1328 ++ring->cmpl_cnt;
1329 vxge_hw_channel_dtr_complete(channel);
1330
1331 vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
1332
1333 ring->stats->common_stats.usage_cnt++;
1334 if (ring->stats->common_stats.usage_max <
1335 ring->stats->common_stats.usage_cnt)
1336 ring->stats->common_stats.usage_max =
1337 ring->stats->common_stats.usage_cnt;
1338
1339 status = VXGE_HW_OK;
1340 goto exit;
1341 }
1342
1343 /* Reset it, since we don't want to return
1344 * garbage to the driver. */
1345 *rxdh = NULL;
1346 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1347exit:
1348 return status;
1349}
1350
1351/**
1352 * vxge_hw_ring_handle_tcode - Handle transfer code.
1353 * @ring: Handle to the ring object used for receive
1354 * @rxdh: Descriptor handle.
1355 * @t_code: One of the enumerated (and documented in the Titan user guide)
1356 * "transfer codes".
1357 *
1358 * Handle descriptor's transfer code. The latter comes with each completed
1359 * descriptor.
1360 *
1361 * Returns: one of the enum vxge_hw_status{} enumerated types.
1362 * VXGE_HW_OK - for success.
1363 * VXGE_HW_ERR_CRITICAL - when encounters critical error.
1364 */
1365enum vxge_hw_status vxge_hw_ring_handle_tcode(
1366 struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
1367{
1368 struct __vxge_hw_channel *channel;
1369 enum vxge_hw_status status = VXGE_HW_OK;
1370
1371 channel = &ring->channel;
1372
1373 /* If the t_code is not supported and is other than 0x5
1374 * (an unparseable packet, such as one with an unknown
1375 * IPv6 header), drop it.
1376 */
1377
1378 if (t_code == VXGE_HW_RING_T_CODE_OK ||
1379 t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
1380 status = VXGE_HW_OK;
1381 goto exit;
1382 }
1383
1384 if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
1385 status = VXGE_HW_ERR_INVALID_TCODE;
1386 goto exit;
1387 }
1388
1389 ring->stats->rxd_t_code_err_cnt[t_code]++;
1390exit:
1391 return status;
1392}
1393
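/*
 * Usage sketch (illustrative): the receive completion path walks the ring
 * with vxge_hw_ring_rxd_next_completed(), lets vxge_hw_ring_handle_tcode()
 * account for bad transfer codes, and recycles each descriptor once its
 * buffer has been handed to the stack.
 */
static void vxge_example_rx_poll(struct __vxge_hw_ring *ring)
{
	void *rxdh;
	u8 t_code;

	while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
	       VXGE_HW_OK) {
		if (t_code != VXGE_HW_RING_T_CODE_OK)
			vxge_hw_ring_handle_tcode(ring, rxdh, t_code);

		/* ... unmap the buffer and pass the skb up the stack ... */

		vxge_hw_ring_rxd_free(ring, rxdh);
	}
}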
1394/**
1395 * __vxge_hw_non_offload_db_post - Post non offload doorbell
1396 *
1397 * @fifo: fifohandle
1398 * @txdl_ptr: The starting location of the TxDL in host memory
1399 * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
1400 * @no_snoop: No snoop flags
1401 *
1402 * This function posts a non-offload doorbell to doorbell FIFO
1403 *
1404 */
1405static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
1406 u64 txdl_ptr, u32 num_txds, u32 no_snoop)
1407{
1408 struct __vxge_hw_channel *channel;
1409
1410 channel = &fifo->channel;
1411
1412 writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
1413 VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
1414 VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
1415 &fifo->nofl_db->control_0);
1416
1417 mmiowb();
1418
1419 writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
1420
1421 mmiowb();
1422}
1423
1424/**
1425 * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
1426 * the fifo
1427 * @fifoh: Handle to the fifo object used for non offload send
1428 */
1429u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
1430{
1431 return vxge_hw_channel_dtr_count(&fifoh->channel);
1432}
1433
1434/**
1435 * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
1436 * @fifoh: Handle to the fifo object used for non offload send
1437 * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
1438 * with a valid handle.
1439 * @txdl_priv: Buffer to return the pointer to per txdl space
1440 *
1441 * Reserve a single TxDL (that is, fifo descriptor)
1442 * for the subsequent filling-in by the driver
1443 * and posting on the corresponding channel (@channelh)
1444 * via vxge_hw_fifo_txdl_post().
1445 *
1446 * Note: it is the responsibility of the driver to reserve multiple descriptors
1447 * for a lengthy (e.g., LSO) transmit operation. A single fifo descriptor
1448 * carries up to the configured number (fifo.max_frags) of contiguous buffers.
1449 *
1450 * Returns: VXGE_HW_OK - success;
1451 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
1452 *
1453 */
1454enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
1455 struct __vxge_hw_fifo *fifo,
1456 void **txdlh, void **txdl_priv)
1457{
1458 struct __vxge_hw_channel *channel;
1459 enum vxge_hw_status status;
1460 int i;
1461
1462 channel = &fifo->channel;
1463
1464 status = vxge_hw_channel_dtr_alloc(channel, txdlh);
1465
1466 if (status == VXGE_HW_OK) {
1467 struct vxge_hw_fifo_txd *txdp =
1468 (struct vxge_hw_fifo_txd *)*txdlh;
1469 struct __vxge_hw_fifo_txdl_priv *priv;
1470
1471 priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
1472
1473 /* reset the TxDL's private */
1474 priv->align_dma_offset = 0;
1475 priv->align_vaddr_start = priv->align_vaddr;
1476 priv->align_used_frags = 0;
1477 priv->frags = 0;
1478 priv->alloc_frags = fifo->config->max_frags;
1479 priv->next_txdl_priv = NULL;
1480
1481 *txdl_priv = (void *)(size_t)txdp->host_control;
1482
1483 for (i = 0; i < fifo->config->max_frags; i++) {
1484 txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
1485 txdp->control_0 = txdp->control_1 = 0;
1486 }
1487 }
1488
1489 return status;
1490}
1491
1492/**
1493 * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
1494 * descriptor.
1495 * @fifo: Handle to the fifo object used for non offload send
1496 * @txdlh: Descriptor handle.
1497 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
1498 * (of buffers).
1499 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
1500 * @size: Size of the data buffer (in bytes).
1501 *
1502 * This API is part of the preparation of the transmit descriptor for posting
1503 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
1504 * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
1505 * All three APIs fill in the fields of the fifo descriptor,
1506 * in accordance with the Titan specification.
1507 *
1508 */
1509void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
1510 void *txdlh, u32 frag_idx,
1511 dma_addr_t dma_pointer, u32 size)
1512{
1513 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1514 struct vxge_hw_fifo_txd *txdp, *txdp_last;
1515 struct __vxge_hw_channel *channel;
1516
1517 channel = &fifo->channel;
1518
1519 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1520 txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;
1521
1522 if (frag_idx != 0)
1523 txdp->control_0 = txdp->control_1 = 0;
1524 else {
1525 txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1526 VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
1527 txdp->control_1 |= fifo->interrupt_type;
1528 txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
1529 fifo->tx_intr_num);
1530 if (txdl_priv->frags) {
1531 txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
1532 (txdl_priv->frags - 1);
1533 txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1534 VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1535 }
1536 }
1537
1538 vxge_assert(frag_idx < txdl_priv->alloc_frags);
1539
1540 txdp->buffer_pointer = (u64)dma_pointer;
1541 txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
1542 fifo->stats->total_buffers++;
1543 txdl_priv->frags++;
1544}
1545
1546/**
1547 * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
1548 * @fifo: Handle to the fifo object used for non offload send
1549 * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
1550 * @frags: Number of contiguous buffers that are part of a single
1551 * transmit operation.
1552 *
1553 * Post descriptor on the 'fifo' type channel for transmission.
1554 * Prior to posting, the descriptor should be filled in accordance with
1555 * Host/Titan interface specification for a given service (LL, etc.).
1556 *
1557 */
1558void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
1559{
1560 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1561 struct vxge_hw_fifo_txd *txdp_last;
1562 struct vxge_hw_fifo_txd *txdp_first;
1563 struct __vxge_hw_channel *channel;
1564
1565 channel = &fifo->channel;
1566
1567 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1568 txdp_first = txdlh;
1569
1570 txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
1571 txdp_last->control_0 |=
1572 VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1573 txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
1574
1575 vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
1576
1577 __vxge_hw_non_offload_db_post(fifo,
1578 (u64)txdl_priv->dma_addr,
1579 txdl_priv->frags - 1,
1580 fifo->no_snoop_bits);
1581
1582 fifo->stats->total_posts++;
1583 fifo->stats->common_stats.usage_cnt++;
1584 if (fifo->stats->common_stats.usage_max <
1585 fifo->stats->common_stats.usage_cnt)
1586 fifo->stats->common_stats.usage_max =
1587 fifo->stats->common_stats.usage_cnt;
1588}
1589
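/*
 * Usage sketch (illustrative): a single-fragment transmit reserves one
 * TxDL, attaches the DMA-mapped buffer and posts the descriptor, which in
 * turn rings the non-offload doorbell.
 */
static enum vxge_hw_status
vxge_example_xmit_one(struct __vxge_hw_fifo *fifo, dma_addr_t dma, u32 len)
{
	enum vxge_hw_status status;
	void *txdlh, *txdl_priv;

	status = vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv);
	if (status != VXGE_HW_OK)
		return status;		/* fifo is full; stop the queue */

	vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 0, dma, len);
	vxge_hw_fifo_txdl_post(fifo, txdlh);

	return VXGE_HW_OK;
}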
1590/**
1591 * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
1592 * @fifo: Handle to the fifo object used for non offload send
1593 * @txdlh: Descriptor handle. Returned by HW.
1594 * @t_code: Transfer code, as per Titan User Guide,
1595 * Transmit Descriptor Format.
1596 * Returned by HW.
1597 *
1598 * Retrieve the _next_ completed descriptor.
1599 * HW uses the channel callback (*vxge_hw_channel_callback_f) to notify the
1600 * driver of new completed descriptors. After that
1601 * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest of the
1602 * completions (the very first completion is passed by HW via
1603 * vxge_hw_channel_callback_f).
1604 *
1605 * Implementation-wise, the driver is free to call
1606 * vxge_hw_fifo_txdl_next_completed either immediately from inside the
1607 * channel callback, or in a deferred fashion and separate (from HW)
1608 * context.
1609 *
1610 * Non-zero @t_code means failure to process the descriptor.
1611 * The failure could happen, for instance, when the link is
1612 * down, in which case Titan completes the descriptor because it
1613 * is not able to send the data out.
1614 *
1615 * For details please refer to Titan User Guide.
1616 *
1617 * Returns: VXGE_HW_OK - success.
1618 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1619 * are currently available for processing.
1620 *
1621 */
1622enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
1623 struct __vxge_hw_fifo *fifo, void **txdlh,
1624 enum vxge_hw_fifo_tcode *t_code)
1625{
1626 struct __vxge_hw_channel *channel;
1627 struct vxge_hw_fifo_txd *txdp;
1628 enum vxge_hw_status status = VXGE_HW_OK;
1629
1630 channel = &fifo->channel;
1631
1632 vxge_hw_channel_dtr_try_complete(channel, txdlh);
1633
1634 txdp = *txdlh;
1635 if (txdp == NULL) {
1636 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1637 goto exit;
1638 }
1639
1640 /* check whether host owns it */
1641 if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {
1642
1643 vxge_assert(txdp->host_control != 0);
1644
1645 vxge_hw_channel_dtr_complete(channel);
1646
1647 *t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);
1648
1649 if (fifo->stats->common_stats.usage_cnt > 0)
1650 fifo->stats->common_stats.usage_cnt--;
1651
1652 status = VXGE_HW_OK;
1653 goto exit;
1654 }
1655
1656 /* no more completions */
1657 *txdlh = NULL;
1658 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1659exit:
1660 return status;
1661}
1662
1663/**
1664 * vxge_hw_fifo_handle_tcode - Handle transfer code.
1665 * @fifo: Handle to the fifo object used for non offload send
1666 * @txdlh: Descriptor handle.
1667 * @t_code: One of the enumerated (and documented in the Titan user guide)
1668 * "transfer codes".
1669 *
1670 * Handle descriptor's transfer code. The latter comes with each completed
1671 * descriptor.
1672 *
1673 * Returns: one of the enum vxge_hw_status{} enumerated types.
1674 * VXGE_HW_OK - for success.
1675 * VXGE_HW_ERR_CRITICAL - when encounters critical error.
1676 */
1677enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
1678 void *txdlh,
1679 enum vxge_hw_fifo_tcode t_code)
1680{
1681 struct __vxge_hw_channel *channel;
1682
1683 enum vxge_hw_status status = VXGE_HW_OK;
1684 channel = &fifo->channel;
1685
1686 if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) {
1687 status = VXGE_HW_ERR_INVALID_TCODE;
1688 goto exit;
1689 }
1690
1691 fifo->stats->txd_t_code_err_cnt[t_code]++;
1692exit:
1693 return status;
1694}
1695
1696/**
1697 * vxge_hw_fifo_txdl_free - Free descriptor.
1698 * @fifo: Handle to the fifo object used for non offload send
1699 * @txdlh: Descriptor handle.
1700 *
1701 * Free the reserved descriptor. This operation is "symmetrical" to
1702 * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
1703 * lifecycle.
1704 *
1705 * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
1706 * be:
1707 *
1708 * - reserved (vxge_hw_fifo_txdl_reserve);
1709 *
1710 * - posted (vxge_hw_fifo_txdl_post);
1711 *
1712 * - completed (vxge_hw_fifo_txdl_next_completed);
1713 *
1714 * - and recycled again (vxge_hw_fifo_txdl_free).
1715 *
1716 * For alternative state transitions and more details please refer to
1717 * the design doc.
1718 *
1719 */
1720void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
1721{
1722 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1723 u32 max_frags;
1724 struct __vxge_hw_channel *channel;
1725
1726 channel = &fifo->channel;
1727
1728 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
1729 (struct vxge_hw_fifo_txd *)txdlh);
1730
1731 max_frags = fifo->config->max_frags;
1732
1733 vxge_hw_channel_dtr_free(channel, txdlh);
1734}
1735
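/*
 * Usage sketch (illustrative): transmit completion mirrors the receive
 * side - walk the completed TxDLs, account for any bad transfer code,
 * release the buffers and return each descriptor to the fifo.
 */
static void vxge_example_tx_complete(struct __vxge_hw_fifo *fifo)
{
	enum vxge_hw_fifo_tcode t_code;
	void *txdlh;

	while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
	       VXGE_HW_OK) {
		if (t_code != VXGE_HW_FIFO_T_CODE_OK)
			vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);

		/* ... unmap the fragments and free the skb here ... */

		vxge_hw_fifo_txdl_free(fifo, txdlh);
	}
}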
1736/**
1737 * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
1738 * to MAC address table.
1739 * @vp: Vpath handle.
1740 * @macaddr: MAC address to be added for this vpath into the list
1741 * @macaddr_mask: MAC address mask for macaddr
1742 * @duplicate_mode: Duplicate MAC address add mode. Please see
1743 * enum vxge_hw_vpath_mac_addr_add_mode{}
1744 *
1745 * Adds the given mac address and mac address mask into the list for this
1746 * vpath.
1747 * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
1748 * vxge_hw_vpath_mac_addr_get_next
1749 *
1750 */
1751enum vxge_hw_status
1752vxge_hw_vpath_mac_addr_add(
1753 struct __vxge_hw_vpath_handle *vp,
1754 u8 (macaddr)[ETH_ALEN],
1755 u8 (macaddr_mask)[ETH_ALEN],
1756 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
1757{
1758 u32 i;
1759 u64 data1 = 0ULL;
1760 u64 data2 = 0ULL;
1761 enum vxge_hw_status status = VXGE_HW_OK;
1762
1763 if (vp == NULL) {
1764 status = VXGE_HW_ERR_INVALID_HANDLE;
1765 goto exit;
1766 }
1767
1768 for (i = 0; i < ETH_ALEN; i++) {
1769 data1 <<= 8;
1770 data1 |= (u8)macaddr[i];
1771
1772 data2 <<= 8;
1773 data2 |= (u8)macaddr_mask[i];
1774 }
1775
1776 switch (duplicate_mode) {
1777 case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
1778 i = 0;
1779 break;
1780 case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
1781 i = 1;
1782 break;
1783 case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
1784 i = 2;
1785 break;
1786 default:
1787 i = 0;
1788 break;
1789 }
1790
1791 status = __vxge_hw_vpath_rts_table_set(vp,
1792 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1793 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1794 0,
1795 VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1796 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
1797 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
1798exit:
1799 return status;
1800}
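As a usage illustration only (not driver code): programming one unicast address into a vpath's DA table. The vpath handle is assumed to have been obtained when the vpath was opened, and the all-zero mask is a placeholder; real callers fill @macaddr_mask per the adapter's DA-steering rules.

static enum vxge_hw_status example_add_mac(struct __vxge_hw_vpath_handle *vp,
					   const u8 *addr)
{
	u8 macaddr[ETH_ALEN];
	u8 macaddr_mask[ETH_ALEN] = { };	/* placeholder mask */

	memcpy(macaddr, addr, ETH_ALEN);

	/* Replace an existing entry if this address is already listed. */
	return vxge_hw_vpath_mac_addr_add(vp, macaddr, macaddr_mask,
			VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE);
}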
1801
1802/**
1803 * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
1804 * from MAC address table.
1805 * @vp: Vpath handle.
1806 * @macaddr: First MAC address entry for this vpath in the list
1807 * @macaddr_mask: MAC address mask for macaddr
1808 *
1809 * Returns the first mac address and mac address mask in the list for this
1810 * vpath.
1811 * see also: vxge_hw_vpath_mac_addr_get_next
1812 *
1813 */
1814enum vxge_hw_status
1815vxge_hw_vpath_mac_addr_get(
1816 struct __vxge_hw_vpath_handle *vp,
1817 u8 (macaddr)[ETH_ALEN],
1818 u8 (macaddr_mask)[ETH_ALEN])
1819{
1820 u32 i;
1821 u64 data1 = 0ULL;
1822 u64 data2 = 0ULL;
1823 enum vxge_hw_status status = VXGE_HW_OK;
1824
1825 if (vp == NULL) {
1826 status = VXGE_HW_ERR_INVALID_HANDLE;
1827 goto exit;
1828 }
1829
1830 status = __vxge_hw_vpath_rts_table_get(vp,
1831 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1832 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1833 0, &data1, &data2);
1834
1835 if (status != VXGE_HW_OK)
1836 goto exit;
1837
1838 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1839
1840 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1841
1842 for (i = ETH_ALEN; i > 0; i--) {
1843 macaddr[i-1] = (u8)(data1 & 0xFF);
1844 data1 >>= 8;
1845
1846 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1847 data2 >>= 8;
1848 }
1849exit:
1850 return status;
1851}
1852
1853/**
1854 * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
1855 * vpath
1856 * from MAC address table.
1857 * @vp: Vpath handle.
1858 * @macaddr: Next MAC address entry for this vpath in the list
1859 * @macaddr_mask: MAC address mask for macaddr
1860 *
1861 * Returns the next mac address and mac address mask in the list for this
1862 * vpath.
1863 * see also: vxge_hw_vpath_mac_addr_get
1864 *
1865 */
1866enum vxge_hw_status
1867vxge_hw_vpath_mac_addr_get_next(
1868 struct __vxge_hw_vpath_handle *vp,
1869 u8 (macaddr)[ETH_ALEN],
1870 u8 (macaddr_mask)[ETH_ALEN])
1871{
1872 u32 i;
1873 u64 data1 = 0ULL;
1874 u64 data2 = 0ULL;
1875 enum vxge_hw_status status = VXGE_HW_OK;
1876
1877 if (vp == NULL) {
1878 status = VXGE_HW_ERR_INVALID_HANDLE;
1879 goto exit;
1880 }
1881
1882 status = __vxge_hw_vpath_rts_table_get(vp,
1883 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
1884 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1885 0, &data1, &data2);
1886
1887 if (status != VXGE_HW_OK)
1888 goto exit;
1889
1890 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1891
1892 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1893
1894 for (i = ETH_ALEN; i > 0; i--) {
1895 macaddr[i-1] = (u8)(data1 & 0xFF);
1896 data1 >>= 8;
1897
1898 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1899 data2 >>= 8;
1900 }
1901
1902exit:
1903 return status;
1904}
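An illustrative walk of the vpath's DA table using the first/next pair above; the helper name and the pr_info() reporting are purely for demonstration.

static void example_dump_mac_table(struct __vxge_hw_vpath_handle *vp)
{
	u8 addr[ETH_ALEN], mask[ETH_ALEN];
	enum vxge_hw_status status;

	status = vxge_hw_vpath_mac_addr_get(vp, addr, mask);
	while (status == VXGE_HW_OK) {
		pr_info("vpath DA entry: %pM\n", addr);
		status = vxge_hw_vpath_mac_addr_get_next(vp, addr, mask);
	}
}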
1905
1906/**
1907 * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
1908 * from the MAC address table.
1909 * @vp: Vpath handle.
1910 * @macaddr: MAC address to be deleted for this vpath from the list
1911 * @macaddr_mask: MAC address mask for macaddr
1912 *
1913 * Deletes the given mac address and mac address mask from the list for this
1914 * vpath.
1915 * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
1916 * vxge_hw_vpath_mac_addr_get_next
1917 *
1918 */
1919enum vxge_hw_status
1920vxge_hw_vpath_mac_addr_delete(
1921 struct __vxge_hw_vpath_handle *vp,
1922 u8 (macaddr)[ETH_ALEN],
1923 u8 (macaddr_mask)[ETH_ALEN])
1924{
1925 u32 i;
1926 u64 data1 = 0ULL;
1927 u64 data2 = 0ULL;
1928 enum vxge_hw_status status = VXGE_HW_OK;
1929
1930 if (vp == NULL) {
1931 status = VXGE_HW_ERR_INVALID_HANDLE;
1932 goto exit;
1933 }
1934
1935 for (i = 0; i < ETH_ALEN; i++) {
1936 data1 <<= 8;
1937 data1 |= (u8)macaddr[i];
1938
1939 data2 <<= 8;
1940 data2 |= (u8)macaddr_mask[i];
1941 }
1942
1943 status = __vxge_hw_vpath_rts_table_set(vp,
1944 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1945 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1946 0,
1947 VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1948 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
1949exit:
1950 return status;
1951}
1952
1953/**
1954 * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
1955 * to vlan id table.
1956 * @vp: Vpath handle.
1957 * @vid: vlan id to be added for this vpath into the list
1958 *
1959 * Adds the given vlan id into the list for this vpath.
1960 * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
1961 * vxge_hw_vpath_vid_get_next
1962 *
1963 */
1964enum vxge_hw_status
1965vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
1966{
1967 enum vxge_hw_status status = VXGE_HW_OK;
1968
1969 if (vp == NULL) {
1970 status = VXGE_HW_ERR_INVALID_HANDLE;
1971 goto exit;
1972 }
1973
1974 status = __vxge_hw_vpath_rts_table_set(vp,
1975 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1976 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1977 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1978exit:
1979 return status;
1980}
1981
1982/**
1983 * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
1984 * from vlan id table.
1985 * @vp: Vpath handle.
1986 * @vid: Buffer to return vlan id
1987 *
1988 * Returns the first vlan id in the list for this vpath.
1989 * see also: vxge_hw_vpath_vid_get_next
1990 *
1991 */
1992enum vxge_hw_status
1993vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
1994{
1995 u64 data;
1996 enum vxge_hw_status status = VXGE_HW_OK;
1997
1998 if (vp == NULL) {
1999 status = VXGE_HW_ERR_INVALID_HANDLE;
2000 goto exit;
2001 }
2002
2003 status = __vxge_hw_vpath_rts_table_get(vp,
2004 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
2005 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
2006 0, vid, &data);
2007
2008 *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
2009exit:
2010 return status;
2011}
2012
2013/**
2014 * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
2015 * from the vlan id table.
2016 * @vp: Vpath handle.
2017 * @vid: vlan id to be deleted for this vpath from the list
2018 *
2019 * Deletes the given vlan id from the list for this vpath.
2020 * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
2021 * vxge_hw_vpath_vid_get_next
2022 *
2023 */
2024enum vxge_hw_status
2025vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
2026{
2027 enum vxge_hw_status status = VXGE_HW_OK;
2028
2029 if (vp == NULL) {
2030 status = VXGE_HW_ERR_INVALID_HANDLE;
2031 goto exit;
2032 }
2033
2034 status = __vxge_hw_vpath_rts_table_set(vp,
2035 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
2036 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
2037 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
2038exit:
2039 return status;
2040}
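A hedged sketch of how a caller might keep the vpath VID table in step with the stack's VLAN filter; the helper and its bool flag are hypothetical, only the two HW-layer calls are real.

static enum vxge_hw_status example_sync_vid(struct __vxge_hw_vpath_handle *vp,
					    u64 vid, bool add)
{
	/* The 12-bit vlan id is packed into the RTS steering data word
	 * by the HW layer via VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID. */
	return add ? vxge_hw_vpath_vid_add(vp, vid) :
		     vxge_hw_vpath_vid_delete(vp, vid);
}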
2041
2042/**
2043 * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
2044 * @vp: Vpath handle.
2045 *
2046 * Enable promiscuous mode of Titan-e operation.
2047 *
2048 * See also: vxge_hw_vpath_promisc_disable().
2049 */
2050enum vxge_hw_status vxge_hw_vpath_promisc_enable(
2051 struct __vxge_hw_vpath_handle *vp)
2052{
2053 u64 val64;
2054 struct __vxge_hw_virtualpath *vpath;
2055 enum vxge_hw_status status = VXGE_HW_OK;
2056
2057 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2058 status = VXGE_HW_ERR_INVALID_HANDLE;
2059 goto exit;
2060 }
2061
2062 vpath = vp->vpath;
2063
2064 /* Enable promiscuous mode for function 0 only */
2065 if (!(vpath->hldev->access_rights &
2066 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
2067 return VXGE_HW_OK;
2068
2069 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2070
2071 if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
2072
2073 val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
2074 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
2075 VXGE_HW_RXMAC_VCFG0_BCAST_EN |
2076 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
2077
2078 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2079 }
2080exit:
2081 return status;
2082}
2083
2084/**
2085 * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
2086 * @vp: Vpath handle.
2087 *
2088 * Disable promiscuous mode of Titan-e operation.
2089 *
2090 * See also: vxge_hw_vpath_promisc_enable().
2091 */
2092enum vxge_hw_status vxge_hw_vpath_promisc_disable(
2093 struct __vxge_hw_vpath_handle *vp)
2094{
2095 u64 val64;
2096 struct __vxge_hw_virtualpath *vpath;
2097 enum vxge_hw_status status = VXGE_HW_OK;
2098
2099 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2100 status = VXGE_HW_ERR_INVALID_HANDLE;
2101 goto exit;
2102 }
2103
2104 vpath = vp->vpath;
2105
2106 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2107
2108 if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
2109
2110 val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
2111 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
2112 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
2113
2114 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2115 }
2116exit:
2117 return status;
2118}
2119
2120/*
2121 * vxge_hw_vpath_bcast_enable - Enable broadcast
2122 * @vp: Vpath handle.
2123 *
2124 * Enable receiving broadcasts.
2125 */
2126enum vxge_hw_status vxge_hw_vpath_bcast_enable(
2127 struct __vxge_hw_vpath_handle *vp)
2128{
2129 u64 val64;
2130 struct __vxge_hw_virtualpath *vpath;
2131 enum vxge_hw_status status = VXGE_HW_OK;
2132
2133 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2134 status = VXGE_HW_ERR_INVALID_HANDLE;
2135 goto exit;
2136 }
2137
2138 vpath = vp->vpath;
2139
2140 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2141
2142 if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
2143 val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
2144 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2145 }
2146exit:
2147 return status;
2148}
2149
2150/**
2151 * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
2152 * @vp: Vpath handle.
2153 *
2154 * Enable Titan-e multicast addresses.
2155 * Returns: VXGE_HW_OK on success.
2156 *
2157 */
2158enum vxge_hw_status vxge_hw_vpath_mcast_enable(
2159 struct __vxge_hw_vpath_handle *vp)
2160{
2161 u64 val64;
2162 struct __vxge_hw_virtualpath *vpath;
2163 enum vxge_hw_status status = VXGE_HW_OK;
2164
2165 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2166 status = VXGE_HW_ERR_INVALID_HANDLE;
2167 goto exit;
2168 }
2169
2170 vpath = vp->vpath;
2171
2172 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2173
2174 if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
2175 val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
2176 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2177 }
2178exit:
2179 return status;
2180}
2181
2182/**
2183 * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
2184 * @vp: Vpath handle.
2185 *
2186 * Disable Titan-e multicast addresses.
2187 * Returns: VXGE_HW_OK - success.
2188 * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
2189 *
2190 */
2191enum vxge_hw_status
2192vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
2193{
2194 u64 val64;
2195 struct __vxge_hw_virtualpath *vpath;
2196 enum vxge_hw_status status = VXGE_HW_OK;
2197
2198 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2199 status = VXGE_HW_ERR_INVALID_HANDLE;
2200 goto exit;
2201 }
2202
2203 vpath = vp->vpath;
2204
2205 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2206
2207 if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
2208 val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
2209 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2210 }
2211exit:
2212 return status;
2213}
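An illustrative rx-mode helper built on the four toggles above (promiscuous mode only takes effect on the privileged function, as noted in vxge_hw_vpath_promisc_enable()); the flags would normally be derived from the net_device state.

static void example_set_rx_mode(struct __vxge_hw_vpath_handle *vp,
				bool promisc, bool all_multi)
{
	vxge_hw_vpath_bcast_enable(vp);

	if (promisc)
		vxge_hw_vpath_promisc_enable(vp);
	else
		vxge_hw_vpath_promisc_disable(vp);

	if (all_multi)
		vxge_hw_vpath_mcast_enable(vp);
	else
		vxge_hw_vpath_mcast_disable(vp);
}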
2214
2215/*
2216 * vxge_hw_vpath_alarm_process - Process Alarms.
2217 * @vp: Vpath handle.
2218 * @skip_alarms: Do not clear the alarms
2219 *
2220 * Process vpath alarms.
2221 *
2222 */
2223enum vxge_hw_status vxge_hw_vpath_alarm_process(
2224 struct __vxge_hw_vpath_handle *vp,
2225 u32 skip_alarms)
2226{
2227 enum vxge_hw_status status = VXGE_HW_OK;
2228
2229 if (vp == NULL) {
2230 status = VXGE_HW_ERR_INVALID_HANDLE;
2231 goto exit;
2232 }
2233
2234 status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
2235exit:
2236 return status;
2237}
2238
2239/**
2240 * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
2241 * alarms
2242 * @vp: Virtual Path handle.
2243 * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
2244 * interrupts (can be repeated). If the fifo or ring is not enabled,
2245 * the MSIX vector for it should be set to 0
2246 * @alarm_msix_id: MSIX vector for alarm.
2247 *
2248 * This API associates the given MSIX vector numbers with the four TIM
2249 * interrupts and the alarm interrupt.
2250 */
2251void
2252vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
2253 int alarm_msix_id)
2254{
2255 u64 val64;
2256 struct __vxge_hw_virtualpath *vpath = vp->vpath;
2257 struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2258 u32 vp_id = vp->vpath->vp_id;
2259
2260 val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
2261 (vp_id * 4) + tim_msix_id[0]) |
2262 VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
2263 (vp_id * 4) + tim_msix_id[1]);
2264
2265 writeq(val64, &vp_reg->interrupt_cfg0);
2266
2267 writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
2268 (vpath->hldev->first_vp_id * 4) + alarm_msix_id),
2269 &vp_reg->interrupt_cfg2);
2270
2271 if (vpath->hldev->config.intr_mode ==
2272 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2273 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2274 VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
2275 0, 32), &vp_reg->one_shot_vect0_en);
2276 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2277 VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
2278 0, 32), &vp_reg->one_shot_vect1_en);
2279 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2280 VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
2281 0, 32), &vp_reg->one_shot_vect2_en);
2282 }
2283}
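A minimal association sketch, assuming the conventional layout of Tx on relative vector 0 and Rx on relative vector 1; unused TIM entries stay 0 per the kernel-doc above, and the alarm vector is supplied by the caller.

static void example_setup_msix(struct __vxge_hw_vpath_handle *vp,
			       int alarm_vector)
{
	int tim_msix_id[VXGE_HW_MAX_INTR_PER_VP] = {0, 1, 0, 0};

	vxge_hw_vpath_msix_set(vp, tim_msix_id, alarm_vector);
}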
2284
2285/**
2286 * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
2287 * @vp: Virtual Path handle.
2288 * @msix_id: MSIX ID
2289 *
2290 * The function masks the msix interrupt for the given msix_id
2291 *
2292 * Returns: nothing; the function has a void return type.
2293 *
2294 * See also: vxge_hw_vpath_msix_unmask()
2295 *
2296 */
2297void
2298vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2299{
2300 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2301 __vxge_hw_pio_mem_write32_upper(
2302 (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2303 &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
2304}
2305
2306/**
2307 * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
2308 * @vp: Virtual Path handle.
2309 * @msix_id: MSIX ID
2310 *
2311 * The function clears the msix interrupt for the given msix_id
2312 *
2313 * Returns: nothing; the function has a void return type.
2314 *
2315 * See also: vxge_hw_vpath_msix_mask()
2316 *
2317 */
2318void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
2319{
2320 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2321
2322 if ((hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT))
2323 __vxge_hw_pio_mem_write32_upper(
2324 (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2325 &hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
2326 else
2327 __vxge_hw_pio_mem_write32_upper(
2328 (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2329 &hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
2330}
2331
2332/**
2333 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
2334 * @vp: Virtual Path handle.
2335 * @msix_id: MSIX ID
2336 *
2337 * The function unmasks the msix interrupt for the given msix_id
2338 *
2339 * Returns: nothing; the function has a void return type.
2340 *
2341 * See also: vxge_hw_vpath_msix_mask()
2342 *
2343 */
2344void
2345vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2346{
2347 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2348 __vxge_hw_pio_mem_write32_upper(
2349 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2350 &hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
2351}
2352
2353/**
2354 * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
2355 * @vp: Virtual Path handle.
2356 *
2357 * Mask Tx and Rx vpath interrupts.
2358 *
2359 * See also: vxge_hw_vpath_inta_unmask_tx_rx()
2360 */
2361void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2362{
2363 u64 tim_int_mask0[4] = {[0 ...3] = 0};
2364 u32 tim_int_mask1[4] = {[0 ...3] = 0};
2365 u64 val64;
2366 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2367
2368 VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2369 tim_int_mask1, vp->vpath->vp_id);
2370
2371 val64 = readq(&hldev->common_reg->tim_int_mask0);
2372
2373 if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2374 (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2375 writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2376 tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
2377 &hldev->common_reg->tim_int_mask0);
2378 }
2379
2380 val64 = readl(&hldev->common_reg->tim_int_mask1);
2381
2382 if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2383 (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2384 __vxge_hw_pio_mem_write32_upper(
2385 (tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2386 tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
2387 &hldev->common_reg->tim_int_mask1);
2388 }
2389}
2390
2391/**
2392 * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
2393 * @vp: Virtual Path handle.
2394 *
2395 * Unmask Tx and Rx vpath interrupts.
2396 *
2397 * See also: vxge_hw_vpath_inta_mask_tx_rx()
2398 */
2399void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2400{
2401 u64 tim_int_mask0[4] = {[0 ...3] = 0};
2402 u32 tim_int_mask1[4] = {[0 ...3] = 0};
2403 u64 val64;
2404 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2405
2406 VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2407 tim_int_mask1, vp->vpath->vp_id);
2408
2409 val64 = readq(&hldev->common_reg->tim_int_mask0);
2410
2411 if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2412 (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2413 writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2414 tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
2415 &hldev->common_reg->tim_int_mask0);
2416 }
2417
2418 if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2419 (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2420 __vxge_hw_pio_mem_write32_upper(
2421 (~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2422 tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
2423 &hldev->common_reg->tim_int_mask1);
2424 }
2425}
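Sketch of the intended INTA usage pattern (assumed, not taken from the driver): mask the vpath's Tx/Rx TIM interrupts in the hard IRQ handler, defer the real work, and unmask once the deferred poll has drained completions.

static void example_inta_irq(struct __vxge_hw_vpath_handle *vp)
{
	vxge_hw_vpath_inta_mask_tx_rx(vp);
	/* ... schedule deferred Rx/Tx polling (e.g. NAPI) here ... */
}

static void example_inta_poll_done(struct __vxge_hw_vpath_handle *vp)
{
	/* ... called after vxge_hw_vpath_poll_rx()/_poll_tx() finish ... */
	vxge_hw_vpath_inta_unmask_tx_rx(vp);
}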
2426
2427/**
2428 * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
2429 * descriptors and process the same.
2430 * @ring: Handle to the ring object used for receive
2431 *
2432 * The function polls the Rx for the completed descriptors and calls
2433 * the driver via supplied completion callback.
2434 *
2435 * Returns: VXGE_HW_OK, if the polling is completed successfully.
2436 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2437 * descriptors available which are yet to be processed.
2438 *
2439 * See also: vxge_hw_vpath_poll_tx()
2440 */
2441enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
2442{
2443 u8 t_code;
2444 enum vxge_hw_status status = VXGE_HW_OK;
2445 void *first_rxdh;
2446 u64 val64 = 0;
2447 int new_count = 0;
2448
2449 ring->cmpl_cnt = 0;
2450
2451 status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
2452 if (status == VXGE_HW_OK)
2453 ring->callback(ring, first_rxdh,
2454 t_code, ring->channel.userdata);
2455
2456 if (ring->cmpl_cnt != 0) {
2457 ring->doorbell_cnt += ring->cmpl_cnt;
2458 if (ring->doorbell_cnt >= ring->rxds_limit) {
2459 /*
2460 * Each RxD is of 4 qwords, update the number of
2461 * qwords replenished
2462 */
2463 new_count = (ring->doorbell_cnt * 4);
2464
2465 /* For each block add 4 more qwords */
2466 ring->total_db_cnt += ring->doorbell_cnt;
2467 if (ring->total_db_cnt >= ring->rxds_per_block) {
2468 new_count += 4;
2469 /* Reset total count */
2470 ring->total_db_cnt %= ring->rxds_per_block;
2471 }
2472 writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
2473 &ring->vp_reg->prc_rxd_doorbell);
2474 val64 =
2475 readl(&ring->common_reg->titan_general_int_status);
2476 ring->doorbell_cnt = 0;
2477 }
2478 }
2479
2480 return status;
2481}
2482
2483/**
2484 * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
2485 * the same.
2486 * @fifo: Handle to the fifo object used for non offload send
2487 *
2488 * The function polls the Tx for the completed descriptors and calls
2489 * the driver via supplied completion callback.
2490 *
2491 * Returns: VXGE_HW_OK, if the polling is completed successfully.
2492 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2493 * descriptors available which are yet to be processed.
2494 */
2495enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
2496 struct sk_buff ***skb_ptr, int nr_skb,
2497 int *more)
2498{
2499 enum vxge_hw_fifo_tcode t_code;
2500 void *first_txdlh;
2501 enum vxge_hw_status status = VXGE_HW_OK;
2502 struct __vxge_hw_channel *channel;
2503
2504 channel = &fifo->channel;
2505
2506 status = vxge_hw_fifo_txdl_next_completed(fifo,
2507 &first_txdlh, &t_code);
2508 if (status == VXGE_HW_OK)
2509 if (fifo->callback(fifo, first_txdlh, t_code,
2510 channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
2511 status = VXGE_HW_COMPLETIONS_REMAIN;
2512
2513 return status;
2514}
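A combined poll step, shown only as a sketch: the ring and fifo objects are assumed to exist from vpath setup, and the skb array mirrors the vxge_hw_vpath_poll_tx() prototype, which lets the fifo callback hand back skbs for deferred freeing.

static void example_poll_vpath(struct __vxge_hw_ring *ring,
			       struct __vxge_hw_fifo *fifo)
{
	struct sk_buff *skbs[16];
	struct sk_buff **skb_ptr = skbs;
	int more = 0;

	/* Rx: completion callbacks run from inside poll_rx(). */
	vxge_hw_vpath_poll_rx(ring);

	/* Tx: reap completed descriptors; skbs collected via skb_ptr
	 * would be released by the caller afterwards. */
	vxge_hw_vpath_poll_tx(fifo, &skb_ptr, 16, &more);
}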
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
new file mode 100644
index 000000000000..4a518a3b131c
--- /dev/null
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
@@ -0,0 +1,2298 @@
1/******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
9 *
10 * vxge-traffic.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/
14#ifndef VXGE_TRAFFIC_H
15#define VXGE_TRAFFIC_H
16
17#include "vxge-reg.h"
18#include "vxge-version.h"
19
20#define VXGE_HW_DTR_MAX_T_CODE 16
21#define VXGE_HW_ALL_FOXES 0xFFFFFFFFFFFFFFFFULL
22#define VXGE_HW_INTR_MASK_ALL 0xFFFFFFFFFFFFFFFFULL
23#define VXGE_HW_MAX_VIRTUAL_PATHS 17
24
25#define VXGE_HW_MAC_MAX_MAC_PORT_ID 2
26
27#define VXGE_HW_DEFAULT_32 0xffffffff
28/* frames sizes */
29#define VXGE_HW_HEADER_802_2_SIZE 3
30#define VXGE_HW_HEADER_SNAP_SIZE 5
31#define VXGE_HW_HEADER_VLAN_SIZE 4
32#define VXGE_HW_MAC_HEADER_MAX_SIZE \
33 (ETH_HLEN + \
34 VXGE_HW_HEADER_802_2_SIZE + \
35 VXGE_HW_HEADER_VLAN_SIZE + \
36 VXGE_HW_HEADER_SNAP_SIZE)
37
38/* 32bit alignments */
39#define VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN 2
40#define VXGE_HW_HEADER_802_2_SNAP_ALIGN 2
41#define VXGE_HW_HEADER_802_2_ALIGN 3
42#define VXGE_HW_HEADER_SNAP_ALIGN 1
43
44#define VXGE_HW_L3_CKSUM_OK 0xFFFF
45#define VXGE_HW_L4_CKSUM_OK 0xFFFF
46
47/* Forward declarations */
48struct __vxge_hw_device;
49struct __vxge_hw_vpath_handle;
50struct vxge_hw_vp_config;
51struct __vxge_hw_virtualpath;
52struct __vxge_hw_channel;
53struct __vxge_hw_fifo;
54struct __vxge_hw_ring;
55struct vxge_hw_ring_attr;
56struct vxge_hw_mempool;
57
58#ifndef TRUE
59#define TRUE 1
60#endif
61
62#ifndef FALSE
63#define FALSE 0
64#endif
65
66/*VXGE_HW_STATUS_H*/
67
68#define VXGE_HW_EVENT_BASE 0
69#define VXGE_LL_EVENT_BASE 100
70
71/**
72 * enum vxge_hw_event- Enumerates slow-path HW events.
73 * @VXGE_HW_EVENT_UNKNOWN: Unknown (and invalid) event.
74 * @VXGE_HW_EVENT_SERR: Serious vpath hardware error event.
75 * @VXGE_HW_EVENT_ECCERR: vpath ECC error event.
76 * @VXGE_HW_EVENT_VPATH_ERR: Error local to the respective vpath
77 * @VXGE_HW_EVENT_FIFO_ERR: FIFO Doorbell fifo error.
78 * @VXGE_HW_EVENT_SRPCIM_SERR: srpcim hardware error event.
79 * @VXGE_HW_EVENT_MRPCIM_SERR: mrpcim hardware error event.
80 * @VXGE_HW_EVENT_MRPCIM_ECCERR: mrpcim ecc error event.
81 * @VXGE_HW_EVENT_RESET_START: Privileged entity is starting device reset
82 * @VXGE_HW_EVENT_RESET_COMPLETE: Device reset has been completed
83 * @VXGE_HW_EVENT_SLOT_FREEZE: Slot-freeze event. Driver tries to distinguish
84 * slot-freeze from the rest of the critical events (e.g. ECC) when it is
85 * impossible to PIO read "through" the bus, i.e. when getting all-foxes.
86 *
87 * enum vxge_hw_event enumerates slow-path HW events.
88 *
89 * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_up_f{},
90 * vxge_uld_link_down_f{}.
91 */
92enum vxge_hw_event {
93 VXGE_HW_EVENT_UNKNOWN = 0,
94 /* HW events */
95 VXGE_HW_EVENT_RESET_START = VXGE_HW_EVENT_BASE + 1,
96 VXGE_HW_EVENT_RESET_COMPLETE = VXGE_HW_EVENT_BASE + 2,
97 VXGE_HW_EVENT_LINK_DOWN = VXGE_HW_EVENT_BASE + 3,
98 VXGE_HW_EVENT_LINK_UP = VXGE_HW_EVENT_BASE + 4,
99 VXGE_HW_EVENT_ALARM_CLEARED = VXGE_HW_EVENT_BASE + 5,
100 VXGE_HW_EVENT_ECCERR = VXGE_HW_EVENT_BASE + 6,
101 VXGE_HW_EVENT_MRPCIM_ECCERR = VXGE_HW_EVENT_BASE + 7,
102 VXGE_HW_EVENT_FIFO_ERR = VXGE_HW_EVENT_BASE + 8,
103 VXGE_HW_EVENT_VPATH_ERR = VXGE_HW_EVENT_BASE + 9,
104 VXGE_HW_EVENT_CRITICAL_ERR = VXGE_HW_EVENT_BASE + 10,
105 VXGE_HW_EVENT_SERR = VXGE_HW_EVENT_BASE + 11,
106 VXGE_HW_EVENT_SRPCIM_SERR = VXGE_HW_EVENT_BASE + 12,
107 VXGE_HW_EVENT_MRPCIM_SERR = VXGE_HW_EVENT_BASE + 13,
108 VXGE_HW_EVENT_SLOT_FREEZE = VXGE_HW_EVENT_BASE + 14,
109};
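The events above are delivered to the driver through its slow-path callbacks; a hypothetical handler (names and actions illustrative only) might dispatch on them like this:

static void example_handle_event(enum vxge_hw_event type)
{
	switch (type) {
	case VXGE_HW_EVENT_LINK_UP:
	case VXGE_HW_EVENT_LINK_DOWN:
		/* propagate carrier state to the stack */
		break;
	case VXGE_HW_EVENT_RESET_START:
	case VXGE_HW_EVENT_RESET_COMPLETE:
		/* quiesce and later restart traffic around a device reset */
		break;
	case VXGE_HW_EVENT_FIFO_ERR:
	case VXGE_HW_EVENT_VPATH_ERR:
		/* recover the affected vpath */
		break;
	case VXGE_HW_EVENT_SLOT_FREEZE:
		/* PIO reads return all-foxes; only a full reset helps */
		break;
	default:
		break;
	}
}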
110
111#define VXGE_HW_SET_LEVEL(a, b) (((a) > (b)) ? (a) : (b))
112
113/*
114 * struct vxge_hw_mempool_dma - Represents DMA objects passed to the
115 caller.
116 */
117struct vxge_hw_mempool_dma {
118 dma_addr_t addr;
119 struct pci_dev *handle;
120 struct pci_dev *acc_handle;
121};
122
123/*
124 * vxge_hw_mempool_item_f - Mempool item alloc/free callback
125 * @mempoolh: Memory pool handle.
126 * @memblock: Address of memory block
127 * @memblock_index: Index of memory block
128 * @item: Item that gets allocated or freed.
129 * @index: Item's index in the memory pool.
130 * @is_last: True, if this item is the last one in the pool; false - otherwise.
131 * @userdata: Per-pool user context.
132 *
133 * Memory pool allocation/deallocation callback.
134 */
135
136/*
137 * struct vxge_hw_mempool - Memory pool.
138 */
139struct vxge_hw_mempool {
140
141 void (*item_func_alloc)(
142 struct vxge_hw_mempool *mempoolh,
143 u32 memblock_index,
144 struct vxge_hw_mempool_dma *dma_object,
145 u32 index,
146 u32 is_last);
147
148 void *userdata;
149 void **memblocks_arr;
150 void **memblocks_priv_arr;
151 struct vxge_hw_mempool_dma *memblocks_dma_arr;
152 struct __vxge_hw_device *devh;
153 u32 memblock_size;
154 u32 memblocks_max;
155 u32 memblocks_allocated;
156 u32 item_size;
157 u32 items_max;
158 u32 items_initial;
159 u32 items_current;
160 u32 items_per_memblock;
161 void **items_arr;
162 u32 items_priv_size;
163};
164
165#define VXGE_HW_MAX_INTR_PER_VP 4
166#define VXGE_HW_VPATH_INTR_TX 0
167#define VXGE_HW_VPATH_INTR_RX 1
168#define VXGE_HW_VPATH_INTR_EINTA 2
169#define VXGE_HW_VPATH_INTR_BMAP 3
170
171#define VXGE_HW_BLOCK_SIZE 4096
172
173/**
174 * struct vxge_hw_tim_intr_config - Titan Tim interrupt configuration.
175 * @intr_enable: Set to 1, if interrupt is enabled.
176 * @btimer_val: Boundary Timer Initialization value in units of 272 ns.
177 * @timer_ac_en: Timer Automatic Cancel. 1 : Automatic Canceling Enable: when
178 * asserted, other interrupt-generating entities will cancel the
179 * scheduled timer interrupt.
180 * @timer_ci_en: Timer Continuous Interrupt. 1 : Continuous Interrupting Enable:
181 * When asserted, an interrupt will be generated every time the
182 * boundary timer expires, even if no traffic has been transmitted
183 * on this interrupt.
184 * @timer_ri_en: Timer Consecutive (Re-) Interrupt 1 : Consecutive
185 * (Re-) Interrupt Enable: When asserted, an interrupt will be
186 * generated the next time the timer expires, even if no traffic has
187 * been transmitted on this interrupt. (This will only happen once
188 * each time that this value is written to the TIM.) This bit is
189 * cleared by H/W at the end of the current-timer-interval when
190 * the interrupt is triggered.
191 * @rtimer_val: Restriction Timer Initialization value in units of 272 ns.
192 * @util_sel: Utilization Selector. Selects which of the workload approximations
193 * to use (e.g. legacy Tx utilization, Tx/Rx utilization, host
194 * specified utilization etc.), selects one of
195 * the 17 host configured values.
196 * 0-Virtual Path 0
197 * 1-Virtual Path 1
198 * ...
199 * 16-Virtual Path 16
200 * 17-Legacy Tx network utilization, provided by TPA
201 * 18-Legacy Rx network utilization, provided by FAU
202 * 19-Average of legacy Rx and Tx utilization calculated from link
203 * utilization values.
204 * 20-31-Invalid configurations
205 * 32-Host utilization for Virtual Path 0
206 * 33-Host utilization for Virtual Path 1
207 * ...
208 * 48-Host utilization for Virtual Path 16
209 * 49-Legacy Tx network utilization, provided by TPA
210 * 50-Legacy Rx network utilization, provided by FAU
211 * 51-Average of legacy Rx and Tx utilization calculated from
212 * link utilization values.
213 * 52-63-Invalid configurations
214 * @ltimer_val: Latency Timer Initialization Value in units of 272 ns.
215 * @txd_cnt_en: TxD Return Event Count Enable. This configuration bit when set
216 * to 1 enables counting of TxD0 returns (signalled by PCC's),
217 * towards utilization event count values.
218 * @urange_a: Defines the upper limit (in percent) for this utilization range
219 * to be active. This range is considered active
220 * if 0 <= UTIL <= URNG_A
221 * and the UEC_A field (below) is non-zero.
222 * @uec_a: Utilization Event Count A. If this range is active, the adapter will
223 * wait until UEC_A events have occurred on the interrupt before
224 * generating an interrupt.
225 * @urange_b: Link utilization range B.
226 * @uec_b: Utilization Event Count B.
227 * @urange_c: Link utilization range C.
228 * @uec_c: Utilization Event Count C.
229 * @urange_d: Link utilization range D.
230 * @uec_d: Utilization Event Count D.
231 * Traffic Interrupt Controller Module interrupt configuration.
232 */
233struct vxge_hw_tim_intr_config {
234
235 u32 intr_enable;
236#define VXGE_HW_TIM_INTR_ENABLE 1
237#define VXGE_HW_TIM_INTR_DISABLE 0
238#define VXGE_HW_TIM_INTR_DEFAULT 0
239
240 u32 btimer_val;
241#define VXGE_HW_MIN_TIM_BTIMER_VAL 0
242#define VXGE_HW_MAX_TIM_BTIMER_VAL 67108864
243#define VXGE_HW_USE_FLASH_DEFAULT (~0)
244
245 u32 timer_ac_en;
246#define VXGE_HW_TIM_TIMER_AC_ENABLE 1
247#define VXGE_HW_TIM_TIMER_AC_DISABLE 0
248
249 u32 timer_ci_en;
250#define VXGE_HW_TIM_TIMER_CI_ENABLE 1
251#define VXGE_HW_TIM_TIMER_CI_DISABLE 0
252
253 u32 timer_ri_en;
254#define VXGE_HW_TIM_TIMER_RI_ENABLE 1
255#define VXGE_HW_TIM_TIMER_RI_DISABLE 0
256
257 u32 rtimer_val;
258#define VXGE_HW_MIN_TIM_RTIMER_VAL 0
259#define VXGE_HW_MAX_TIM_RTIMER_VAL 67108864
260
261 u32 util_sel;
262#define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL 17
263#define VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL 18
264#define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_RX_AVE_NET_UTIL 19
265#define VXGE_HW_TIM_UTIL_SEL_PER_VPATH 63
266
267 u32 ltimer_val;
268#define VXGE_HW_MIN_TIM_LTIMER_VAL 0
269#define VXGE_HW_MAX_TIM_LTIMER_VAL 67108864
270
271 /* Line utilization interrupts */
272 u32 urange_a;
273#define VXGE_HW_MIN_TIM_URANGE_A 0
274#define VXGE_HW_MAX_TIM_URANGE_A 100
275
276 u32 uec_a;
277#define VXGE_HW_MIN_TIM_UEC_A 0
278#define VXGE_HW_MAX_TIM_UEC_A 65535
279
280 u32 urange_b;
281#define VXGE_HW_MIN_TIM_URANGE_B 0
282#define VXGE_HW_MAX_TIM_URANGE_B 100
283
284 u32 uec_b;
285#define VXGE_HW_MIN_TIM_UEC_B 0
286#define VXGE_HW_MAX_TIM_UEC_B 65535
287
288 u32 urange_c;
289#define VXGE_HW_MIN_TIM_URANGE_C 0
290#define VXGE_HW_MAX_TIM_URANGE_C 100
291
292 u32 uec_c;
293#define VXGE_HW_MIN_TIM_UEC_C 0
294#define VXGE_HW_MAX_TIM_UEC_C 65535
295
296 u32 uec_d;
297#define VXGE_HW_MIN_TIM_UEC_D 0
298#define VXGE_HW_MAX_TIM_UEC_D 65535
299};
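An illustrative Rx-side fill of this structure, using only the fields and bounds defined above; the numbers are placeholders rather than tuning recommendations (272 ns per btimer unit, so 368 units is roughly 100 us).

static void example_fill_rx_tim(struct vxge_hw_tim_intr_config *tim)
{
	tim->intr_enable = VXGE_HW_TIM_INTR_ENABLE;
	tim->btimer_val  = 368;		/* ~100 us boundary timer */
	tim->timer_ac_en = VXGE_HW_TIM_TIMER_AC_ENABLE;
	tim->timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
	tim->timer_ri_en = VXGE_HW_TIM_TIMER_RI_DISABLE;
	tim->rtimer_val  = 0;
	tim->util_sel    = VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL;
	tim->ltimer_val  = 0;

	/* Light load: interrupt after only a few events. */
	tim->urange_a = 10;
	tim->uec_a    = 4;
	/* Heavier ranges batch progressively more events per interrupt. */
	tim->urange_b = 40;
	tim->uec_b    = 16;
	tim->urange_c = 70;
	tim->uec_c    = 32;
	tim->uec_d    = 64;
}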
300
301#define VXGE_HW_STATS_OP_READ 0
302#define VXGE_HW_STATS_OP_CLEAR_STAT 1
303#define VXGE_HW_STATS_OP_CLEAR_ALL_VPATH_STATS 2
304#define VXGE_HW_STATS_OP_CLEAR_ALL_STATS_OF_LOC 2
305#define VXGE_HW_STATS_OP_CLEAR_ALL_STATS 3
306
307#define VXGE_HW_STATS_LOC_AGGR 17
308#define VXGE_HW_STATS_AGGRn_OFFSET 0x00720
309
310#define VXGE_HW_STATS_VPATH_TX_OFFSET 0x0
311#define VXGE_HW_STATS_VPATH_RX_OFFSET 0x00090
312
313#define VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET (0x001d0 >> 3)
314#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(bits) \
315 vxge_bVALn(bits, 0, 32)
316
317#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(bits) \
318 vxge_bVALn(bits, 32, 32)
319
320#define VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET (0x001d8 >> 3)
321#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(bits) \
322 vxge_bVALn(bits, 0, 32)
323
324#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(bits) \
325 vxge_bVALn(bits, 32, 32)
326
327/**
328 * struct vxge_hw_xmac_aggr_stats - Per-Aggregator XMAC Statistics
329 *
330 * @tx_frms: Count of data frames transmitted on this Aggregator on all
331 * its Aggregation ports. Does not include LACPDUs or Marker PDUs.
332 * However, does include frames discarded by the Distribution
333 * function.
334 * @tx_data_octets: Count of data and padding octets of frames transmitted
335 * on this Aggregator on all its Aggregation ports. Does not include
336 * octets of LACPDUs or Marker PDUs. However, does include octets of
337 * frames discarded by the Distribution function.
338 * @tx_mcast_frms: Count of data frames transmitted (to a group destination
339 * address other than the broadcast address) on this Aggregator on
340 * all its Aggregation ports. Does not include LACPDUs or Marker
341 * PDUs. However, does include frames discarded by the Distribution
342 * function.
343 * @tx_bcast_frms: Count of broadcast data frames transmitted on this Aggregator
344 * on all its Aggregation ports. Does not include LACPDUs or Marker
345 * PDUs. However, does include frames discarded by the Distribution
346 * function.
347 * @tx_discarded_frms: Count of data frames to be transmitted on this Aggregator
348 * that are discarded by the Distribution function. This occurs when
349 * conversations are allocated to different ports and have to be
350 * flushed on old ports
351 * @tx_errored_frms: Count of data frames transmitted on this Aggregator that
352 * experience transmission errors on its Aggregation ports.
353 * @rx_frms: Count of data frames received on this Aggregator on all its
354 * Aggregation ports. Does not include LACPDUs or Marker PDUs.
355 * Also, does not include frames discarded by the Collection
356 * function.
357 * @rx_data_octets: Count of data and padding octets of frames received on this
358 * Aggregator on all its Aggregation ports. Does not include octets
359 * of LACPDUs or Marker PDUs. Also, does not include
360 * octets of frames
361 * discarded by the Collection function.
362 * @rx_mcast_frms: Count of data frames received (from a group destination
363 * address other than the broadcast address) on this Aggregator on
364 * all its Aggregation ports. Does not include LACPDUs or Marker
365 * PDUs. Also, does not include frames discarded by the Collection
366 * function.
367 * @rx_bcast_frms: Count of broadcast data frames received on this Aggregator on
368 * all its Aggregation ports. Does not include LACPDUs or Marker
369 * PDUs. Also, does not include frames discarded by the Collection
370 * function.
371 * @rx_discarded_frms: Count of data frames received on this Aggregator that are
372 * discarded by the Collection function because the Collection
373 * function was disabled on the port on which the frames were received.
374 * @rx_errored_frms: Count of data frames received on this Aggregator that are
375 * discarded by its Aggregation ports, or are discarded by the
376 * Collection function of the Aggregator, or that are discarded by
377 * the Aggregator due to detection of an illegal Slow Protocols PDU.
378 * @rx_unknown_slow_proto_frms: Count of data frames received on this Aggregator
379 * that are discarded by its Aggregation ports due to detection of
380 * an unknown Slow Protocols PDU.
381 *
382 * Per aggregator XMAC RX statistics.
383 */
384struct vxge_hw_xmac_aggr_stats {
385/*0x000*/ u64 tx_frms;
386/*0x008*/ u64 tx_data_octets;
387/*0x010*/ u64 tx_mcast_frms;
388/*0x018*/ u64 tx_bcast_frms;
389/*0x020*/ u64 tx_discarded_frms;
390/*0x028*/ u64 tx_errored_frms;
391/*0x030*/ u64 rx_frms;
392/*0x038*/ u64 rx_data_octets;
393/*0x040*/ u64 rx_mcast_frms;
394/*0x048*/ u64 rx_bcast_frms;
395/*0x050*/ u64 rx_discarded_frms;
396/*0x058*/ u64 rx_errored_frms;
397/*0x060*/ u64 rx_unknown_slow_proto_frms;
398} __packed;
399
400/**
401 * struct vxge_hw_xmac_port_stats - XMAC Port Statistics
402 *
403 * @tx_ttl_frms: Count of successfully transmitted MAC frames
404 * @tx_ttl_octets: Count of total octets of transmitted frames, not including
405 * framing characters (i.e. less framing bits). To determine the
406 * total octets of transmitted frames, including framing characters,
407 * multiply PORTn_TX_TTL_FRMS by 8 and add it to this stat (unless
408 * otherwise configured, this stat only counts frames that have
409 * 8 bytes of preamble for each frame). This stat can be configured
410 * (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count everything
411 * including the preamble octets.
412 * @tx_data_octets: Count of data and padding octets of successfully transmitted
413 * frames.
414 * @tx_mcast_frms: Count of successfully transmitted frames to a group address
415 * other than the broadcast address.
416 * @tx_bcast_frms: Count of successfully transmitted frames to the broadcast
417 * group address.
418 * @tx_ucast_frms: Count of transmitted frames containing a unicast address.
419 * Includes discarded frames that are not sent to the network.
420 * @tx_tagged_frms: Count of transmitted frames containing a VLAN tag.
421 * @tx_vld_ip: Count of transmitted IP datagrams that are passed to the network.
422 * @tx_vld_ip_octets: Count of total octets of transmitted IP datagrams that
423 * are passed to the network.
424 * @tx_icmp: Count of transmitted ICMP messages. Includes messages not sent
425 * due to problems within ICMP.
426 * @tx_tcp: Count of transmitted TCP segments. Does not include segments
427 * containing retransmitted octets.
428 * @tx_rst_tcp: Count of transmitted TCP segments containing the RST flag.
429 * @tx_udp: Count of transmitted UDP datagrams.
430 * @tx_parse_error: Increments when the TPA is unable to parse a packet. This
431 * generally occurs when a packet is corrupt somehow, including
432 * packets that have IP version mismatches, invalid Layer 2 control
433 * fields, etc. L3/L4 checksums are not offloaded, but the packet
434 * is still transmitted.
435 * @tx_unknown_protocol: Increments when the TPA encounters an unknown
436 * protocol, such as a new IPv6 extension header, or an unsupported
437 * Routing Type. The packet still has a checksum calculated but it
438 * may be incorrect.
439 * @tx_pause_ctrl_frms: Count of MAC PAUSE control frames that are transmitted.
440 * Since the only control frames supported by this device are
441 * PAUSE frames, this register is a count of all transmitted MAC
442 * control frames.
443 * @tx_marker_pdu_frms: Count of Marker PDUs transmitted
444 * on this Aggregation port.
445 * @tx_lacpdu_frms: Count of LACPDUs transmitted on this Aggregation port.
446 * @tx_drop_ip: Count of transmitted IP datagrams that could not be passed to
447 * the network. Increments because of:
448 * 1) An internal processing error
449 * (such as an uncorrectable ECC error). 2) A frame parsing error
450 * during IP checksum calculation.
451 * @tx_marker_resp_pdu_frms: Count of Marker Response PDUs transmitted on this
452 * Aggregation port.
453 * @tx_xgmii_char2_match: Maintains a count of the number of transmitted XGMII
454 * characters that match a pattern that is programmable through
455 * register XMAC_STATS_TX_XGMII_CHAR_PORTn. By default, the pattern
456 * is set to /T/ (i.e. the terminate character), thus the statistic
457 * tracks the number of transmitted Terminate characters.
458 * @tx_xgmii_char1_match: Maintains a count of the number of transmitted XGMII
459 * characters that match a pattern that is programmable through
460 * register XMAC_STATS_TX_XGMII_CHAR_PORTn. By default, the pattern
461 * is set to /S/ (i.e. the start character),
462 * thus the statistic tracks
463 * the number of transmitted Start characters.
464 * @tx_xgmii_column2_match: Maintains a count of the number of transmitted XGMII
465 * columns that match a pattern that is programmable through register
466 * XMAC_STATS_TX_XGMII_COLUMN2_PORTn. By default, the pattern is set
467 * to 4 x /E/ (i.e. a column containing all error characters), thus
468 * the statistic tracks the number of Error columns transmitted at
469 * any time. If XMAC_STATS_TX_XGMII_BEHAV_COLUMN2_PORTn.NEAR_COL1 is
470 * set to 1, then this stat increments when COLUMN2 is found within
471 * 'n' clocks after COLUMN1. Here, 'n' is defined by
472 * XMAC_STATS_TX_XGMII_BEHAV_COLUMN2_PORTn.NUM_COL (if 'n' is set
473 * to 0, then it means to search anywhere for COLUMN2).
474 * @tx_xgmii_column1_match: Maintains a count of the number of transmitted XGMII
475 * columns that match a pattern that is programmable through register
476 * XMAC_STATS_TX_XGMII_COLUMN1_PORTn. By default, the pattern is set
477 * to 4 x /I/ (i.e. a column containing all idle characters),
478 * thus the statistic tracks the number of transmitted Idle columns.
479 * @tx_any_err_frms: Count of transmitted frames containing any error that
480 * prevents them from being passed to the network. Increments if
481 * there is an ECC while reading the frame out of the transmit
482 * buffer. Also increments if the transmit protocol assist (TPA)
483 * block determines that the frame should not be sent.
484 * @tx_drop_frms: Count of frames that could not be sent for no other reason
485 * than internal MAC processing. Increments once whenever the
486 * transmit buffer is flushed (due to an ECC error on a memory
487 * descriptor).
488 * @rx_ttl_frms: Count of total received MAC frames, including frames received
489 * with frame-too-long, FCS, or length errors. This stat can be
490 * configured (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count
491 * everything, even "frames" as small as one byte of preamble.
492 * @rx_vld_frms: Count of successfully received MAC frames. Does not include
493 * frames received with frame-too-long, FCS, or length errors.
494 * @rx_offload_frms: Count of offloaded received frames that are passed to
495 * the host.
496 * @rx_ttl_octets: Count of total octets of received frames, not including
497 * framing characters (i.e. less framing bits). To determine the
498 * total octets of received frames, including framing characters,
499 * multiply PORTn_RX_TTL_FRMS by 8 and add it to this stat (unless
500 * otherwise configured, this stat only counts frames that have 8
501 * bytes of preamble for each frame). This stat can be configured
502 * (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count everything,
503 * even the preamble octets of "frames" as small as one byte of preamble
504 * @rx_data_octets: Count of data and padding octets of successfully received
505 * frames. Does not include frames received with frame-too-long,
506 * FCS, or length errors.
507 * @rx_offload_octets: Count of total octets, not including framing
508 * characters, of offloaded received frames that are passed
509 * to the host.
510 * @rx_vld_mcast_frms: Count of successfully received MAC frames containing a
511 * nonbroadcast group address. Does not include frames received
512 * with frame-too-long, FCS, or length errors.
513 * @rx_vld_bcast_frms: Count of successfully received MAC frames containing
514 * the broadcast group address. Does not include frames received
515 * with frame-too-long, FCS, or length errors.
516 * @rx_accepted_ucast_frms: Count of successfully received frames containing
517 * a unicast address. Only includes frames that are passed to
518 * the system.
519 * @rx_accepted_nucast_frms: Count of successfully received frames containing
520 * a non-unicast (broadcast or multicast) address. Only includes
521 * frames that are passed to the system. Could include, for instance,
522 * non-unicast frames that contain FCS errors if the MAC_ERROR_CFG
523 * register is set to pass FCS-errored frames to the host.
524 * @rx_tagged_frms: Count of received frames containing a VLAN tag.
525 * @rx_long_frms: Count of received frames that are longer than RX_MAX_PYLD_LEN
526 * + 18 bytes (+ 22 bytes if VLAN-tagged).
527 * @rx_usized_frms: Count of received frames of length (including FCS, but not
528 * framing bits) less than 64 octets, that are otherwise well-formed.
529 * In other words, counts runts.
530 * @rx_osized_frms: Count of received frames of length (including FCS, but not
531 * framing bits) more than 1518 octets, that are otherwise
532 * well-formed. Note: If register XMAC_STATS_GLOBAL_CFG.VLAN_HANDLING
533 * is set to 1, then "more than 1518 octets" becomes "more than 1518
534 * (1522 if VLAN-tagged) octets".
535 * @rx_frag_frms: Count of received frames of length (including FCS, but not
536 * framing bits) less than 64 octets that had bad FCS. In other
537 * words, counts fragments.
538 * @rx_jabber_frms: Count of received frames of length (including FCS, but not
539 * framing bits) more than 1518 octets that had bad FCS. In other
540 * words, counts jabbers. Note: If register
541 * XMAC_STATS_GLOBAL_CFG.VLAN_HANDLING is set to 1, then "more than
542 * 1518 octets" becomes "more than 1518 (1522 if VLAN-tagged)
543 * octets".
544 * @rx_ttl_64_frms: Count of total received MAC frames with length (including
545 * FCS, but not framing bits) of exactly 64 octets. Includes frames
546 * received with frame-too-long, FCS, or length errors.
547 * @rx_ttl_65_127_frms: Count of total received MAC frames with length
548 * (including FCS, but not framing bits) of between 65 and 127
549 * octets inclusive. Includes frames received with frame-too-long,
550 * FCS, or length errors.
551 * @rx_ttl_128_255_frms: Count of total received MAC frames with length
552 * (including FCS, but not framing bits) of between 128 and 255
553 * octets inclusive. Includes frames received with frame-too-long,
554 * FCS, or length errors.
555 * @rx_ttl_256_511_frms: Count of total received MAC frames with length
556 * (including FCS, but not framing bits) of between 256 and 511
557 * octets inclusive. Includes frames received with frame-too-long,
558 * FCS, or length errors.
559 * @rx_ttl_512_1023_frms: Count of total received MAC frames with length
560 * (including FCS, but not framing bits) of between 512 and 1023
561 * octets inclusive. Includes frames received with frame-too-long,
562 * FCS, or length errors.
563 * @rx_ttl_1024_1518_frms: Count of total received MAC frames with length
564 * (including FCS, but not framing bits) of between 1024 and 1518
565 * octets inclusive. Includes frames received with frame-too-long,
566 * FCS, or length errors.
567 * @rx_ttl_1519_4095_frms: Count of total received MAC frames with length
568 * (including FCS, but not framing bits) of between 1519 and 4095
569 * octets inclusive. Includes frames received with frame-too-long,
570 * FCS, or length errors.
571 * @rx_ttl_4096_8191_frms: Count of total received MAC frames with length
572 * (including FCS, but not framing bits) of between 4096 and 8191
573 * octets inclusive. Includes frames received with frame-too-long,
574 * FCS, or length errors.
575 * @rx_ttl_8192_max_frms: Count of total received MAC frames with length
576 * (including FCS, but not framing bits) of between 8192 and
577 * RX_MAX_PYLD_LEN+18 octets inclusive. Includes frames received
578 * with frame-too-long, FCS, or length errors.
579 * @rx_ttl_gt_max_frms: Count of total received MAC frames with length
580 * (including FCS, but not framing bits) exceeding
581 * RX_MAX_PYLD_LEN+18 (+22 bytes if VLAN-tagged) octets inclusive.
582 * Includes frames received with frame-too-long,
583 * FCS, or length errors.
584 * @rx_ip: Count of received IP datagrams. Includes errored IP datagrams.
585 * @rx_accepted_ip: Count of received IP datagrams that
586 * are passed to the system.
587 * @rx_ip_octets: Count of number of octets in received IP datagrams. Includes
588 * errored IP datagrams.
589 * @rx_err_ip: Count of received IP datagrams containing errors. For example,
590 * bad IP checksum.
591 * @rx_icmp: Count of received ICMP messages. Includes errored ICMP messages.
592 * @rx_tcp: Count of received TCP segments. Includes errored TCP segments.
593 * Note: This stat contains a count of all received TCP segments,
594 * regardless of whether or not they pertain to an established
595 * connection.
596 * @rx_udp: Count of received UDP datagrams.
597 * @rx_err_tcp: Count of received TCP segments containing errors. For example,
598 * bad TCP checksum.
599 * @rx_pause_count: Count of number of pause quanta that the MAC has been in
600 * the paused state. Recall, one pause quantum equates to 512
601 * bit times.
602 * @rx_pause_ctrl_frms: Count of received MAC PAUSE control frames.
603 * @rx_unsup_ctrl_frms: Count of received MAC control frames that do not
604 * contain the PAUSE opcode. The sum of RX_PAUSE_CTRL_FRMS and
605 * this register is a count of all received MAC control frames.
606 * Note: This stat may be configured to count all layer 2 errors
607 * (i.e. length errors and FCS errors).
608 * @rx_fcs_err_frms: Count of received MAC frames that do not pass FCS. Does
609 * not include frames received with frame-too-long or
610 * frame-too-short error.
611 * @rx_in_rng_len_err_frms: Count of received frames with a length/type field
612 * value between 46 (42 for VLAN-tagged frames) and 1500 (also 1500
613 * for VLAN-tagged frames), inclusive, that does not match the
614 * number of data octets (including pad) received. Also contains
615 * a count of received frames with a length/type field less than
616 * 46 (42 for VLAN-tagged frames) and the number of data octets
617 * (including pad) received is greater than 46 (42 for VLAN-tagged
618 * frames).
619 * @rx_out_rng_len_err_frms: Count of received frames with length/type field
620 * between 1501 and 1535 decimal, inclusive.
621 * @rx_drop_frms: Count of received frames that could not be passed to the host.
622 * See PORTn_RX_L2_MGMT_DISCARD, PORTn_RX_RPA_DISCARD,
623 * PORTn_RX_TRASH_DISCARD, PORTn_RX_RTS_DISCARD, PORTn_RX_RED_DISCARD
624 * for a list of reasons. Because the RMAC drops one frame at a time,
625 * this stat also indicates the number of drop events.
626 * @rx_discarded_frms: Count of received frames containing
627 * any error that prevents
628 * them from being passed to the system. See PORTn_RX_FCS_DISCARD,
629 * PORTn_RX_LEN_DISCARD, and PORTn_RX_SWITCH_DISCARD for a list of
630 * reasons.
631 * @rx_drop_ip: Count of received IP datagrams that could not be passed to the
632 * host. See PORTn_RX_DROP_FRMS for a list of reasons.
633 * @rx_drop_udp: Count of received UDP datagrams that are not delivered to the
634 * host. See PORTn_RX_DROP_FRMS for a list of reasons.
635 * @rx_marker_pdu_frms: Count of valid Marker PDUs received on this Aggregation
636 * port.
637 * @rx_lacpdu_frms: Count of valid LACPDUs received on this Aggregation port.
638 * @rx_unknown_pdu_frms: Count of received frames (on this Aggregation port)
639 * that carry the Slow Protocols EtherType, but contain an unknown
640 * PDU. Or frames that contain the Slow Protocols group MAC address,
641 * but do not carry the Slow Protocols EtherType.
642 * @rx_marker_resp_pdu_frms: Count of valid Marker Response PDUs received on
643 * this Aggregation port.
644 * @rx_fcs_discard: Count of received frames that are discarded because the
645 * FCS check failed.
646 * @rx_illegal_pdu_frms: Count of received frames (on this Aggregation port)
647 * that carry the Slow Protocols EtherType, but contain a badly
648 * formed PDU. Or frames that carry the Slow Protocols EtherType,
649 * but contain an illegal value of Protocol Subtype.
650 * @rx_switch_discard: Count of received frames that are discarded by the
651 * internal switch because they did not have an entry in the
652 * Filtering Database. This includes frames that had an invalid
653 * destination MAC address or VLAN ID. It also includes frames that are
654 * discarded because they did not satisfy the length requirements
655 * of the target VPATH.
656 * @rx_len_discard: Count of received frames that are discarded because of an
657 * invalid frame length (includes fragments, oversized frames and
658 * mismatch between frame length and length/type field). This stat
659 * can be configured
660 * (see XMAC_STATS_GLOBAL_CFG.LEN_DISCARD_HANDLING).
661 * @rx_rpa_discard: Count of received frames that were discarded because the
662 * receive protocol assist (RPA) discovered an error in the frame
663 * or was unable to parse the frame.
664 * @rx_l2_mgmt_discard: Count of Layer 2 management frames (eg. pause frames,
665 * Link Aggregation Control Protocol (LACP) frames, etc.) that are
666 * discarded.
667 * @rx_rts_discard: Count of received frames that are discarded by the receive
668 * traffic steering (RTS) logic. Includes those frames discarded
669 * because the SSC response contradicted the switch table, because
670 * the SSC timed out, or because the target queue could not fit the
671 * frame.
672 * @rx_trash_discard: Count of received frames that are discarded because
673 * receive traffic steering (RTS) steered the frame to the trash
674 * queue.
675 * @rx_buff_full_discard: Count of received frames that are discarded because
676 * internal buffers are full. Includes frames discarded because the
677 * RTS logic is waiting for an SSC lookup that has no timeout bound.
678 * Also, includes frames that are dropped because the MAC2FAU buffer
679 * is nearly full -- this can happen if the external receive buffer
680 * is full and the receive path is backing up.
681 * @rx_red_discard: Count of received frames that are discarded because of RED
682 * (Random Early Discard).
683 * @rx_xgmii_ctrl_err_cnt: Maintains a count of unexpected or misplaced control
684 * characters occurring between times of normal data transmission
685 * (i.e. not included in RX_XGMII_DATA_ERR_CNT). This counter is
686 * incremented when either -
687 * 1) The Reconciliation Sublayer (RS) is expecting one control
688 * character and gets another (i.e. is expecting a Start
689 * character, but gets another control character).
690 * 2) Start control character is not in lane 0
691 * Only increments the count by one for each XGMII column.
692 * @rx_xgmii_data_err_cnt: Maintains a count of unexpected control characters
693 * during normal data transmission. If the Reconciliation Sublayer
694 * (RS) receives a control character, other than a terminate control
695 * character, during receipt of data octets then this register is
696 * incremented. Also increments if the start frame delimiter is not
697 * found in the correct location. Only increments the count by one
698 * for each XGMII column.
699 * @rx_xgmii_char1_match: Maintains a count of the number of XGMII characters
700 * that match a pattern that is programmable through register
701 * XMAC_STATS_RX_XGMII_CHAR_PORTn. By default, the pattern is set
702 * to /E/ (i.e. the error character), thus the statistic tracks the
703 * number of Error characters received at any time.
704 * @rx_xgmii_err_sym: Count of the number of symbol errors in the received
705 * XGMII data (i.e. PHY indicates "Receive Error" on the XGMII).
706 * Only includes symbol errors that are observed between the XGMII
707 * Start Frame Delimiter and End Frame Delimiter, inclusive, and
708 * only increments the count by one for each frame.
709 * @rx_xgmii_column1_match: Maintains a count of the number of XGMII columns
710 * that match a pattern that is programmable through register
711 * XMAC_STATS_RX_XGMII_COLUMN1_PORTn. By default, the pattern is set
712 * to 4 x /E/ (i.e. a column containing all error characters), thus
713 * the statistic tracks the number of Error columns received at any
714 * time.
715 * @rx_xgmii_char2_match: Maintains a count of the number of XGMII characters
716 * that match a pattern that is programmable through register
717 * XMAC_STATS_RX_XGMII_CHAR_PORTn. By default, the pattern is set
718 * to /E/ (i.e. the error character), thus the statistic tracks the
719 * number of Error characters received at any time.
720 * @rx_local_fault: Maintains a count of the number of times that link
721 * transitioned from "up" to "down" due to a local fault.
722 * @rx_xgmii_column2_match: Maintains a count of the number of XGMII columns
723 * that match a pattern that is programmable through register
724 * XMAC_STATS_RX_XGMII_COLUMN2_PORTn. By default, the pattern is set
725 * to 4 x /E/ (i.e. a column containing all error characters), thus
726 * the statistic tracks the number of Error columns received at any
727 * time. If XMAC_STATS_RX_XGMII_BEHAV_COLUMN2_PORTn.NEAR_COL1 is set
728 * to 1, then this stat increments when COLUMN2 is found within 'n'
729 * clocks after COLUMN1. Here, 'n' is defined by
730 * XMAC_STATS_RX_XGMII_BEHAV_COLUMN2_PORTn.NUM_COL (if 'n' is set to
731 * 0, then it means to search anywhere for COLUMN2).
732 * @rx_jettison: Count of received frames that are jettisoned because internal
733 * buffers are full.
734 * @rx_remote_fault: Maintains a count of the number of times that link
735 * transitioned from "up" to "down" due to a remote fault.
736 *
737 * XMAC Port Statistics.
738 */
739struct vxge_hw_xmac_port_stats {
740/*0x000*/ u64 tx_ttl_frms;
741/*0x008*/ u64 tx_ttl_octets;
742/*0x010*/ u64 tx_data_octets;
743/*0x018*/ u64 tx_mcast_frms;
744/*0x020*/ u64 tx_bcast_frms;
745/*0x028*/ u64 tx_ucast_frms;
746/*0x030*/ u64 tx_tagged_frms;
747/*0x038*/ u64 tx_vld_ip;
748/*0x040*/ u64 tx_vld_ip_octets;
749/*0x048*/ u64 tx_icmp;
750/*0x050*/ u64 tx_tcp;
751/*0x058*/ u64 tx_rst_tcp;
752/*0x060*/ u64 tx_udp;
753/*0x068*/ u32 tx_parse_error;
754/*0x06c*/ u32 tx_unknown_protocol;
755/*0x070*/ u64 tx_pause_ctrl_frms;
756/*0x078*/ u32 tx_marker_pdu_frms;
757/*0x07c*/ u32 tx_lacpdu_frms;
758/*0x080*/ u32 tx_drop_ip;
759/*0x084*/ u32 tx_marker_resp_pdu_frms;
760/*0x088*/ u32 tx_xgmii_char2_match;
761/*0x08c*/ u32 tx_xgmii_char1_match;
762/*0x090*/ u32 tx_xgmii_column2_match;
763/*0x094*/ u32 tx_xgmii_column1_match;
764/*0x098*/ u32 unused1;
765/*0x09c*/ u16 tx_any_err_frms;
766/*0x09e*/ u16 tx_drop_frms;
767/*0x0a0*/ u64 rx_ttl_frms;
768/*0x0a8*/ u64 rx_vld_frms;
769/*0x0b0*/ u64 rx_offload_frms;
770/*0x0b8*/ u64 rx_ttl_octets;
771/*0x0c0*/ u64 rx_data_octets;
772/*0x0c8*/ u64 rx_offload_octets;
773/*0x0d0*/ u64 rx_vld_mcast_frms;
774/*0x0d8*/ u64 rx_vld_bcast_frms;
775/*0x0e0*/ u64 rx_accepted_ucast_frms;
776/*0x0e8*/ u64 rx_accepted_nucast_frms;
777/*0x0f0*/ u64 rx_tagged_frms;
778/*0x0f8*/ u64 rx_long_frms;
779/*0x100*/ u64 rx_usized_frms;
780/*0x108*/ u64 rx_osized_frms;
781/*0x110*/ u64 rx_frag_frms;
782/*0x118*/ u64 rx_jabber_frms;
783/*0x120*/ u64 rx_ttl_64_frms;
784/*0x128*/ u64 rx_ttl_65_127_frms;
785/*0x130*/ u64 rx_ttl_128_255_frms;
786/*0x138*/ u64 rx_ttl_256_511_frms;
787/*0x140*/ u64 rx_ttl_512_1023_frms;
788/*0x148*/ u64 rx_ttl_1024_1518_frms;
789/*0x150*/ u64 rx_ttl_1519_4095_frms;
790/*0x158*/ u64 rx_ttl_4096_8191_frms;
791/*0x160*/ u64 rx_ttl_8192_max_frms;
792/*0x168*/ u64 rx_ttl_gt_max_frms;
793/*0x170*/ u64 rx_ip;
794/*0x178*/ u64 rx_accepted_ip;
795/*0x180*/ u64 rx_ip_octets;
796/*0x188*/ u64 rx_err_ip;
797/*0x190*/ u64 rx_icmp;
798/*0x198*/ u64 rx_tcp;
799/*0x1a0*/ u64 rx_udp;
800/*0x1a8*/ u64 rx_err_tcp;
801/*0x1b0*/ u64 rx_pause_count;
802/*0x1b8*/ u64 rx_pause_ctrl_frms;
803/*0x1c0*/ u64 rx_unsup_ctrl_frms;
804/*0x1c8*/ u64 rx_fcs_err_frms;
805/*0x1d0*/ u64 rx_in_rng_len_err_frms;
806/*0x1d8*/ u64 rx_out_rng_len_err_frms;
807/*0x1e0*/ u64 rx_drop_frms;
808/*0x1e8*/ u64 rx_discarded_frms;
809/*0x1f0*/ u64 rx_drop_ip;
810/*0x1f8*/ u64 rx_drop_udp;
811/*0x200*/ u32 rx_marker_pdu_frms;
812/*0x204*/ u32 rx_lacpdu_frms;
813/*0x208*/ u32 rx_unknown_pdu_frms;
814/*0x20c*/ u32 rx_marker_resp_pdu_frms;
815/*0x210*/ u32 rx_fcs_discard;
816/*0x214*/ u32 rx_illegal_pdu_frms;
817/*0x218*/ u32 rx_switch_discard;
818/*0x21c*/ u32 rx_len_discard;
819/*0x220*/ u32 rx_rpa_discard;
820/*0x224*/ u32 rx_l2_mgmt_discard;
821/*0x228*/ u32 rx_rts_discard;
822/*0x22c*/ u32 rx_trash_discard;
823/*0x230*/ u32 rx_buff_full_discard;
824/*0x234*/ u32 rx_red_discard;
825/*0x238*/ u32 rx_xgmii_ctrl_err_cnt;
826/*0x23c*/ u32 rx_xgmii_data_err_cnt;
827/*0x240*/ u32 rx_xgmii_char1_match;
828/*0x244*/ u32 rx_xgmii_err_sym;
829/*0x248*/ u32 rx_xgmii_column1_match;
830/*0x24c*/ u32 rx_xgmii_char2_match;
831/*0x250*/ u32 rx_local_fault;
832/*0x254*/ u32 rx_xgmii_column2_match;
833/*0x258*/ u32 rx_jettison;
834/*0x25c*/ u32 rx_remote_fault;
835} __packed;
836
837/**
838 * struct vxge_hw_xmac_vpath_tx_stats - XMAC Vpath Tx Statistics
839 *
840 * @tx_ttl_eth_frms: Count of successfully transmitted MAC frames.
841 * @tx_ttl_eth_octets: Count of total octets of transmitted frames,
842 * not including framing characters (i.e. less framing bits).
843 * To determine the total octets of transmitted frames, including
844 * framing characters, multiply TX_TTL_ETH_FRMS by 8 and add it to
845 * this stat (the device always prepends 8 bytes of preamble for
846 * each frame)
847 * @tx_data_octets: Count of data and padding octets of successfully transmitted
848 * frames.
849 * @tx_mcast_frms: Count of successfully transmitted frames to a group address
850 * other than the broadcast address.
851 * @tx_bcast_frms: Count of successfully transmitted frames to the broadcast
852 * group address.
853 * @tx_ucast_frms: Count of transmitted frames containing a unicast address.
854 * Includes discarded frames that are not sent to the network.
855 * @tx_tagged_frms: Count of transmitted frames containing a VLAN tag.
856 * @tx_vld_ip: Count of transmitted IP datagrams that are passed to the network.
857 * @tx_vld_ip_octets: Count of total octets of transmitted IP datagrams that
858 * are passed to the network.
859 * @tx_icmp: Count of transmitted ICMP messages. Includes messages not sent due
860 * to problems within ICMP.
861 * @tx_tcp: Count of transmitted TCP segments. Does not include segments
862 * containing retransmitted octets.
863 * @tx_rst_tcp: Count of transmitted TCP segments containing the RST flag.
864 * @tx_udp: Count of transmitted UDP datagrams.
865 * @tx_unknown_protocol: Increments when the TPA encounters an unknown protocol,
866 * such as a new IPv6 extension header, or an unsupported Routing
867 * Type. The packet still has a checksum calculated but it may be
868 * incorrect.
869 * @tx_lost_ip: Count of transmitted IP datagrams that could not be passed
870 * to the network. Increments because of: 1) An internal processing
871 * error (such as an uncorrectable ECC error). 2) A frame parsing
872 * error during IP checksum calculation.
873 * @tx_parse_error: Increments when the TPA is unable to parse a packet. This
874 * generally occurs when a packet is corrupt somehow, including
875 * packets that have IP version mismatches, invalid Layer 2 control
876 * fields, etc. L3/L4 checksums are not offloaded, but the packet
877 * is still transmitted.
878 * @tx_tcp_offload: For frames belonging to offloaded sessions only, a count
879 * of transmitted TCP segments. Does not include segments containing
880 * retransmitted octets.
881 * @tx_retx_tcp_offload: For frames belonging to offloaded sessions only, the
882 * total number of segments retransmitted. Retransmitted segments
883 * that are sourced by the host are counted by the host.
884 * @tx_lost_ip_offload: For frames belonging to offloaded sessions only, a count
885 * of transmitted IP datagrams that could not be passed to the
886 * network.
887 *
888 * XMAC Vpath TX Statistics.
889 */
890struct vxge_hw_xmac_vpath_tx_stats {
891 u64 tx_ttl_eth_frms;
892 u64 tx_ttl_eth_octets;
893 u64 tx_data_octets;
894 u64 tx_mcast_frms;
895 u64 tx_bcast_frms;
896 u64 tx_ucast_frms;
897 u64 tx_tagged_frms;
898 u64 tx_vld_ip;
899 u64 tx_vld_ip_octets;
900 u64 tx_icmp;
901 u64 tx_tcp;
902 u64 tx_rst_tcp;
903 u64 tx_udp;
904 u32 tx_unknown_protocol;
905 u32 tx_lost_ip;
906 u32 unused1;
907 u32 tx_parse_error;
908 u64 tx_tcp_offload;
909 u64 tx_retx_tcp_offload;
910 u64 tx_lost_ip_offload;
911} __packed;
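/*
 * Illustrative sketch: the kernel-doc for @tx_ttl_eth_octets above notes
 * that the device prepends 8 bytes of preamble per transmitted frame, so
 * a wire-level octet count including framing can be derived as below.
 * The helper name is illustrative and not part of the driver API.
 */
static inline u64
vxge_example_tx_octets_with_framing(
			const struct vxge_hw_xmac_vpath_tx_stats *stats)
{
	/* tx_ttl_eth_octets excludes framing; add 8 preamble bytes per frame */
	return stats->tx_ttl_eth_octets + 8ULL * stats->tx_ttl_eth_frms;
}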
912
913/**
914 * struct vxge_hw_xmac_vpath_rx_stats - XMAC Vpath RX Statistics
915 *
916 * @rx_ttl_eth_frms: Count of successfully received MAC frames.
917 * @rx_vld_frms: Count of successfully received MAC frames. Does not include
918 * frames received with frame-too-long, FCS, or length errors.
919 * @rx_offload_frms: Count of offloaded received frames that are passed to
920 * the host.
921 * @rx_ttl_eth_octets: Count of total octets of received frames, not including
922 * framing characters (i.e. less framing bits). Only counts octets
923 * of frames that are at least 14 bytes (18 bytes for VLAN-tagged)
924 * before FCS. To determine the total octets of received frames,
925 * including framing characters, multiply RX_TTL_ETH_FRMS by 8 and
926 * add it to this stat (the stat RX_TTL_ETH_FRMS only counts frames
927 * that have the required 8 bytes of preamble).
928 * @rx_data_octets: Count of data and padding octets of successfully received
929 * frames. Does not include frames received with frame-too-long,
930 * FCS, or length errors.
931 * @rx_offload_octets: Count of total octets, not including framing characters,
932 * of offloaded received frames that are passed to the host.
933 * @rx_vld_mcast_frms: Count of successfully received MAC frames containing a
934 * nonbroadcast group address. Does not include frames received with
935 * frame-too-long, FCS, or length errors.
936 * @rx_vld_bcast_frms: Count of successfully received MAC frames containing the
937 * broadcast group address. Does not include frames received with
938 * frame-too-long, FCS, or length errors.
939 * @rx_accepted_ucast_frms: Count of successfully received frames containing
940 * a unicast address. Only includes frames that are passed to the
941 * system.
942 * @rx_accepted_nucast_frms: Count of successfully received frames containing
943 * a non-unicast (broadcast or multicast) address. Only includes
944 * frames that are passed to the system. Could include, for instance,
945 * non-unicast frames that contain FCS errors if the MAC_ERROR_CFG
946 * register is set to pass FCS-errored frames to the host.
947 * @rx_tagged_frms: Count of received frames containing a VLAN tag.
948 * @rx_long_frms: Count of received frames that are longer than RX_MAX_PYLD_LEN
949 * + 18 bytes (+ 22 bytes if VLAN-tagged).
950 * @rx_usized_frms: Count of received frames of length (including FCS, but not
951 * framing bits) less than 64 octets, that are otherwise well-formed.
952 * In other words, counts runts.
953 * @rx_osized_frms: Count of received frames of length (including FCS, but not
954 * framing bits) more than 1518 octets, that are otherwise
955 * well-formed.
956 * @rx_frag_frms: Count of received frames of length (including FCS, but not
957 * framing bits) less than 64 octets that had bad FCS.
958 * In other words, counts fragments.
959 * @rx_jabber_frms: Count of received frames of length (including FCS, but not
960 * framing bits) more than 1518 octets that had bad FCS. In other
961 * words, counts jabbers.
962 * @rx_ttl_64_frms: Count of total received MAC frames with length (including
963 * FCS, but not framing bits) of exactly 64 octets. Includes frames
964 * received with frame-too-long, FCS, or length errors.
965 * @rx_ttl_65_127_frms: Count of total received MAC frames
966 * with length (including
967 * FCS, but not framing bits) of between 65 and 127 octets inclusive.
968 * Includes frames received with frame-too-long, FCS,
969 * or length errors.
970 * @rx_ttl_128_255_frms: Count of total received MAC frames with length
971 * (including FCS, but not framing bits)
972 * of between 128 and 255 octets
973 * inclusive. Includes frames received with frame-too-long, FCS,
974 * or length errors.
975 * @rx_ttl_256_511_frms: Count of total received MAC frames with length
976 * (including FCS, but not framing bits)
977 * of between 256 and 511 octets
978 * inclusive. Includes frames received with frame-too-long, FCS, or
979 * length errors.
980 * @rx_ttl_512_1023_frms: Count of total received MAC frames with length
981 * (including FCS, but not framing bits) of between 512 and 1023
982 * octets inclusive. Includes frames received with frame-too-long,
983 * FCS, or length errors.
984 * @rx_ttl_1024_1518_frms: Count of total received MAC frames with length
985 * (including FCS, but not framing bits) of between 1024 and 1518
986 * octets inclusive. Includes frames received with frame-too-long,
987 * FCS, or length errors.
988 * @rx_ttl_1519_4095_frms: Count of total received MAC frames with length
989 * (including FCS, but not framing bits) of between 1519 and 4095
990 * octets inclusive. Includes frames received with frame-too-long,
991 * FCS, or length errors.
992 * @rx_ttl_4096_8191_frms: Count of total received MAC frames with length
993 * (including FCS, but not framing bits) of between 4096 and 8191
994 * octets inclusive. Includes frames received with frame-too-long,
995 * FCS, or length errors.
996 * @rx_ttl_8192_max_frms: Count of total received MAC frames with length
997 * (including FCS, but not framing bits) of between 8192 and
998 * RX_MAX_PYLD_LEN+18 octets inclusive. Includes frames received
999 * with frame-too-long, FCS, or length errors.
1000 * @rx_ttl_gt_max_frms: Count of total received MAC frames with length
1001 * (including FCS, but not framing bits) exceeding RX_MAX_PYLD_LEN+18
1002 * (+22 bytes if VLAN-tagged) octets inclusive. Includes frames
1003 * received with frame-too-long, FCS, or length errors.
1004 * @rx_ip: Count of received IP datagrams. Includes errored IP datagrams.
1005 * @rx_accepted_ip: Count of received IP datagrams that
1006 * are passed to the system.
1007 * @rx_ip_octets: Count of number of octets in received IP datagrams.
1008 * Includes errored IP datagrams.
1009 * @rx_err_ip: Count of received IP datagrams containing errors. For example,
1010 * bad IP checksum.
1011 * @rx_icmp: Count of received ICMP messages. Includes errored ICMP messages.
1012 * @rx_tcp: Count of received TCP segments. Includes errored TCP segments.
1013 * Note: This stat contains a count of all received TCP segments,
1014 * regardless of whether or not they pertain to an established
1015 * connection.
1016 * @rx_udp: Count of received UDP datagrams.
1017 * @rx_err_tcp: Count of received TCP segments containing errors. For example,
1018 * bad TCP checksum.
1019 * @rx_lost_frms: Count of received frames that could not be passed to the host.
1020 * See RX_QUEUE_FULL_DISCARD and RX_RED_DISCARD
1021 * for a list of reasons.
1022 * @rx_lost_ip: Count of received IP datagrams that could not be passed to
1023 * the host. See RX_LOST_FRMS for a list of reasons.
1024 * @rx_lost_ip_offload: For frames belonging to offloaded sessions only, a count
1025 * of received IP datagrams that could not be passed to the host.
1026 * See RX_LOST_FRMS for a list of reasons.
1027 * @rx_various_discard: Count of received frames that are discarded because
1028 * the target receive queue is full.
1029 * @rx_sleep_discard: Count of received frames that are discarded because the
1030 * target VPATH is asleep (a Wake-on-LAN magic packet can be used
1031 * to awaken the VPATH).
1032 * @rx_red_discard: Count of received frames that are discarded because of RED
1033 * (Random Early Discard).
1034 * @rx_queue_full_discard: Count of received frames that are discarded because
1035 * the target receive queue is full.
1036 * @rx_mpa_ok_frms: Count of received frames that pass the MPA checks.
1037 *
1038 * XMAC Vpath RX Statistics.
1039 */
1040struct vxge_hw_xmac_vpath_rx_stats {
1041 u64 rx_ttl_eth_frms;
1042 u64 rx_vld_frms;
1043 u64 rx_offload_frms;
1044 u64 rx_ttl_eth_octets;
1045 u64 rx_data_octets;
1046 u64 rx_offload_octets;
1047 u64 rx_vld_mcast_frms;
1048 u64 rx_vld_bcast_frms;
1049 u64 rx_accepted_ucast_frms;
1050 u64 rx_accepted_nucast_frms;
1051 u64 rx_tagged_frms;
1052 u64 rx_long_frms;
1053 u64 rx_usized_frms;
1054 u64 rx_osized_frms;
1055 u64 rx_frag_frms;
1056 u64 rx_jabber_frms;
1057 u64 rx_ttl_64_frms;
1058 u64 rx_ttl_65_127_frms;
1059 u64 rx_ttl_128_255_frms;
1060 u64 rx_ttl_256_511_frms;
1061 u64 rx_ttl_512_1023_frms;
1062 u64 rx_ttl_1024_1518_frms;
1063 u64 rx_ttl_1519_4095_frms;
1064 u64 rx_ttl_4096_8191_frms;
1065 u64 rx_ttl_8192_max_frms;
1066 u64 rx_ttl_gt_max_frms;
1067 u64 rx_ip;
1068 u64 rx_accepted_ip;
1069 u64 rx_ip_octets;
1070 u64 rx_err_ip;
1071 u64 rx_icmp;
1072 u64 rx_tcp;
1073 u64 rx_udp;
1074 u64 rx_err_tcp;
1075 u64 rx_lost_frms;
1076 u64 rx_lost_ip;
1077 u64 rx_lost_ip_offload;
1078 u16 rx_various_discard;
1079 u16 rx_sleep_discard;
1080 u16 rx_red_discard;
1081 u16 rx_queue_full_discard;
1082 u64 rx_mpa_ok_frms;
1083} __packed;
1084
1085/**
1086 * struct vxge_hw_xmac_stats - XMAC Statistics
1087 *
1088 * @aggr_stats: Statistics on the aggregate ports (port 0, port 1)
1089 * @port_stats: Statistics on the ports (wire 0, wire 1, LAG)
1090 * @vpath_tx_stats: Per vpath XMAC TX stats
1091 * @vpath_rx_stats: Per vpath XMAC RX stats
1092 *
1093 * XMAC Statistics.
1094 */
1095struct vxge_hw_xmac_stats {
1096 struct vxge_hw_xmac_aggr_stats
1097 aggr_stats[VXGE_HW_MAC_MAX_MAC_PORT_ID];
1098 struct vxge_hw_xmac_port_stats
1099 port_stats[VXGE_HW_MAC_MAX_MAC_PORT_ID+1];
1100 struct vxge_hw_xmac_vpath_tx_stats
1101 vpath_tx_stats[VXGE_HW_MAX_VIRTUAL_PATHS];
1102 struct vxge_hw_xmac_vpath_rx_stats
1103 vpath_rx_stats[VXGE_HW_MAX_VIRTUAL_PATHS];
1104};
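/*
 * Illustrative sketch: struct vxge_hw_xmac_stats groups the aggregate,
 * per-port and per-vpath counters into fixed-size arrays, so a consumer
 * can simply iterate them once the structure has been filled in (for
 * example by vxge_hw_device_xmac_stats_get(), declared later in this
 * file).  The summing helper is illustrative and not part of the API.
 */
static inline u64
vxge_example_total_rx_vld_frms(const struct vxge_hw_xmac_stats *xmac)
{
	u64 total = 0;
	u32 i;

	/* one RX statistics block per virtual path */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
		total += xmac->vpath_rx_stats[i].rx_vld_frms;

	return total;
}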
1105
1106/**
1107 * struct vxge_hw_vpath_stats_hw_info - Titan vpath hardware statistics.
1108 * @ini_num_mwr_sent: The number of PCI memory writes initiated by the PIC block
1109 * for the given VPATH
1110 * @ini_num_mrd_sent: The number of PCI memory reads initiated by the PIC block
1111 * @ini_num_cpl_rcvd: The number of PCI read completions received by the
1112 * PIC block
1113 * @ini_num_mwr_byte_sent: The number of PCI memory write bytes sent by the PIC
1114 * block to the host
1115 * @ini_num_cpl_byte_rcvd: The number of PCI read completion bytes received by
1116 * the PIC block
1117 * @wrcrdtarb_xoff: TBD
1118 * @rdcrdtarb_xoff: TBD
1119 * @vpath_genstats_count0: TBD
1120 * @vpath_genstats_count1: TBD
1121 * @vpath_genstats_count2: TBD
1122 * @vpath_genstats_count3: TBD
1123 * @vpath_genstats_count4: TBD
1124 * @vpath_genstats_count5: TBD
1125 * @tx_stats: Transmit stats
1126 * @rx_stats: Receive stats
1127 * @prog_event_vnum1: Programmable statistic. Increments when internal logic
1128 * detects a certain event. See register
1129 * XMAC_STATS_CFG.EVENT_VNUM1_CFG for more information.
1130 * @prog_event_vnum0: Programmable statistic. Increments when internal logic
1131 * detects a certain event. See register
1132 * XMAC_STATS_CFG.EVENT_VNUM0_CFG for more information.
1133 * @prog_event_vnum3: Programmable statistic. Increments when internal logic
1134 * detects a certain event. See register
1135 * XMAC_STATS_CFG.EVENT_VNUM3_CFG for more information.
1136 * @prog_event_vnum2: Programmable statistic. Increments when internal logic
1137 * detects a certain event. See register
1138 * XMAC_STATS_CFG.EVENT_VNUM2_CFG for more information.
1139 * @rx_multi_cast_frame_discard: TBD
1140 * @rx_frm_transferred: TBD
1141 * @rxd_returned: TBD
1142 * @rx_mpa_len_fail_frms: Count of received frames
1143 * that fail the MPA length check
1144 * @rx_mpa_mrk_fail_frms: Count of received frames
1145 * that fail the MPA marker check
1146 * @rx_mpa_crc_fail_frms: Count of received frames that fail the MPA CRC check
1147 * @rx_permitted_frms: Count of frames that pass through the FAU and on to the
1148 * frame buffer (and subsequently to the host).
1149 * @rx_vp_reset_discarded_frms: Count of receive frames that are discarded
1150 * because the VPATH is in reset
1151 * @rx_wol_frms: Count of received "magic packet" frames. Stat increments
1152 * whenever the received frame matches the VPATH's Wake-on-LAN
1153 * signature(s) CRC.
1154 * @tx_vp_reset_discarded_frms: Count of transmit frames that are discarded
1155 * because the VPATH is in reset. Includes frames that are discarded
1156 * because the current VPIN does not match that VPIN of the frame
1157 *
1158 * Titan vpath hardware statistics.
1159 */
1160struct vxge_hw_vpath_stats_hw_info {
1161/*0x000*/ u32 ini_num_mwr_sent;
1162/*0x004*/ u32 unused1;
1163/*0x008*/ u32 ini_num_mrd_sent;
1164/*0x00c*/ u32 unused2;
1165/*0x010*/ u32 ini_num_cpl_rcvd;
1166/*0x014*/ u32 unused3;
1167/*0x018*/ u64 ini_num_mwr_byte_sent;
1168/*0x020*/ u64 ini_num_cpl_byte_rcvd;
1169/*0x028*/ u32 wrcrdtarb_xoff;
1170/*0x02c*/ u32 unused4;
1171/*0x030*/ u32 rdcrdtarb_xoff;
1172/*0x034*/ u32 unused5;
1173/*0x038*/ u32 vpath_genstats_count0;
1174/*0x03c*/ u32 vpath_genstats_count1;
1175/*0x040*/ u32 vpath_genstats_count2;
1176/*0x044*/ u32 vpath_genstats_count3;
1177/*0x048*/ u32 vpath_genstats_count4;
1178/*0x04c*/ u32 unused6;
1179/*0x050*/ u32 vpath_genstats_count5;
1180/*0x054*/ u32 unused7;
1181/*0x058*/ struct vxge_hw_xmac_vpath_tx_stats tx_stats;
1182/*0x0e8*/ struct vxge_hw_xmac_vpath_rx_stats rx_stats;
1183/*0x220*/ u64 unused9;
1184/*0x228*/ u32 prog_event_vnum1;
1185/*0x22c*/ u32 prog_event_vnum0;
1186/*0x230*/ u32 prog_event_vnum3;
1187/*0x234*/ u32 prog_event_vnum2;
1188/*0x238*/ u16 rx_multi_cast_frame_discard;
1189/*0x23a*/ u8 unused10[6];
1190/*0x240*/ u32 rx_frm_transferred;
1191/*0x244*/ u32 unused11;
1192/*0x248*/ u16 rxd_returned;
1193/*0x24a*/ u8 unused12[6];
1194/*0x252*/ u16 rx_mpa_len_fail_frms;
1195/*0x254*/ u16 rx_mpa_mrk_fail_frms;
1196/*0x256*/ u16 rx_mpa_crc_fail_frms;
1197/*0x258*/ u16 rx_permitted_frms;
1198/*0x25c*/ u64 rx_vp_reset_discarded_frms;
1199/*0x25e*/ u64 rx_wol_frms;
1200/*0x260*/ u64 tx_vp_reset_discarded_frms;
1201} __packed;
1202
1203
1204/**
1205 * struct vxge_hw_device_stats_mrpcim_info - Titan mrpcim hardware statistics.
1206 * @pic.ini_rd_drop 0x0000 4 Number of DMA reads initiated
1207 * by the adapter that were discarded because the VPATH is out of service
1208 * @pic.ini_wr_drop 0x0004 4 Number of DMA writes initiated by the
1209 * adapter that were discarded because the VPATH is out of service
1210 * @pic.wrcrdtarb_ph_crdt_depleted[vplane0] 0x0008 4 Number of times
1211 * the posted header credits for upstream PCI writes were depleted
1212 * @pic.wrcrdtarb_ph_crdt_depleted[vplane1] 0x0010 4 Number of times
1213 * the posted header credits for upstream PCI writes were depleted
1214 * @pic.wrcrdtarb_ph_crdt_depleted[vplane2] 0x0018 4 Number of times
1215 * the posted header credits for upstream PCI writes were depleted
1216 * @pic.wrcrdtarb_ph_crdt_depleted[vplane3] 0x0020 4 Number of times
1217 * the posted header credits for upstream PCI writes were depleted
1218 * @pic.wrcrdtarb_ph_crdt_depleted[vplane4] 0x0028 4 Number of times
1219 * the posted header credits for upstream PCI writes were depleted
1220 * @pic.wrcrdtarb_ph_crdt_depleted[vplane5] 0x0030 4 Number of times
1221 * the posted header credits for upstream PCI writes were depleted
1222 * @pic.wrcrdtarb_ph_crdt_depleted[vplane6] 0x0038 4 Number of times
1223 * the posted header credits for upstream PCI writes were depleted
1224 * @pic.wrcrdtarb_ph_crdt_depleted[vplane7] 0x0040 4 Number of times
1225 * the posted header credits for upstream PCI writes were depleted
1226 * @pic.wrcrdtarb_ph_crdt_depleted[vplane8] 0x0048 4 Number of times
1227 * the posted header credits for upstream PCI writes were depleted
1228 * @pic.wrcrdtarb_ph_crdt_depleted[vplane9] 0x0050 4 Number of times
1229 * the posted header credits for upstream PCI writes were depleted
1230 * @pic.wrcrdtarb_ph_crdt_depleted[vplane10] 0x0058 4 Number of times
1231 * the posted header credits for upstream PCI writes were depleted
1232 * @pic.wrcrdtarb_ph_crdt_depleted[vplane11] 0x0060 4 Number of times
1233 * the posted header credits for upstream PCI writes were depleted
1234 * @pic.wrcrdtarb_ph_crdt_depleted[vplane12] 0x0068 4 Number of times
1235 * the posted header credits for upstream PCI writes were depleted
1236 * @pic.wrcrdtarb_ph_crdt_depleted[vplane13] 0x0070 4 Number of times
1237 * the posted header credits for upstream PCI writes were depleted
1238 * @pic.wrcrdtarb_ph_crdt_depleted[vplane14] 0x0078 4 Number of times
1239 * the posted header credits for upstream PCI writes were depleted
1240 * @pic.wrcrdtarb_ph_crdt_depleted[vplane15] 0x0080 4 Number of times
1241 * the posted header credits for upstream PCI writes were depleted
1242 * @pic.wrcrdtarb_ph_crdt_depleted[vplane16] 0x0088 4 Number of times
1243 * the posted header credits for upstream PCI writes were depleted
1244 * @pic.wrcrdtarb_pd_crdt_depleted[vplane0] 0x0090 4 Number of times
1245 * the posted data credits for upstream PCI writes were depleted
1246 * @pic.wrcrdtarb_pd_crdt_depleted[vplane1] 0x0098 4 Number of times
1247 * the posted data credits for upstream PCI writes were depleted
1248 * @pic.wrcrdtarb_pd_crdt_depleted[vplane2] 0x00a0 4 Number of times
1249 * the posted data credits for upstream PCI writes were depleted
1250 * @pic.wrcrdtarb_pd_crdt_depleted[vplane3] 0x00a8 4 Number of times
1251 * the posted data credits for upstream PCI writes were depleted
1252 * @pic.wrcrdtarb_pd_crdt_depleted[vplane4] 0x00b0 4 Number of times
1253 * the posted data credits for upstream PCI writes were depleted
1254 * @pic.wrcrdtarb_pd_crdt_depleted[vplane5] 0x00b8 4 Number of times
1255 * the posted data credits for upstream PCI writes were depleted
1256 * @pic.wrcrdtarb_pd_crdt_depleted[vplane6] 0x00c0 4 Number of times
1257 * the posted data credits for upstream PCI writes were depleted
1258 * @pic.wrcrdtarb_pd_crdt_depleted[vplane7] 0x00c8 4 Number of times
1259 * the posted data credits for upstream PCI writes were depleted
1260 * @pic.wrcrdtarb_pd_crdt_depleted[vplane8] 0x00d0 4 Number of times
1261 * the posted data credits for upstream PCI writes were depleted
1262 * @pic.wrcrdtarb_pd_crdt_depleted[vplane9] 0x00d8 4 Number of times
1263 * the posted data credits for upstream PCI writes were depleted
1264 * @pic.wrcrdtarb_pd_crdt_depleted[vplane10] 0x00e0 4 Number of times
1265 * the posted data credits for upstream PCI writes were depleted
1266 * @pic.wrcrdtarb_pd_crdt_depleted[vplane11] 0x00e8 4 Number of times
1267 * the posted data credits for upstream PCI writes were depleted
1268 * @pic.wrcrdtarb_pd_crdt_depleted[vplane12] 0x00f0 4 Number of times
1269 * the posted data credits for upstream PCI writes were depleted
1270 * @pic.wrcrdtarb_pd_crdt_depleted[vplane13] 0x00f8 4 Number of times
1271 * the posted data credits for upstream PCI writes were depleted
1272 * @pic.wrcrdtarb_pd_crdt_depleted[vplane14] 0x0100 4 Number of times
1273 * the posted data credits for upstream PCI writes were depleted
1274 * @pic.wrcrdtarb_pd_crdt_depleted[vplane15] 0x0108 4 Number of times
1275 * the posted data credits for upstream PCI writes were depleted
1276 * @pic.wrcrdtarb_pd_crdt_depleted[vplane16] 0x0110 4 Number of times
1277 * the posted data credits for upstream PCI writes were depleted
1278 * @pic.rdcrdtarb_nph_crdt_depleted[vplane0] 0x0118 4 Number of times
1279 * the non-posted header credits for upstream PCI reads were depleted
1280 * @pic.rdcrdtarb_nph_crdt_depleted[vplane1] 0x0120 4 Number of times
1281 * the non-posted header credits for upstream PCI reads were depleted
1282 * @pic.rdcrdtarb_nph_crdt_depleted[vplane2] 0x0128 4 Number of times
1283 * the non-posted header credits for upstream PCI reads were depleted
1284 * @pic.rdcrdtarb_nph_crdt_depleted[vplane3] 0x0130 4 Number of times
1285 * the non-posted header credits for upstream PCI reads were depleted
1286 * @pic.rdcrdtarb_nph_crdt_depleted[vplane4] 0x0138 4 Number of times
1287 * the non-posted header credits for upstream PCI reads were depleted
1288 * @pic.rdcrdtarb_nph_crdt_depleted[vplane5] 0x0140 4 Number of times
1289 * the non-posted header credits for upstream PCI reads were depleted
1290 * @pic.rdcrdtarb_nph_crdt_depleted[vplane6] 0x0148 4 Number of times
1291 * the non-posted header credits for upstream PCI reads were depleted
1292 * @pic.rdcrdtarb_nph_crdt_depleted[vplane7] 0x0150 4 Number of times
1293 * the non-posted header credits for upstream PCI reads were depleted
1294 * @pic.rdcrdtarb_nph_crdt_depleted[vplane8] 0x0158 4 Number of times
1295 * the non-posted header credits for upstream PCI reads were depleted
1296 * @pic.rdcrdtarb_nph_crdt_depleted[vplane9] 0x0160 4 Number of times
1297 * the non-posted header credits for upstream PCI reads were depleted
1298 * @pic.rdcrdtarb_nph_crdt_depleted[vplane10] 0x0168 4 Number of times
1299 * the non-posted header credits for upstream PCI reads were depleted
1300 * @pic.rdcrdtarb_nph_crdt_depleted[vplane11] 0x0170 4 Number of times
1301 * the non-posted header credits for upstream PCI reads were depleted
1302 * @pic.rdcrdtarb_nph_crdt_depleted[vplane12] 0x0178 4 Number of times
1303 * the non-posted header credits for upstream PCI reads were depleted
1304 * @pic.rdcrdtarb_nph_crdt_depleted[vplane13] 0x0180 4 Number of times
1305 * the non-posted header credits for upstream PCI reads were depleted
1306 * @pic.rdcrdtarb_nph_crdt_depleted[vplane14] 0x0188 4 Number of times
1307 * the non-posted header credits for upstream PCI reads were depleted
1308 * @pic.rdcrdtarb_nph_crdt_depleted[vplane15] 0x0190 4 Number of times
1309 * the non-posted header credits for upstream PCI reads were depleted
1310 * @pic.rdcrdtarb_nph_crdt_depleted[vplane16] 0x0198 4 Number of times
1311 * the non-posted header credits for upstream PCI reads were depleted
1312 * @pic.ini_rd_vpin_drop 0x01a0 4 Number of DMA reads initiated by
1313 * the adapter that were discarded because the VPATH instance number does
1314 * not match
1315 * @pic.ini_wr_vpin_drop 0x01a4 4 Number of DMA writes initiated
1316 * by the adapter that were discarded because the VPATH instance number
1317 * does not match
1318 * @pic.genstats_count0 0x01a8 4 Configurable statistic #1. Refer
1319 * to the GENSTATS0_CFG for information on configuring this statistic
1320 * @pic.genstats_count1 0x01ac 4 Configurable statistic #2. Refer
1321 * to the GENSTATS1_CFG for information on configuring this statistic
1322 * @pic.genstats_count2 0x01b0 4 Configurable statistic #3. Refer
1323 * to the GENSTATS2_CFG for information on configuring this statistic
1324 * @pic.genstats_count3 0x01b4 4 Configurable statistic #4. Refer
1325 * to the GENSTATS3_CFG for information on configuring this statistic
1326 * @pic.genstats_count4 0x01b8 4 Configurable statistic #5. Refer
1327 * to the GENSTATS4_CFG for information on configuring this statistic
1328 * @pic.genstats_count5 0x01c0 4 Configurable statistic #6. Refer
1329 * to the GENSTATS5_CFG for information on configuring this statistic
1330 * @pci.rstdrop_cpl 0x01c8 4
1331 * @pci.rstdrop_msg 0x01cc 4
1332 * @pci.rstdrop_client1 0x01d0 4
1333 * @pci.rstdrop_client0 0x01d4 4
1334 * @pci.rstdrop_client2 0x01d8 4
1335 * @pci.depl_cplh[vplane0] 0x01e2 2 Number of times completion
1336 * header credits were depleted
1337 * @pci.depl_nph[vplane0] 0x01e4 2 Number of times non posted
1338 * header credits were depleted
1339 * @pci.depl_ph[vplane0] 0x01e6 2 Number of times the posted
1340 * header credits were depleted
1341 * @pci.depl_cplh[vplane1] 0x01ea 2
1342 * @pci.depl_nph[vplane1] 0x01ec 2
1343 * @pci.depl_ph[vplane1] 0x01ee 2
1344 * @pci.depl_cplh[vplane2] 0x01f2 2
1345 * @pci.depl_nph[vplane2] 0x01f4 2
1346 * @pci.depl_ph[vplane2] 0x01f6 2
1347 * @pci.depl_cplh[vplane3] 0x01fa 2
1348 * @pci.depl_nph[vplane3] 0x01fc 2
1349 * @pci.depl_ph[vplane3] 0x01fe 2
1350 * @pci.depl_cplh[vplane4] 0x0202 2
1351 * @pci.depl_nph[vplane4] 0x0204 2
1352 * @pci.depl_ph[vplane4] 0x0206 2
1353 * @pci.depl_cplh[vplane5] 0x020a 2
1354 * @pci.depl_nph[vplane5] 0x020c 2
1355 * @pci.depl_ph[vplane5] 0x020e 2
1356 * @pci.depl_cplh[vplane6] 0x0212 2
1357 * @pci.depl_nph[vplane6] 0x0214 2
1358 * @pci.depl_ph[vplane6] 0x0216 2
1359 * @pci.depl_cplh[vplane7] 0x021a 2
1360 * @pci.depl_nph[vplane7] 0x021c 2
1361 * @pci.depl_ph[vplane7] 0x021e 2
1362 * @pci.depl_cplh[vplane8] 0x0222 2
1363 * @pci.depl_nph[vplane8] 0x0224 2
1364 * @pci.depl_ph[vplane8] 0x0226 2
1365 * @pci.depl_cplh[vplane9] 0x022a 2
1366 * @pci.depl_nph[vplane9] 0x022c 2
1367 * @pci.depl_ph[vplane9] 0x022e 2
1368 * @pci.depl_cplh[vplane10] 0x0232 2
1369 * @pci.depl_nph[vplane10] 0x0234 2
1370 * @pci.depl_ph[vplane10] 0x0236 2
1371 * @pci.depl_cplh[vplane11] 0x023a 2
1372 * @pci.depl_nph[vplane11] 0x023c 2
1373 * @pci.depl_ph[vplane11] 0x023e 2
1374 * @pci.depl_cplh[vplane12] 0x0242 2
1375 * @pci.depl_nph[vplane12] 0x0244 2
1376 * @pci.depl_ph[vplane12] 0x0246 2
1377 * @pci.depl_cplh[vplane13] 0x024a 2
1378 * @pci.depl_nph[vplane13] 0x024c 2
1379 * @pci.depl_ph[vplane13] 0x024e 2
1380 * @pci.depl_cplh[vplane14] 0x0252 2
1381 * @pci.depl_nph[vplane14] 0x0254 2
1382 * @pci.depl_ph[vplane14] 0x0256 2
1383 * @pci.depl_cplh[vplane15] 0x025a 2
1384 * @pci.depl_nph[vplane15] 0x025c 2
1385 * @pci.depl_ph[vplane15] 0x025e 2
1386 * @pci.depl_cplh[vplane16] 0x0262 2
1387 * @pci.depl_nph[vplane16] 0x0264 2
1388 * @pci.depl_ph[vplane16] 0x0266 2
1389 * @pci.depl_cpld[vplane0] 0x026a 2 Number of times completion data
1390 * credits were depleted
1391 * @pci.depl_npd[vplane0] 0x026c 2 Number of times non posted data
1392 * credits were depleted
1393 * @pci.depl_pd[vplane0] 0x026e 2 Number of times the posted data
1394 * credits were depleted
1395 * @pci.depl_cpld[vplane1] 0x0272 2
1396 * @pci.depl_npd[vplane1] 0x0274 2
1397 * @pci.depl_pd[vplane1] 0x0276 2
1398 * @pci.depl_cpld[vplane2] 0x027a 2
1399 * @pci.depl_npd[vplane2] 0x027c 2
1400 * @pci.depl_pd[vplane2] 0x027e 2
1401 * @pci.depl_cpld[vplane3] 0x0282 2
1402 * @pci.depl_npd[vplane3] 0x0284 2
1403 * @pci.depl_pd[vplane3] 0x0286 2
1404 * @pci.depl_cpld[vplane4] 0x028a 2
1405 * @pci.depl_npd[vplane4] 0x028c 2
1406 * @pci.depl_pd[vplane4] 0x028e 2
1407 * @pci.depl_cpld[vplane5] 0x0292 2
1408 * @pci.depl_npd[vplane5] 0x0294 2
1409 * @pci.depl_pd[vplane5] 0x0296 2
1410 * @pci.depl_cpld[vplane6] 0x029a 2
1411 * @pci.depl_npd[vplane6] 0x029c 2
1412 * @pci.depl_pd[vplane6] 0x029e 2
1413 * @pci.depl_cpld[vplane7] 0x02a2 2
1414 * @pci.depl_npd[vplane7] 0x02a4 2
1415 * @pci.depl_pd[vplane7] 0x02a6 2
1416 * @pci.depl_cpld[vplane8] 0x02aa 2
1417 * @pci.depl_npd[vplane8] 0x02ac 2
1418 * @pci.depl_pd[vplane8] 0x02ae 2
1419 * @pci.depl_cpld[vplane9] 0x02b2 2
1420 * @pci.depl_npd[vplane9] 0x02b4 2
1421 * @pci.depl_pd[vplane9] 0x02b6 2
1422 * @pci.depl_cpld[vplane10] 0x02ba 2
1423 * @pci.depl_npd[vplane10] 0x02bc 2
1424 * @pci.depl_pd[vplane10] 0x02be 2
1425 * @pci.depl_cpld[vplane11] 0x02c2 2
1426 * @pci.depl_npd[vplane11] 0x02c4 2
1427 * @pci.depl_pd[vplane11] 0x02c6 2
1428 * @pci.depl_cpld[vplane12] 0x02ca 2
1429 * @pci.depl_npd[vplane12] 0x02cc 2
1430 * @pci.depl_pd[vplane12] 0x02ce 2
1431 * @pci.depl_cpld[vplane13] 0x02d2 2
1432 * @pci.depl_npd[vplane13] 0x02d4 2
1433 * @pci.depl_pd[vplane13] 0x02d6 2
1434 * @pci.depl_cpld[vplane14] 0x02da 2
1435 * @pci.depl_npd[vplane14] 0x02dc 2
1436 * @pci.depl_pd[vplane14] 0x02de 2
1437 * @pci.depl_cpld[vplane15] 0x02e2 2
1438 * @pci.depl_npd[vplane15] 0x02e4 2
1439 * @pci.depl_pd[vplane15] 0x02e6 2
1440 * @pci.depl_cpld[vplane16] 0x02ea 2
1441 * @pci.depl_npd[vplane16] 0x02ec 2
1442 * @pci.depl_pd[vplane16] 0x02ee 2
1443 * @xgmac_port[3];
1444 * @xgmac_aggr[2];
1445 * @xgmac.global_prog_event_gnum0 0x0ae0 8 Programmable statistic.
1446 * Increments when internal logic detects a certain event. See register
1447 * XMAC_STATS_GLOBAL_CFG.EVENT_GNUM0_CFG for more information.
1448 * @xgmac.global_prog_event_gnum1 0x0ae8 8 Programmable statistic.
1449 * Increments when internal logic detects a certain event. See register
1450 * XMAC_STATS_GLOBAL_CFG.EVENT_GNUM1_CFG for more information.
1451 * @xgmac.orp_lro_events 0x0af8 8
1452 * @xgmac.orp_bs_events 0x0b00 8
1453 * @xgmac.orp_iwarp_events 0x0b08 8
1454 * @xgmac.tx_permitted_frms 0x0b14 4
1455 * @xgmac.port2_tx_any_frms 0x0b1d 1
1456 * @xgmac.port1_tx_any_frms 0x0b1e 1
1457 * @xgmac.port0_tx_any_frms 0x0b1f 1
1458 * @xgmac.port2_rx_any_frms 0x0b25 1
1459 * @xgmac.port1_rx_any_frms 0x0b26 1
1460 * @xgmac.port0_rx_any_frms 0x0b27 1
1461 *
1462 * Titan mrpcim hardware statistics.
1463 */
1464struct vxge_hw_device_stats_mrpcim_info {
1465/*0x0000*/ u32 pic_ini_rd_drop;
1466/*0x0004*/ u32 pic_ini_wr_drop;
1467/*0x0008*/ struct {
1468 /*0x0000*/ u32 pic_wrcrdtarb_ph_crdt_depleted;
1469 /*0x0004*/ u32 unused1;
1470 } pic_wrcrdtarb_ph_crdt_depleted_vplane[17];
1471/*0x0090*/ struct {
1472 /*0x0000*/ u32 pic_wrcrdtarb_pd_crdt_depleted;
1473 /*0x0004*/ u32 unused2;
1474 } pic_wrcrdtarb_pd_crdt_depleted_vplane[17];
1475/*0x0118*/ struct {
1476 /*0x0000*/ u32 pic_rdcrdtarb_nph_crdt_depleted;
1477 /*0x0004*/ u32 unused3;
1478 } pic_rdcrdtarb_nph_crdt_depleted_vplane[17];
1479/*0x01a0*/ u32 pic_ini_rd_vpin_drop;
1480/*0x01a4*/ u32 pic_ini_wr_vpin_drop;
1481/*0x01a8*/ u32 pic_genstats_count0;
1482/*0x01ac*/ u32 pic_genstats_count1;
1483/*0x01b0*/ u32 pic_genstats_count2;
1484/*0x01b4*/ u32 pic_genstats_count3;
1485/*0x01b8*/ u32 pic_genstats_count4;
1486/*0x01bc*/ u32 unused4;
1487/*0x01c0*/ u32 pic_genstats_count5;
1488/*0x01c4*/ u32 unused5;
1489/*0x01c8*/ u32 pci_rstdrop_cpl;
1490/*0x01cc*/ u32 pci_rstdrop_msg;
1491/*0x01d0*/ u32 pci_rstdrop_client1;
1492/*0x01d4*/ u32 pci_rstdrop_client0;
1493/*0x01d8*/ u32 pci_rstdrop_client2;
1494/*0x01dc*/ u32 unused6;
1495/*0x01e0*/ struct {
1496 /*0x0000*/ u16 unused7;
1497 /*0x0002*/ u16 pci_depl_cplh;
1498 /*0x0004*/ u16 pci_depl_nph;
1499 /*0x0006*/ u16 pci_depl_ph;
1500 } pci_depl_h_vplane[17];
1501/*0x0268*/ struct {
1502 /*0x0000*/ u16 unused8;
1503 /*0x0002*/ u16 pci_depl_cpld;
1504 /*0x0004*/ u16 pci_depl_npd;
1505 /*0x0006*/ u16 pci_depl_pd;
1506 } pci_depl_d_vplane[17];
1507/*0x02f0*/ struct vxge_hw_xmac_port_stats xgmac_port[3];
1508/*0x0a10*/ struct vxge_hw_xmac_aggr_stats xgmac_aggr[2];
1509/*0x0ae0*/ u64 xgmac_global_prog_event_gnum0;
1510/*0x0ae8*/ u64 xgmac_global_prog_event_gnum1;
1511/*0x0af0*/ u64 unused7;
1512/*0x0af8*/ u64 unused8;
1513/*0x0b00*/ u64 unused9;
1514/*0x0b08*/ u64 unused10;
1515/*0x0b10*/ u32 unused11;
1516/*0x0b14*/ u32 xgmac_tx_permitted_frms;
1517/*0x0b18*/ u32 unused12;
1518/*0x0b1c*/ u8 unused13;
1519/*0x0b1d*/ u8 xgmac_port2_tx_any_frms;
1520/*0x0b1e*/ u8 xgmac_port1_tx_any_frms;
1521/*0x0b1f*/ u8 xgmac_port0_tx_any_frms;
1522/*0x0b20*/ u32 unused14;
1523/*0x0b24*/ u8 unused15;
1524/*0x0b25*/ u8 xgmac_port2_rx_any_frms;
1525/*0x0b26*/ u8 xgmac_port1_rx_any_frms;
1526/*0x0b27*/ u8 xgmac_port0_rx_any_frms;
1527} __packed;
1528
1529/**
1530 * struct vxge_hw_device_stats_hw_info - Titan hardware statistics.
1531 * @vpath_info: VPath statistics
1532 * @vpath_info_sav: Vpath statistics saved
1533 *
1534 * Titan hardware statistics.
1535 */
1536struct vxge_hw_device_stats_hw_info {
1537 struct vxge_hw_vpath_stats_hw_info
1538 *vpath_info[VXGE_HW_MAX_VIRTUAL_PATHS];
1539 struct vxge_hw_vpath_stats_hw_info
1540 vpath_info_sav[VXGE_HW_MAX_VIRTUAL_PATHS];
1541};
1542
1543/**
1544 * struct vxge_hw_vpath_stats_sw_common_info - HW common
1545 * statistics for queues.
1546 * @full_cnt: Number of times the queue was full
1547 * @usage_cnt: usage count.
1548 * @usage_max: Maximum usage
1549 * @reserve_free_swaps_cnt: Reserve/free swap counter. Internal usage.
1550 * @total_compl_cnt: Total descriptor completion count.
1551 *
1552 * Hw queue counters
1553 * See also: struct vxge_hw_vpath_stats_sw_fifo_info{},
1554 * struct vxge_hw_vpath_stats_sw_ring_info{},
1555 */
1556struct vxge_hw_vpath_stats_sw_common_info {
1557 u32 full_cnt;
1558 u32 usage_cnt;
1559 u32 usage_max;
1560 u32 reserve_free_swaps_cnt;
1561 u32 total_compl_cnt;
1562};
1563
1564/**
1565 * struct vxge_hw_vpath_stats_sw_fifo_info - HW fifo statistics
1566 * @common_stats: Common counters for all queues
1567 * @total_posts: Total number of postings on the queue.
1568 * @total_buffers: Total number of buffers posted.
1569 * @txd_t_code_err_cnt: Array of transmit transfer codes. The position
1570 * (index) in this array reflects the transfer code type, for instance
1571 * 0xA - "loss of link".
1572 * Value txd_t_code_err_cnt[i] reflects the
1573 * number of times the corresponding transfer code was encountered.
1574 *
1575 * HW fifo counters
1576 * See also: struct vxge_hw_vpath_stats_sw_common_info{},
1577 * struct vxge_hw_vpath_stats_sw_ring_info{},
1578 */
1579struct vxge_hw_vpath_stats_sw_fifo_info {
1580 struct vxge_hw_vpath_stats_sw_common_info common_stats;
1581 u32 total_posts;
1582 u32 total_buffers;
1583 u32 txd_t_code_err_cnt[VXGE_HW_DTR_MAX_T_CODE];
1584};
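/*
 * Illustrative sketch: as described in the kernel-doc above,
 * @txd_t_code_err_cnt is indexed by the transmit transfer code itself, so
 * the error count for a particular t-code can be read with a simple
 * bounds-checked lookup.  The helper name is illustrative only.
 */
static inline u32
vxge_example_fifo_tcode_errors(
		const struct vxge_hw_vpath_stats_sw_fifo_info *fifo_stats,
		u8 t_code)
{
	/* the array index is the transfer code; guard out-of-range values */
	return (t_code < VXGE_HW_DTR_MAX_T_CODE) ?
			fifo_stats->txd_t_code_err_cnt[t_code] : 0;
}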
1585
1586/**
1587 * struct vxge_hw_vpath_stats_sw_ring_info - HW ring statistics
1588 * @common_stats: Common counters for all queues
1589 * @rxd_t_code_err_cnt: Array of receive transfer codes. The position
1590 * (index) in this array reflects the transfer code type,
1591 * for instance
1592 * 0x7 - for "invalid receive buffer size", or 0x8 - for ECC.
1593 * Value rxd_t_code_err_cnt[i] reflects the
1594 * number of times the corresponding transfer code was encountered.
1595 *
1596 * HW ring counters
1597 * See also: struct vxge_hw_vpath_stats_sw_common_info{},
1598 * struct vxge_hw_vpath_stats_sw_fifo_info{},
1599 */
1600struct vxge_hw_vpath_stats_sw_ring_info {
1601 struct vxge_hw_vpath_stats_sw_common_info common_stats;
1602 u32 rxd_t_code_err_cnt[VXGE_HW_DTR_MAX_T_CODE];
1603
1604};
1605
1606/**
1607 * struct vxge_hw_vpath_stats_sw_err - HW vpath error statistics
1608 * @unknown_alarms:
1609 * @network_sustained_fault:
1610 * @network_sustained_ok:
1611 * @kdfcctl_fifo0_overwrite:
1612 * @kdfcctl_fifo0_poison:
1613 * @kdfcctl_fifo0_dma_error:
1614 * @dblgen_fifo0_overflow:
1615 * @statsb_pif_chain_error:
1616 * @statsb_drop_timeout:
1617 * @target_illegal_access:
1618 * @ini_serr_det:
1619 * @prc_ring_bumps:
1620 * @prc_rxdcm_sc_err:
1621 * @prc_rxdcm_sc_abort:
1622 * @prc_quanta_size_err:
1623 *
1624 * HW vpath error statistics
1625 */
1626struct vxge_hw_vpath_stats_sw_err {
1627 u32 unknown_alarms;
1628 u32 network_sustained_fault;
1629 u32 network_sustained_ok;
1630 u32 kdfcctl_fifo0_overwrite;
1631 u32 kdfcctl_fifo0_poison;
1632 u32 kdfcctl_fifo0_dma_error;
1633 u32 dblgen_fifo0_overflow;
1634 u32 statsb_pif_chain_error;
1635 u32 statsb_drop_timeout;
1636 u32 target_illegal_access;
1637 u32 ini_serr_det;
1638 u32 prc_ring_bumps;
1639 u32 prc_rxdcm_sc_err;
1640 u32 prc_rxdcm_sc_abort;
1641 u32 prc_quanta_size_err;
1642};
1643
1644/**
1645 * struct vxge_hw_vpath_stats_sw_info - HW vpath sw statistics
1646 * @soft_reset_cnt: Number of times soft reset is done on this vpath.
1647 * @error_stats: error counters for the vpath
1648 * @ring_stats: counters for ring belonging to the vpath
1649 * @fifo_stats: counters for fifo belonging to the vpath
1650 *
1651 * HW vpath sw statistics
1652 * See also: struct vxge_hw_device_info{}.
1653 */
1654struct vxge_hw_vpath_stats_sw_info {
1655 u32 soft_reset_cnt;
1656 struct vxge_hw_vpath_stats_sw_err error_stats;
1657 struct vxge_hw_vpath_stats_sw_ring_info ring_stats;
1658 struct vxge_hw_vpath_stats_sw_fifo_info fifo_stats;
1659};
1660
1661/**
1662 * struct vxge_hw_device_stats_sw_info - HW own per-device statistics.
1663 *
1664 * @not_traffic_intr_cnt: Number of times the host was interrupted
1665 * without new completions.
1666 * "Non-traffic interrupt counter".
1667 * @traffic_intr_cnt: Number of traffic interrupts for the device.
1668 * @total_intr_cnt: Total number of interrupts for the device.
1669 * @total_intr_cnt == @traffic_intr_cnt +
1670 * @not_traffic_intr_cnt
1671 * @soft_reset_cnt: Number of times soft reset is done on this device.
1672 * @vpath_info: please see struct vxge_hw_vpath_stats_sw_info{}
1673 * HW per-device statistics.
1674 */
1675struct vxge_hw_device_stats_sw_info {
1676 u32 not_traffic_intr_cnt;
1677 u32 traffic_intr_cnt;
1678 u32 total_intr_cnt;
1679 u32 soft_reset_cnt;
1680 struct vxge_hw_vpath_stats_sw_info
1681 vpath_info[VXGE_HW_MAX_VIRTUAL_PATHS];
1682};
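/*
 * Illustrative sketch: the kernel-doc above states that @total_intr_cnt is
 * the sum of @traffic_intr_cnt and @not_traffic_intr_cnt.  A consistency
 * check of that relationship (illustrative only) would be:
 */
static inline bool
vxge_example_intr_counts_consistent(
		const struct vxge_hw_device_stats_sw_info *sw_stats)
{
	return sw_stats->total_intr_cnt ==
		sw_stats->traffic_intr_cnt + sw_stats->not_traffic_intr_cnt;
}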
1683
1684/**
1685 * struct vxge_hw_device_stats_sw_err - HW device error statistics.
1686 * @vpath_alarms: Number of vpath alarms
1687 *
1688 * HW Device error stats
1689 */
1690struct vxge_hw_device_stats_sw_err {
1691 u32 vpath_alarms;
1692};
1693
1694/**
1695 * struct vxge_hw_device_stats - Contains HW per-device statistics,
1696 * including hw.
1697 * @devh: HW device handle.
1698 * @dma_addr: DMA address of the %hw_info. Given to device to fill-in the stats.
1699 * @hw_info_dmah: DMA handle used to map hw statistics onto the device memory
1700 * space.
1701 * @hw_info_dma_acch: One more DMA handle used subsequently to free the
1702 * DMA object. Note that this and the previous handle have
1703 * physical meaning for Solaris; on Windows and Linux the
1704 * corresponding value is simply a pointer to the PCI device.
1705 *
1706 * @hw_dev_info_stats: Titan statistics maintained by the hardware.
1707 * @sw_dev_info_stats: HW's "soft" device informational statistics, e.g. number
1708 * of completions per interrupt.
1709 * @sw_dev_err_stats: HW's "soft" device error statistics.
1710 *
1711 * Structure-container of HW per-device statistics. Note that per-channel
1712 * statistics are kept in separate structures under HW's fifo and ring
1713 * channels.
1714 */
1715struct vxge_hw_device_stats {
1716 /* handles */
1717 struct __vxge_hw_device *devh;
1718
1719 /* HW device hardware statistics */
1720 struct vxge_hw_device_stats_hw_info hw_dev_info_stats;
1721
1722 /* HW device "soft" stats */
1723 struct vxge_hw_device_stats_sw_err sw_dev_err_stats;
1724 struct vxge_hw_device_stats_sw_info sw_dev_info_stats;
1725
1726};
1727
1728enum vxge_hw_status vxge_hw_device_hw_stats_enable(
1729 struct __vxge_hw_device *devh);
1730
1731enum vxge_hw_status vxge_hw_device_stats_get(
1732 struct __vxge_hw_device *devh,
1733 struct vxge_hw_device_stats_hw_info *hw_stats);
1734
1735enum vxge_hw_status vxge_hw_driver_stats_get(
1736 struct __vxge_hw_device *devh,
1737 struct vxge_hw_device_stats_sw_info *sw_stats);
1738
1739enum vxge_hw_status vxge_hw_mrpcim_stats_enable(struct __vxge_hw_device *devh);
1740
1741enum vxge_hw_status vxge_hw_mrpcim_stats_disable(struct __vxge_hw_device *devh);
1742
1743enum vxge_hw_status
1744vxge_hw_mrpcim_stats_access(
1745 struct __vxge_hw_device *devh,
1746 u32 operation,
1747 u32 location,
1748 u32 offset,
1749 u64 *stat);
1750
1751enum vxge_hw_status
1752vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *devh,
1753 struct vxge_hw_xmac_stats *xmac_stats);
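/*
 * Illustrative sketch: a typical call sequence for the statistics APIs
 * declared above: enable hardware statistics once, then fetch the device
 * and XMAC counters on demand.  Assumes VXGE_HW_OK is the success status;
 * the function and variable names below are illustrative only.
 */
static enum vxge_hw_status
vxge_example_collect_stats(struct __vxge_hw_device *devh,
			   struct vxge_hw_device_stats_hw_info *hw_stats,
			   struct vxge_hw_xmac_stats *xmac_stats)
{
	enum vxge_hw_status status;

	status = vxge_hw_device_hw_stats_enable(devh);
	if (status != VXGE_HW_OK)
		return status;

	status = vxge_hw_device_stats_get(devh, hw_stats);
	if (status != VXGE_HW_OK)
		return status;

	/* per-port and per-vpath XMAC counters */
	return vxge_hw_device_xmac_stats_get(devh, xmac_stats);
}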
1754
1755/**
1756 * enum vxge_hw_mgmt_reg_type - Register types.
1757 *
1758 * @vxge_hw_mgmt_reg_type_legacy: Legacy registers
1759 * @vxge_hw_mgmt_reg_type_toc: TOC Registers
1760 * @vxge_hw_mgmt_reg_type_common: Common Registers
1761 * @vxge_hw_mgmt_reg_type_mrpcim: mrpcim registers
1762 * @vxge_hw_mgmt_reg_type_srpcim: srpcim registers
1763 * @vxge_hw_mgmt_reg_type_vpmgmt: vpath management registers
1764 * @vxge_hw_mgmt_reg_type_vpath: vpath registers
1765 *
1766 * Register type enumeration
1767 */
1768enum vxge_hw_mgmt_reg_type {
1769 vxge_hw_mgmt_reg_type_legacy = 0,
1770 vxge_hw_mgmt_reg_type_toc = 1,
1771 vxge_hw_mgmt_reg_type_common = 2,
1772 vxge_hw_mgmt_reg_type_mrpcim = 3,
1773 vxge_hw_mgmt_reg_type_srpcim = 4,
1774 vxge_hw_mgmt_reg_type_vpmgmt = 5,
1775 vxge_hw_mgmt_reg_type_vpath = 6
1776};
1777
1778enum vxge_hw_status
1779vxge_hw_mgmt_reg_read(struct __vxge_hw_device *devh,
1780 enum vxge_hw_mgmt_reg_type type,
1781 u32 index,
1782 u32 offset,
1783 u64 *value);
1784
1785enum vxge_hw_status
1786vxge_hw_mgmt_reg_write(struct __vxge_hw_device *devh,
1787 enum vxge_hw_mgmt_reg_type type,
1788 u32 index,
1789 u32 offset,
1790 u64 value);
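/*
 * Illustrative sketch: reading a register through the management interface
 * declared above.  The register type selects the register block and the
 * index selects the instance (here a vpath number); the offset passed in
 * is a caller-supplied placeholder, not a specific register offset.
 */
static enum vxge_hw_status
vxge_example_read_vpath_reg(struct __vxge_hw_device *devh, u32 vp_id,
			    u32 offset, u64 *value)
{
	return vxge_hw_mgmt_reg_read(devh, vxge_hw_mgmt_reg_type_vpath,
				     vp_id, offset, value);
}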
1791
1792/**
1793 * enum vxge_hw_rxd_state - Descriptor (RXD) state.
1794 * @VXGE_HW_RXD_STATE_NONE: Invalid state.
1795 * @VXGE_HW_RXD_STATE_AVAIL: Descriptor is available for reservation.
1796 * @VXGE_HW_RXD_STATE_POSTED: Descriptor is posted for processing by the
1797 * device.
1798 * @VXGE_HW_RXD_STATE_FREED: Descriptor is free and can be reused for
1799 * filling-in and posting later.
1800 *
1801 * Titan/HW descriptor states.
1802 *
1803 */
1804enum vxge_hw_rxd_state {
1805 VXGE_HW_RXD_STATE_NONE = 0,
1806 VXGE_HW_RXD_STATE_AVAIL = 1,
1807 VXGE_HW_RXD_STATE_POSTED = 2,
1808 VXGE_HW_RXD_STATE_FREED = 3
1809};
1810
1811/**
1812 * struct vxge_hw_ring_rxd_info - Extended information associated with a
1813 * completed ring descriptor.
1814 * @syn_flag: SYN flag
1815 * @is_icmp: Is ICMP
1816 * @fast_path_eligible: Fast Path Eligible flag
1817 * @l3_cksum_valid: Set if the L3 checksum is valid.
1818 * @l3_cksum: Result of IP checksum check (by Titan hardware).
1819 * This field containing VXGE_HW_L3_CKSUM_OK would mean that
1820 * the checksum is correct, otherwise - the datagram is
1821 * corrupted.
1822 * @l4_cksum_valid: Set if the L4 checksum is valid.
1823 * @l4_cksum: Result of TCP/UDP checksum check (by Titan hardware).
1824 * This field containing VXGE_HW_L4_CKSUM_OK would mean that
1825 * the checksum is correct. Otherwise - the packet is
1826 * corrupted.
1827 * @frame: Zero or more of enum vxge_hw_frame_type flags.
1828 * See enum vxge_hw_frame_type{}.
1829 * @proto: zero or more of enum vxge_hw_frame_proto flags. Reporting bits for
1830 * various higher-layer protocols, including (but not restricted to)
1831 * TCP and UDP. See enum vxge_hw_frame_proto{}.
1832 * @is_vlan: If vlan tag is valid
1833 * @vlan: VLAN tag extracted from the received frame.
1834 * @rth_bucket: RTH bucket
1835 * @rth_it_hit: Set if the RTH hash value calculated by the Titan hardware
1836 * has a matching entry in the Indirection table.
1837 * @rth_spdm_hit: Set if the RTH hash value calculated by the Titan hardware
1838 * has a matching entry in the Socket Pair Direct Match table.
1839 * @rth_hash_type: RTH hash code of the function used to calculate the hash.
1840 * @rth_value: Receive Traffic Hashing (RTH) hash value. Produced by Titan
1841 * hardware if RTH is enabled.
1842 */
1843struct vxge_hw_ring_rxd_info {
1844 u32 syn_flag;
1845 u32 is_icmp;
1846 u32 fast_path_eligible;
1847 u32 l3_cksum_valid;
1848 u32 l3_cksum;
1849 u32 l4_cksum_valid;
1850 u32 l4_cksum;
1851 u32 frame;
1852 u32 proto;
1853 u32 is_vlan;
1854 u32 vlan;
1855 u32 rth_bucket;
1856 u32 rth_it_hit;
1857 u32 rth_spdm_hit;
1858 u32 rth_hash_type;
1859 u32 rth_value;
1860};
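/*
 * Illustrative sketch: interpreting the checksum fields of a completed
 * receive descriptor as described in the kernel-doc above, where
 * VXGE_HW_L3_CKSUM_OK and VXGE_HW_L4_CKSUM_OK are the "checksum correct"
 * values referenced there.  The helper name is illustrative only.
 */
static inline bool
vxge_example_rxd_csum_ok(const struct vxge_hw_ring_rxd_info *ext_info)
{
	return ext_info->l3_cksum_valid &&
	       ext_info->l3_cksum == VXGE_HW_L3_CKSUM_OK &&
	       ext_info->l4_cksum_valid &&
	       ext_info->l4_cksum == VXGE_HW_L4_CKSUM_OK;
}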
1861/**
1862 * enum vxge_hw_ring_tcode - Transfer codes returned by adapter
1863 * @VXGE_HW_RING_T_CODE_OK: Transfer ok.
1864 * @VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH: Layer 3 checksum presentation
1865 * configuration mismatch.
1866 * @VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH: Layer 4 checksum presentation
1867 * configuration mismatch.
1868 * @VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH: Layer 3 and Layer 4 checksum
1869 * presentation configuration mismatch.
1870 * @VXGE_HW_RING_T_CODE_L3_PKT_ERR: Layer 3 error: unparseable packet,
1871 * such as an unknown IPv6 header.
1872 * @VXGE_HW_RING_T_CODE_L2_FRM_ERR: Layer 2 error: frame integrity
1873 * error (such as FCS or ECC).
1874 * @VXGE_HW_RING_T_CODE_BUF_SIZE_ERR: Buffer size error: the RxD
1875 * buffer(s) were not appropriately sized and data loss occurred.
1876 * @VXGE_HW_RING_T_CODE_INT_ECC_ERR: Internal ECC error: RxD corrupted.
1877 * @VXGE_HW_RING_T_CODE_BENIGN_OVFLOW: Benign overflow: the contents of
1878 * Segment1 exceeded the capacity of Buffer1 and the remainder
1879 * was placed in Buffer2. Segment2 now starts in Buffer3.
1880 * No data loss or errors occurred.
1881 * @VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF: Buffer size 0: one of the RxD's
1882 * assigned buffers has a size of 0 bytes.
1883 * @VXGE_HW_RING_T_CODE_FRM_DROP: Frame dropped, either due to
1884 * VPath Reset or because of a VPIN mismatch.
1885 * @VXGE_HW_RING_T_CODE_UNUSED: Unused
1886 * @VXGE_HW_RING_T_CODE_MULTI_ERR: Multiple errors: more than one
1887 * transfer code condition occurred.
1888 *
1889 * Transfer codes returned by adapter.
1890 */
1891enum vxge_hw_ring_tcode {
1892 VXGE_HW_RING_T_CODE_OK = 0x0,
1893 VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH = 0x1,
1894 VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH = 0x2,
1895 VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH = 0x3,
1896 VXGE_HW_RING_T_CODE_L3_PKT_ERR = 0x5,
1897 VXGE_HW_RING_T_CODE_L2_FRM_ERR = 0x6,
1898 VXGE_HW_RING_T_CODE_BUF_SIZE_ERR = 0x7,
1899 VXGE_HW_RING_T_CODE_INT_ECC_ERR = 0x8,
1900 VXGE_HW_RING_T_CODE_BENIGN_OVFLOW = 0x9,
1901 VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF = 0xA,
1902 VXGE_HW_RING_T_CODE_FRM_DROP = 0xC,
1903 VXGE_HW_RING_T_CODE_UNUSED = 0xE,
1904 VXGE_HW_RING_T_CODE_MULTI_ERR = 0xF
1905};
1906
1907enum vxge_hw_status vxge_hw_ring_rxd_reserve(
1908 struct __vxge_hw_ring *ring_handle,
1909 void **rxdh);
1910
1911void
1912vxge_hw_ring_rxd_pre_post(
1913 struct __vxge_hw_ring *ring_handle,
1914 void *rxdh);
1915
1916void
1917vxge_hw_ring_rxd_post_post(
1918 struct __vxge_hw_ring *ring_handle,
1919 void *rxdh);
1920
1921enum vxge_hw_status
1922vxge_hw_ring_replenish(struct __vxge_hw_ring *ring_handle);
1923
1924void
1925vxge_hw_ring_rxd_post_post_wmb(
1926 struct __vxge_hw_ring *ring_handle,
1927 void *rxdh);
1928
1929void vxge_hw_ring_rxd_post(
1930 struct __vxge_hw_ring *ring_handle,
1931 void *rxdh);
1932
1933enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
1934 struct __vxge_hw_ring *ring_handle,
1935 void **rxdh,
1936 u8 *t_code);
1937
1938enum vxge_hw_status vxge_hw_ring_handle_tcode(
1939 struct __vxge_hw_ring *ring_handle,
1940 void *rxdh,
1941 u8 t_code);
1942
1943void vxge_hw_ring_rxd_free(
1944 struct __vxge_hw_ring *ring_handle,
1945 void *rxdh);
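/*
 * Illustrative sketch: a minimal receive completion loop built from the
 * ring APIs declared above.  Completed descriptors are drained, non-OK
 * transfer codes are handed to the t-code handler, and the ring is
 * replenished afterwards.  Assumes VXGE_HW_OK is the success status; how
 * the completed frame is consumed is left to the caller.
 */
static void vxge_example_ring_poll(struct __vxge_hw_ring *ring)
{
	void *rxdh;
	u8 t_code;

	while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
	       VXGE_HW_OK) {
		if (t_code != VXGE_HW_RING_T_CODE_OK)
			vxge_hw_ring_handle_tcode(ring, rxdh, t_code);

		/* ... hand the frame associated with rxdh to the stack ... */

		vxge_hw_ring_rxd_free(ring, rxdh);
	}

	/* re-post fresh receive buffers */
	vxge_hw_ring_replenish(ring);
}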
1946
1947/**
1948 * enum vxge_hw_frame_proto - Higher-layer ethernet protocols.
1949 * @VXGE_HW_FRAME_PROTO_VLAN_TAGGED: VLAN.
1950 * @VXGE_HW_FRAME_PROTO_IPV4: IPv4.
1951 * @VXGE_HW_FRAME_PROTO_IPV6: IPv6.
1952 * @VXGE_HW_FRAME_PROTO_IP_FRAG: IP fragmented.
1953 * @VXGE_HW_FRAME_PROTO_TCP: TCP.
1954 * @VXGE_HW_FRAME_PROTO_UDP: UDP.
1955 * @VXGE_HW_FRAME_PROTO_TCP_OR_UDP: TCP or UDP.
1956 *
1957 * Higher layer ethernet protocols and options.
1958 */
1959enum vxge_hw_frame_proto {
1960 VXGE_HW_FRAME_PROTO_VLAN_TAGGED = 0x80,
1961 VXGE_HW_FRAME_PROTO_IPV4 = 0x10,
1962 VXGE_HW_FRAME_PROTO_IPV6 = 0x08,
1963 VXGE_HW_FRAME_PROTO_IP_FRAG = 0x04,
1964 VXGE_HW_FRAME_PROTO_TCP = 0x02,
1965 VXGE_HW_FRAME_PROTO_UDP = 0x01,
1966 VXGE_HW_FRAME_PROTO_TCP_OR_UDP = (VXGE_HW_FRAME_PROTO_TCP | \
1967 VXGE_HW_FRAME_PROTO_UDP)
1968};
1969
1970/**
1971 * enum vxge_hw_fifo_gather_code - Gather codes used in fifo TxD
1972 * @VXGE_HW_FIFO_GATHER_CODE_FIRST: First TxDL
1973 * @VXGE_HW_FIFO_GATHER_CODE_MIDDLE: Middle TxDL
1974 * @VXGE_HW_FIFO_GATHER_CODE_LAST: Last TxDL
1975 * @VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST: First and Last TxDL.
1976 *
1977 * These gather codes are used to indicate the position of a TxD in a TxD list
1978 */
1979enum vxge_hw_fifo_gather_code {
1980 VXGE_HW_FIFO_GATHER_CODE_FIRST = 0x2,
1981 VXGE_HW_FIFO_GATHER_CODE_MIDDLE = 0x0,
1982 VXGE_HW_FIFO_GATHER_CODE_LAST = 0x1,
1983 VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST = 0x3
1984};
1985
1986/**
1987 * enum vxge_hw_fifo_tcode - tcodes used in fifo
1988 * @VXGE_HW_FIFO_T_CODE_OK: Transfer OK
1989 * @VXGE_HW_FIFO_T_CODE_PCI_READ_CORRUPT: PCI read transaction (either TxD or
1990 * frame data) returned with corrupt data.
1991 * @VXGE_HW_FIFO_T_CODE_PCI_READ_FAIL: PCI read transaction was returned
1992 * with no data.
1993 * @VXGE_HW_FIFO_T_CODE_INVALID_MSS: The host attempted to send either a
1994 * frame or LSO MSS that was too long (>9800B).
1995 * @VXGE_HW_FIFO_T_CODE_LSO_ERROR: Error detected during TCP/UDP Large Send
1996 * Offload operation, due to improper header template,
1997 * unsupported protocol, etc.
1998 * @VXGE_HW_FIFO_T_CODE_UNUSED: Unused
1999 * @VXGE_HW_FIFO_T_CODE_MULTI_ERROR: Set to 1 by the adapter if multiple
2000 * data buffer transfer errors are encountered (see below).
2001 * Otherwise it is set to 0.
2002 *
2003 * These tcodes are returned by various APIs for TxD status
2004 */
2005enum vxge_hw_fifo_tcode {
2006 VXGE_HW_FIFO_T_CODE_OK = 0x0,
2007 VXGE_HW_FIFO_T_CODE_PCI_READ_CORRUPT = 0x1,
2008 VXGE_HW_FIFO_T_CODE_PCI_READ_FAIL = 0x2,
2009 VXGE_HW_FIFO_T_CODE_INVALID_MSS = 0x3,
2010 VXGE_HW_FIFO_T_CODE_LSO_ERROR = 0x4,
2011 VXGE_HW_FIFO_T_CODE_UNUSED = 0x7,
2012 VXGE_HW_FIFO_T_CODE_MULTI_ERROR = 0x8
2013};
2014
2015enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
2016 struct __vxge_hw_fifo *fifoh,
2017 void **txdlh,
2018 void **txdl_priv);
2019
2020void vxge_hw_fifo_txdl_buffer_set(
2021 struct __vxge_hw_fifo *fifo_handle,
2022 void *txdlh,
2023 u32 frag_idx,
2024 dma_addr_t dma_pointer,
2025 u32 size);
2026
2027void vxge_hw_fifo_txdl_post(
2028 struct __vxge_hw_fifo *fifo_handle,
2029 void *txdlh);
2030
2031u32 vxge_hw_fifo_free_txdl_count_get(
2032 struct __vxge_hw_fifo *fifo_handle);
2033
2034enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
2035 struct __vxge_hw_fifo *fifoh,
2036 void **txdlh,
2037 enum vxge_hw_fifo_tcode *t_code);
2038
2039enum vxge_hw_status vxge_hw_fifo_handle_tcode(
2040 struct __vxge_hw_fifo *fifoh,
2041 void *txdlh,
2042 enum vxge_hw_fifo_tcode t_code);
2043
2044void vxge_hw_fifo_txdl_free(
2045 struct __vxge_hw_fifo *fifoh,
2046 void *txdlh);
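/*
 * Illustrative sketch, not part of the original file: posting a single-
 * fragment frame on a fifo and later reclaiming completed TxDLs.  The
 * buffer is assumed to be DMA-mapped already, unmap/free of the skb is
 * only hinted at, and both "example_*" helpers are hypothetical.
 */
static void example_fifo_xmit(struct __vxge_hw_fifo *fifo,
			      dma_addr_t dma_addr, u32 len)
{
	void *txdlh, *txdl_priv;

	if (vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv) != VXGE_HW_OK)
		return;		/* out of descriptors */

	/* attach fragment 0 and hand the TxDL to the adapter */
	vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 0, dma_addr, len);
	vxge_hw_fifo_txdl_post(fifo, txdlh);
}

static void example_fifo_reclaim(struct __vxge_hw_fifo *fifo)
{
	void *txdlh;
	enum vxge_hw_fifo_tcode t_code;

	while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
	       VXGE_HW_OK) {
		if (t_code != VXGE_HW_FIFO_T_CODE_OK)
			vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);
		/* ... unmap the buffer and free the skb here ... */
		vxge_hw_fifo_txdl_free(fifo, txdlh);
	}
}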
2047
2048/*
2049 * Device
2050 */
2051
2052#define VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET (VXGE_HW_BLOCK_SIZE-8)
2053#define VXGE_HW_RING_MEMBLOCK_IDX_OFFSET (VXGE_HW_BLOCK_SIZE-16)
2054
2055/*
2056 * struct __vxge_hw_ring_rxd_priv - Receive descriptor HW-private data.
2057 * @dma_addr: DMA (mapped) address of _this_ descriptor.
2058 * @dma_handle: DMA handle used to map the descriptor onto device.
2059 * @dma_offset: Descriptor's offset in the memory block. HW allocates
2060 * descriptors in memory blocks of %VXGE_HW_BLOCK_SIZE
2061 * bytes. Each memblock is contiguous DMA-able memory. Each
2062 * memblock contains 1 or more 4KB RxD blocks visible to the
2063 * Titan hardware.
2064 * @dma_object: DMA address and handle of the memory block that contains
2065 * the descriptor. This member is used only in the "checked"
2066 * version of the HW (to enforce certain assertions);
2067 * otherwise it gets compiled out.
2068 * @allocated: 1 if the descriptor is reserved, 0 otherwise. Internal usage.
2069 *
2070 * Per-receive-descriptor HW-private data. HW uses the space to keep DMA
2071 * information associated with the descriptor. Note that the driver can ask HW
2072 * to allocate additional per-descriptor space for its own (driver-specific)
2073 * purposes.
2074 */
2075struct __vxge_hw_ring_rxd_priv {
2076 dma_addr_t dma_addr;
2077 struct pci_dev *dma_handle;
2078 ptrdiff_t dma_offset;
2079#ifdef VXGE_DEBUG_ASSERT
2080 struct vxge_hw_mempool_dma *dma_object;
2081#endif
2082};
2083
2084struct vxge_hw_mempool_cbs {
2085 void (*item_func_alloc)(
2086 struct vxge_hw_mempool *mempoolh,
2087 u32 memblock_index,
2088 struct vxge_hw_mempool_dma *dma_object,
2089 u32 index,
2090 u32 is_last);
2091};
2092
2093#define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
2094 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
2095
2096enum vxge_hw_status
2097__vxge_hw_vpath_rts_table_get(
2098 struct __vxge_hw_vpath_handle *vpath_handle,
2099 u32 action,
2100 u32 rts_table,
2101 u32 offset,
2102 u64 *data1,
2103 u64 *data2);
2104
2105enum vxge_hw_status
2106__vxge_hw_vpath_rts_table_set(
2107 struct __vxge_hw_vpath_handle *vpath_handle,
2108 u32 action,
2109 u32 rts_table,
2110 u32 offset,
2111 u64 data1,
2112 u64 data2);
2113
2114enum vxge_hw_status
2115__vxge_hw_vpath_enable(
2116 struct __vxge_hw_device *devh,
2117 u32 vp_id);
2118
2119void vxge_hw_device_intr_enable(
2120 struct __vxge_hw_device *devh);
2121
2122u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *devh, u32 intr_mode);
2123
2124void vxge_hw_device_intr_disable(
2125 struct __vxge_hw_device *devh);
2126
2127void vxge_hw_device_mask_all(
2128 struct __vxge_hw_device *devh);
2129
2130void vxge_hw_device_unmask_all(
2131 struct __vxge_hw_device *devh);
2132
2133enum vxge_hw_status vxge_hw_device_begin_irq(
2134 struct __vxge_hw_device *devh,
2135 u32 skip_alarms,
2136 u64 *reason);
2137
2138void vxge_hw_device_clear_tx_rx(
2139 struct __vxge_hw_device *devh);
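/*
 * Illustrative sketch, not part of the original file: the shape of an INTA
 * interrupt handler built from the declarations above.  A return other than
 * VXGE_HW_OK from vxge_hw_device_begin_irq is treated here as "not raised
 * by this adapter"; the per-vpath work is only hinted at, and
 * "example_inta_handler" is a hypothetical helper (a real handler would
 * return irqreturn_t).
 */
static void example_inta_handler(struct __vxge_hw_device *devh)
{
	u64 reason;

	if (vxge_hw_device_begin_irq(devh, 0, &reason) != VXGE_HW_OK)
		return;

	vxge_hw_device_mask_all(devh);

	/* ... poll the rings/fifos indicated by "reason" ... */

	vxge_hw_device_clear_tx_rx(devh);
	vxge_hw_device_unmask_all(devh);
}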
2140
2141/*
2142 * Virtual Paths
2143 */
2144
2145void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring);
2146
2147void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo);
2148
2149u32 vxge_hw_vpath_id(
2150 struct __vxge_hw_vpath_handle *vpath_handle);
2151
2152enum vxge_hw_vpath_mac_addr_add_mode {
2153 VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE = 0,
2154 VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE = 1,
2155 VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE = 2
2156};
2157
2158enum vxge_hw_status
2159vxge_hw_vpath_mac_addr_add(
2160 struct __vxge_hw_vpath_handle *vpath_handle,
2161 u8 *macaddr,
2162 u8 *macaddr_mask,
2163 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode);
2164
2165enum vxge_hw_status
2166vxge_hw_vpath_mac_addr_get(
2167 struct __vxge_hw_vpath_handle *vpath_handle,
2168 u8 *macaddr,
2169 u8 *macaddr_mask);
2170
2171enum vxge_hw_status
2172vxge_hw_vpath_mac_addr_get_next(
2173 struct __vxge_hw_vpath_handle *vpath_handle,
2174 u8 *macaddr,
2175 u8 *macaddr_mask);
2176
2177enum vxge_hw_status
2178vxge_hw_vpath_mac_addr_delete(
2179 struct __vxge_hw_vpath_handle *vpath_handle,
2180 u8 *macaddr,
2181 u8 *macaddr_mask);
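/*
 * Illustrative sketch, not part of the original file: programming one
 * unicast address into a vpath's DA table and then walking the table.
 * The all-zero macaddr_mask and the duplicate mode are placeholder
 * choices; "example_vpath_set_mac" is a hypothetical helper.
 */
static void example_vpath_set_mac(struct __vxge_hw_vpath_handle *vp, u8 *addr)
{
	u8 mask[6] = { 0 };
	u8 cur[6], cur_mask[6];

	vxge_hw_vpath_mac_addr_add(vp, addr, mask,
				   VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);

	/* enumerate whatever is currently programmed on this vpath */
	if (vxge_hw_vpath_mac_addr_get(vp, cur, cur_mask) != VXGE_HW_OK)
		return;
	do {
		/* ... inspect "cur" here ... */
	} while (vxge_hw_vpath_mac_addr_get_next(vp, cur, cur_mask) ==
		 VXGE_HW_OK);
}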
2182
2183enum vxge_hw_status
2184vxge_hw_vpath_vid_add(
2185 struct __vxge_hw_vpath_handle *vpath_handle,
2186 u64 vid);
2187
2188enum vxge_hw_status
2189vxge_hw_vpath_vid_get(
2190 struct __vxge_hw_vpath_handle *vpath_handle,
2191 u64 *vid);
2192
2193enum vxge_hw_status
2194vxge_hw_vpath_vid_delete(
2195 struct __vxge_hw_vpath_handle *vpath_handle,
2196 u64 vid);
2197
2198enum vxge_hw_status
2199vxge_hw_vpath_etype_add(
2200 struct __vxge_hw_vpath_handle *vpath_handle,
2201 u64 etype);
2202
2203enum vxge_hw_status
2204vxge_hw_vpath_etype_get(
2205 struct __vxge_hw_vpath_handle *vpath_handle,
2206 u64 *etype);
2207
2208enum vxge_hw_status
2209vxge_hw_vpath_etype_get_next(
2210 struct __vxge_hw_vpath_handle *vpath_handle,
2211 u64 *etype);
2212
2213enum vxge_hw_status
2214vxge_hw_vpath_etype_delete(
2215 struct __vxge_hw_vpath_handle *vpath_handle,
2216 u64 etype);
2217
2218enum vxge_hw_status vxge_hw_vpath_promisc_enable(
2219 struct __vxge_hw_vpath_handle *vpath_handle);
2220
2221enum vxge_hw_status vxge_hw_vpath_promisc_disable(
2222 struct __vxge_hw_vpath_handle *vpath_handle);
2223
2224enum vxge_hw_status vxge_hw_vpath_bcast_enable(
2225 struct __vxge_hw_vpath_handle *vpath_handle);
2226
2227enum vxge_hw_status vxge_hw_vpath_mcast_enable(
2228 struct __vxge_hw_vpath_handle *vpath_handle);
2229
2230enum vxge_hw_status vxge_hw_vpath_mcast_disable(
2231 struct __vxge_hw_vpath_handle *vpath_handle);
2232
2233enum vxge_hw_status vxge_hw_vpath_poll_rx(
2234 struct __vxge_hw_ring *ringh);
2235
2236enum vxge_hw_status vxge_hw_vpath_poll_tx(
2237 struct __vxge_hw_fifo *fifoh,
2238 struct sk_buff ***skb_ptr, int nr_skb, int *more);
2239
2240enum vxge_hw_status vxge_hw_vpath_alarm_process(
2241 struct __vxge_hw_vpath_handle *vpath_handle,
2242 u32 skip_alarms);
2243
2244void
2245vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vpath_handle,
2246 int *tim_msix_id, int alarm_msix_id);
2247
2248void
2249vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle,
2250 int msix_id);
2251
2252void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id);
2253
2254void vxge_hw_device_flush_io(struct __vxge_hw_device *devh);
2255
2256void
2257vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vpath_handle,
2258 int msix_id);
2259
2260enum vxge_hw_status vxge_hw_vpath_intr_enable(
2261 struct __vxge_hw_vpath_handle *vpath_handle);
2262
2263enum vxge_hw_status vxge_hw_vpath_intr_disable(
2264 struct __vxge_hw_vpath_handle *vpath_handle);
2265
2266void vxge_hw_vpath_inta_mask_tx_rx(
2267 struct __vxge_hw_vpath_handle *vpath_handle);
2268
2269void vxge_hw_vpath_inta_unmask_tx_rx(
2270 struct __vxge_hw_vpath_handle *vpath_handle);
2271
2272void
2273vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channelh, int msix_id);
2274
2275void
2276vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id);
2277
2278void
2279vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channelh, int msix_id);
2280
2281void
2282vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel,
2283 void **dtrh);
2284
2285void
2286vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel);
2287
2288void
2289vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh);
2290
2291int
2292vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
2293
2294void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo);
2295
2296void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring);
2297
2298#endif
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-version.h b/drivers/net/ethernet/neterion/vxge/vxge-version.h
new file mode 100644
index 000000000000..b9efa28bab3e
--- /dev/null
+++ b/drivers/net/ethernet/neterion/vxge/vxge-version.h
@@ -0,0 +1,49 @@
1/******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
9 *
10 * vxge-version.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/
14#ifndef VXGE_VERSION_H
15#define VXGE_VERSION_H
16
17#define VXGE_VERSION_MAJOR "2"
18#define VXGE_VERSION_MINOR "5"
19#define VXGE_VERSION_FIX "3"
20#define VXGE_VERSION_BUILD "22640"
21#define VXGE_VERSION_FOR "k"
22
23#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))
24
25#define VXGE_DEAD_FW_VER_MAJOR 1
26#define VXGE_DEAD_FW_VER_MINOR 4
27#define VXGE_DEAD_FW_VER_BUILD 4
28
29#define VXGE_FW_DEAD_VER VXGE_FW_VER(VXGE_DEAD_FW_VER_MAJOR, \
30 VXGE_DEAD_FW_VER_MINOR, \
31 VXGE_DEAD_FW_VER_BUILD)
32
33#define VXGE_EPROM_FW_VER_MAJOR 1
34#define VXGE_EPROM_FW_VER_MINOR 6
35#define VXGE_EPROM_FW_VER_BUILD 1
36
37#define VXGE_EPROM_FW_VER VXGE_FW_VER(VXGE_EPROM_FW_VER_MAJOR, \
38 VXGE_EPROM_FW_VER_MINOR, \
39 VXGE_EPROM_FW_VER_BUILD)
40
41#define VXGE_CERT_FW_VER_MAJOR 1
42#define VXGE_CERT_FW_VER_MINOR 8
43#define VXGE_CERT_FW_VER_BUILD 1
44
45#define VXGE_CERT_FW_VER VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, \
46 VXGE_CERT_FW_VER_MINOR, \
47 VXGE_CERT_FW_VER_BUILD)
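/*
 * Illustrative note, not part of the original file: VXGE_FW_VER packs a
 * firmware version triple into one integer with the major in bits 23:16,
 * the minor in bits 15:8 and the build in the low byte, so the certified
 * 1.8.1 firmware above encodes as (1 << 16) + (8 << 8) + 1 = 0x010801 and
 * firmware versions can be compared with ordinary integer comparisons.
 */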
48
49#endif