author		Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-22 10:38:37 -0500
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-22 10:38:37 -0500
commit		fcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (patch)
tree		a57612d1888735a2ec7972891b68c1ac5ec8faea /drivers/net/qlge
parent		8dea78da5cee153b8af9c07a2745f6c55057fe12 (diff)

    Added missing tegra files. (HEAD, master)
Diffstat (limited to 'drivers/net/qlge')
-rw-r--r--	drivers/net/qlge/Makefile	|    7
-rw-r--r--	drivers/net/qlge/qlge.h	| 2334
-rw-r--r--	drivers/net/qlge/qlge_dbg.c	| 2044
-rw-r--r--	drivers/net/qlge/qlge_ethtool.c	|  688
-rw-r--r--	drivers/net/qlge/qlge_main.c	| 4987
-rw-r--r--	drivers/net/qlge/qlge_mpi.c	| 1284
6 files changed, 11344 insertions(+), 0 deletions(-)
diff --git a/drivers/net/qlge/Makefile b/drivers/net/qlge/Makefile
new file mode 100644
index 00000000000..8a197658d76
--- /dev/null
+++ b/drivers/net/qlge/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile for the Qlogic 10GbE PCI Express ethernet driver
3#
4
5obj-$(CONFIG_QLGE) += qlge.o
6
7qlge-objs := qlge_main.o qlge_dbg.o qlge_mpi.o qlge_ethtool.o
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
new file mode 100644
index 00000000000..8731f79c9ef
--- /dev/null
+++ b/drivers/net/qlge/qlge.h
@@ -0,0 +1,2334 @@
1/*
2 * QLogic QLA41xx NIC HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qlge for copyright and licensing details.
6 */
7#ifndef _QLGE_H_
8#define _QLGE_H_
9
10#include <linux/interrupt.h>
11#include <linux/pci.h>
12#include <linux/netdevice.h>
13#include <linux/rtnetlink.h>
14#include <linux/if_vlan.h>
15
16/*
17 * General definitions...
18 */
19#define DRV_NAME "qlge"
20#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
21#define DRV_VERSION "v1.00.00.29.00.00-01"
22
23#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
24
25#define QLGE_VENDOR_ID 0x1077
26#define QLGE_DEVICE_ID_8012 0x8012
27#define QLGE_DEVICE_ID_8000 0x8000
28#define MAX_CPUS 8
29#define MAX_TX_RINGS MAX_CPUS
30#define MAX_RX_RINGS ((MAX_CPUS * 2) + 1)
31
32#define NUM_TX_RING_ENTRIES 256
33#define NUM_RX_RING_ENTRIES 256
34
35#define NUM_SMALL_BUFFERS 512
36#define NUM_LARGE_BUFFERS 512
37#define DB_PAGE_SIZE 4096
38
39/* Calculate the number of (4k) pages required to
40 * contain a buffer queue of the given length.
41 */
42#define MAX_DB_PAGES_PER_BQ(x) \
43 (((x * sizeof(u64)) / DB_PAGE_SIZE) + \
44 (((x * sizeof(u64)) % DB_PAGE_SIZE) ? 1 : 0))
45
46#define RX_RING_SHADOW_SPACE (sizeof(u64) + \
47 MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \
48 MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
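/* Worked example (illustrative, not part of the original header): each
 * buffer-queue entry is a 64-bit address, so a 512-entry queue occupies
 * 512 * 8 = 4096 bytes, i.e. exactly one DB_PAGE_SIZE page:
 *	MAX_DB_PAGES_PER_BQ(512) = (4096 / 4096) + 0 = 1
 * RX_RING_SHADOW_SPACE above therefore evaluates to
 *	8 + (1 * 8) + (1 * 8) = 24 bytes per rx ring.
 */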
49#define LARGE_BUFFER_MAX_SIZE 8192
50#define LARGE_BUFFER_MIN_SIZE 2048
51
52#define MAX_CQ 128
53#define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */
54#define MAX_INTER_FRAME_WAIT 10 /* 10 usec max interframe-wait for coalescing */
55#define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT/2)
56#define UDELAY_COUNT 3
57#define UDELAY_DELAY 100
58
59
60#define TX_DESC_PER_IOCB 8
61/* The maximum number of frags we handle is based
62 * on PAGE_SIZE...
63 */
64#if (PAGE_SHIFT == 12) || (PAGE_SHIFT == 13) /* 4k & 8k pages */
65#define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2)
66#else /* all other page sizes */
67#define TX_DESC_PER_OAL 0
68#endif
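/* Illustrative note (assumes MAX_SKB_FRAGS == 18, the usual value with
 * 4 KB pages on kernels of this vintage): the IOCB itself carries
 * TX_DESC_PER_IOCB (8) descriptors, and fragments beyond that spill into
 * an outbound address list (struct oal, later in this header), giving
 *	TX_DESC_PER_OAL = (18 - 8) + 2 = 12
 * spill descriptors on 4 KB/8 KB page systems.
 */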
69
70/* Word shifting for converting 64-bit
71 * address to a series of 16-bit words.
72 * This is used for some MPI firmware
73 * mailbox commands.
74 */
75#define LSW(x) ((u16)(x))
76#define MSW(x) ((u16)((u32)(x) >> 16))
77#define LSD(x) ((u32)((u64)(x)))
78#define MSD(x) ((u32)((((u64)(x)) >> 32)))
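/* Worked example (illustrative): for a 64-bit address
 * addr = 0x0000123456789abcULL,
 *	MSD(addr) = 0x00001234, LSD(addr) = 0x56789abc,
 *	MSW(LSD(addr)) = 0x5678, LSW(LSD(addr)) = 0x9abc,
 * which are the 32-bit and 16-bit pieces handed to the MPI mailbox
 * commands mentioned above.
 */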
79
80/* MPI test register definitions. This register
81 * is used for determining alternate NIC function's
82 * PCI->func number.
83 */
84enum {
85 MPI_TEST_FUNC_PORT_CFG = 0x1002,
86 MPI_TEST_FUNC_PRB_CTL = 0x100e,
87 MPI_TEST_FUNC_PRB_EN = 0x18a20000,
88 MPI_TEST_FUNC_RST_STS = 0x100a,
89 MPI_TEST_FUNC_RST_FRC = 0x00000003,
90 MPI_TEST_NIC_FUNC_MASK = 0x00000007,
91 MPI_TEST_NIC1_FUNCTION_ENABLE = (1 << 0),
92 MPI_TEST_NIC1_FUNCTION_MASK = 0x0000000e,
93 MPI_TEST_NIC1_FUNC_SHIFT = 1,
94 MPI_TEST_NIC2_FUNCTION_ENABLE = (1 << 4),
95 MPI_TEST_NIC2_FUNCTION_MASK = 0x000000e0,
96 MPI_TEST_NIC2_FUNC_SHIFT = 5,
97 MPI_TEST_FC1_FUNCTION_ENABLE = (1 << 8),
98 MPI_TEST_FC1_FUNCTION_MASK = 0x00000e00,
99 MPI_TEST_FC1_FUNCTION_SHIFT = 9,
100 MPI_TEST_FC2_FUNCTION_ENABLE = (1 << 12),
101 MPI_TEST_FC2_FUNCTION_MASK = 0x0000e000,
102 MPI_TEST_FC2_FUNCTION_SHIFT = 13,
103
104 MPI_NIC_READ = 0x00000000,
105 MPI_NIC_REG_BLOCK = 0x00020000,
106 MPI_NIC_FUNCTION_SHIFT = 6,
107};
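/* Minimal sketch (hypothetical helper, not from the original driver):
 * extracting the second NIC function number from a value read at
 * MPI_TEST_FUNC_PORT_CFG, using only the masks/shifts defined above.
 */
static inline u32 qlge_sketch_nic2_func(u32 port_cfg)
{
	if (!(port_cfg & MPI_TEST_NIC2_FUNCTION_ENABLE))
		return ~0U;	/* second NIC function not enabled */
	return (port_cfg & MPI_TEST_NIC2_FUNCTION_MASK) >>
		MPI_TEST_NIC2_FUNC_SHIFT;
}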
108
109/*
110 * Processor Address Register (PROC_ADDR) bit definitions.
111 */
112enum {
113
114 /* Misc. stuff */
115 MAILBOX_COUNT = 16,
116 MAILBOX_TIMEOUT = 5,
117
118 PROC_ADDR_RDY = (1 << 31),
119 PROC_ADDR_R = (1 << 30),
120 PROC_ADDR_ERR = (1 << 29),
121 PROC_ADDR_DA = (1 << 28),
122 PROC_ADDR_FUNC0_MBI = 0x00001180,
123 PROC_ADDR_FUNC0_MBO = (PROC_ADDR_FUNC0_MBI + MAILBOX_COUNT),
124 PROC_ADDR_FUNC0_CTL = 0x000011a1,
125 PROC_ADDR_FUNC2_MBI = 0x00001280,
126 PROC_ADDR_FUNC2_MBO = (PROC_ADDR_FUNC2_MBI + MAILBOX_COUNT),
127 PROC_ADDR_FUNC2_CTL = 0x000012a1,
128 PROC_ADDR_MPI_RISC = 0x00000000,
129 PROC_ADDR_MDE = 0x00010000,
130 PROC_ADDR_REGBLOCK = 0x00020000,
131 PROC_ADDR_RISC_REG = 0x00030000,
132};
133
134/*
135 * System Register (SYS) bit definitions.
136 */
137enum {
138 SYS_EFE = (1 << 0),
139 SYS_FAE = (1 << 1),
140 SYS_MDC = (1 << 2),
141 SYS_DST = (1 << 3),
142 SYS_DWC = (1 << 4),
143 SYS_EVW = (1 << 5),
144 SYS_OMP_DLY_MASK = 0x3f000000,
145 /*
146 * There are no values defined as of edit #15.
147 */
148 SYS_ODI = (1 << 14),
149};
150
151/*
152 * Reset/Failover Register (RST_FO) bit definitions.
153 */
154enum {
155 RST_FO_TFO = (1 << 0),
156 RST_FO_RR_MASK = 0x00060000,
157 RST_FO_RR_CQ_CAM = 0x00000000,
158 RST_FO_RR_DROP = 0x00000002,
159 RST_FO_RR_DQ = 0x00000004,
160 RST_FO_RR_RCV_FUNC_CQ = 0x00000006,
161 RST_FO_FRB = (1 << 12),
162 RST_FO_MOP = (1 << 13),
163 RST_FO_REG = (1 << 14),
164 RST_FO_FR = (1 << 15),
165};
166
167/*
168 * Function Specific Control Register (FSC) bit definitions.
169 */
170enum {
171 FSC_DBRST_MASK = 0x00070000,
172 FSC_DBRST_256 = 0x00000000,
173 FSC_DBRST_512 = 0x00000001,
174 FSC_DBRST_768 = 0x00000002,
175 FSC_DBRST_1024 = 0x00000003,
176 FSC_DBL_MASK = 0x00180000,
177 FSC_DBL_DBRST = 0x00000000,
178 FSC_DBL_MAX_PLD = 0x00000008,
179 FSC_DBL_MAX_BRST = 0x00000010,
180 FSC_DBL_128_BYTES = 0x00000018,
181 FSC_EC = (1 << 5),
182 FSC_EPC_MASK = 0x00c00000,
183 FSC_EPC_INBOUND = (1 << 6),
184 FSC_EPC_OUTBOUND = (1 << 7),
185 FSC_VM_PAGESIZE_MASK = 0x07000000,
186 FSC_VM_PAGE_2K = 0x00000100,
187 FSC_VM_PAGE_4K = 0x00000200,
188 FSC_VM_PAGE_8K = 0x00000300,
189 FSC_VM_PAGE_64K = 0x00000600,
190 FSC_SH = (1 << 11),
191 FSC_DSB = (1 << 12),
192 FSC_STE = (1 << 13),
193 FSC_FE = (1 << 15),
194};
195
196/*
197 * Host Command Status Register (CSR) bit definitions.
198 */
199enum {
200 CSR_ERR_STS_MASK = 0x0000003f,
201 /*
 202 * There are no values defined as of edit #15.
203 */
204 CSR_RR = (1 << 8),
205 CSR_HRI = (1 << 9),
206 CSR_RP = (1 << 10),
207 CSR_CMD_PARM_SHIFT = 22,
208 CSR_CMD_NOP = 0x00000000,
209 CSR_CMD_SET_RST = 0x10000000,
210 CSR_CMD_CLR_RST = 0x20000000,
211 CSR_CMD_SET_PAUSE = 0x30000000,
212 CSR_CMD_CLR_PAUSE = 0x40000000,
213 CSR_CMD_SET_H2R_INT = 0x50000000,
214 CSR_CMD_CLR_H2R_INT = 0x60000000,
215 CSR_CMD_PAR_EN = 0x70000000,
216 CSR_CMD_SET_BAD_PAR = 0x80000000,
217 CSR_CMD_CLR_BAD_PAR = 0x90000000,
218 CSR_CMD_CLR_R2PCI_INT = 0xa0000000,
219};
220
221/*
222 * Configuration Register (CFG) bit definitions.
223 */
224enum {
225 CFG_LRQ = (1 << 0),
226 CFG_DRQ = (1 << 1),
227 CFG_LR = (1 << 2),
228 CFG_DR = (1 << 3),
229 CFG_LE = (1 << 5),
230 CFG_LCQ = (1 << 6),
231 CFG_DCQ = (1 << 7),
232 CFG_Q_SHIFT = 8,
233 CFG_Q_MASK = 0x7f000000,
234};
235
236/*
237 * Status Register (STS) bit definitions.
238 */
239enum {
240 STS_FE = (1 << 0),
241 STS_PI = (1 << 1),
242 STS_PL0 = (1 << 2),
243 STS_PL1 = (1 << 3),
244 STS_PI0 = (1 << 4),
245 STS_PI1 = (1 << 5),
246 STS_FUNC_ID_MASK = 0x000000c0,
247 STS_FUNC_ID_SHIFT = 6,
248 STS_F0E = (1 << 8),
249 STS_F1E = (1 << 9),
250 STS_F2E = (1 << 10),
251 STS_F3E = (1 << 11),
252 STS_NFE = (1 << 12),
253};
254
255/*
256 * Interrupt Enable Register (INTR_EN) bit definitions.
257 */
258enum {
259 INTR_EN_INTR_MASK = 0x007f0000,
260 INTR_EN_TYPE_MASK = 0x03000000,
261 INTR_EN_TYPE_ENABLE = 0x00000100,
262 INTR_EN_TYPE_DISABLE = 0x00000200,
263 INTR_EN_TYPE_READ = 0x00000300,
264 INTR_EN_IHD = (1 << 13),
265 INTR_EN_IHD_MASK = (INTR_EN_IHD << 16),
266 INTR_EN_EI = (1 << 14),
267 INTR_EN_EN = (1 << 15),
268};
269
270/*
271 * Interrupt Mask Register (INTR_MASK) bit definitions.
272 */
273enum {
274 INTR_MASK_PI = (1 << 0),
275 INTR_MASK_HL0 = (1 << 1),
276 INTR_MASK_LH0 = (1 << 2),
277 INTR_MASK_HL1 = (1 << 3),
278 INTR_MASK_LH1 = (1 << 4),
279 INTR_MASK_SE = (1 << 5),
280 INTR_MASK_LSC = (1 << 6),
281 INTR_MASK_MC = (1 << 7),
282 INTR_MASK_LINK_IRQS = INTR_MASK_LSC | INTR_MASK_SE | INTR_MASK_MC,
283};
284
285/*
286 * Register (REV_ID) bit definitions.
287 */
288enum {
289 REV_ID_MASK = 0x0000000f,
290 REV_ID_NICROLL_SHIFT = 0,
291 REV_ID_NICREV_SHIFT = 4,
292 REV_ID_XGROLL_SHIFT = 8,
293 REV_ID_XGREV_SHIFT = 12,
294 REV_ID_CHIPREV_SHIFT = 28,
295};
296
297/*
298 * Force ECC Error Register (FRC_ECC_ERR) bit definitions.
299 */
300enum {
301 FRC_ECC_ERR_VW = (1 << 12),
302 FRC_ECC_ERR_VB = (1 << 13),
303 FRC_ECC_ERR_NI = (1 << 14),
304 FRC_ECC_ERR_NO = (1 << 15),
305 FRC_ECC_PFE_SHIFT = 16,
306 FRC_ECC_ERR_DO = (1 << 18),
307 FRC_ECC_P14 = (1 << 19),
308};
309
310/*
311 * Error Status Register (ERR_STS) bit definitions.
312 */
313enum {
314 ERR_STS_NOF = (1 << 0),
315 ERR_STS_NIF = (1 << 1),
316 ERR_STS_DRP = (1 << 2),
317 ERR_STS_XGP = (1 << 3),
318 ERR_STS_FOU = (1 << 4),
319 ERR_STS_FOC = (1 << 5),
320 ERR_STS_FOF = (1 << 6),
321 ERR_STS_FIU = (1 << 7),
322 ERR_STS_FIC = (1 << 8),
323 ERR_STS_FIF = (1 << 9),
324 ERR_STS_MOF = (1 << 10),
325 ERR_STS_TA = (1 << 11),
326 ERR_STS_MA = (1 << 12),
327 ERR_STS_MPE = (1 << 13),
328 ERR_STS_SCE = (1 << 14),
329 ERR_STS_STE = (1 << 15),
330 ERR_STS_FOW = (1 << 16),
331 ERR_STS_UE = (1 << 17),
332 ERR_STS_MCH = (1 << 26),
333 ERR_STS_LOC_SHIFT = 27,
334};
335
336/*
337 * RAM Debug Address Register (RAM_DBG_ADDR) bit definitions.
338 */
339enum {
340 RAM_DBG_ADDR_FW = (1 << 30),
341 RAM_DBG_ADDR_FR = (1 << 31),
342};
343
344/*
345 * Semaphore Register (SEM) bit definitions.
346 */
347enum {
348 /*
349 * Example:
350 * reg = SEM_XGMAC0_MASK | (SEM_SET << SEM_XGMAC0_SHIFT)
351 */
352 SEM_CLEAR = 0,
353 SEM_SET = 1,
354 SEM_FORCE = 3,
355 SEM_XGMAC0_SHIFT = 0,
356 SEM_XGMAC1_SHIFT = 2,
357 SEM_ICB_SHIFT = 4,
358 SEM_MAC_ADDR_SHIFT = 6,
359 SEM_FLASH_SHIFT = 8,
360 SEM_PROBE_SHIFT = 10,
361 SEM_RT_IDX_SHIFT = 12,
362 SEM_PROC_REG_SHIFT = 14,
363 SEM_XGMAC0_MASK = 0x00030000,
364 SEM_XGMAC1_MASK = 0x000c0000,
365 SEM_ICB_MASK = 0x00300000,
366 SEM_MAC_ADDR_MASK = 0x00c00000,
367 SEM_FLASH_MASK = 0x03000000,
368 SEM_PROBE_MASK = 0x0c000000,
369 SEM_RT_IDX_MASK = 0x30000000,
370 SEM_PROC_REG_MASK = 0xc0000000,
371};
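/* A second worked example of the pattern shown in the comment above
 * (illustrative): to request the flash semaphore,
 *	reg = SEM_FLASH_MASK | (SEM_SET << SEM_FLASH_SHIFT);
 * and to release it,
 *	reg = SEM_FLASH_MASK | (SEM_CLEAR << SEM_FLASH_SHIFT);
 * The upper-halfword mask selects which two-bit field the write applies
 * to, while the shifted value carries the requested state.
 */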
372
373/*
374 * 10G MAC Address Register (XGMAC_ADDR) bit definitions.
375 */
376enum {
377 XGMAC_ADDR_RDY = (1 << 31),
378 XGMAC_ADDR_R = (1 << 30),
379 XGMAC_ADDR_XME = (1 << 29),
380
381 /* XGMAC control registers */
382 PAUSE_SRC_LO = 0x00000100,
383 PAUSE_SRC_HI = 0x00000104,
384 GLOBAL_CFG = 0x00000108,
385 GLOBAL_CFG_RESET = (1 << 0),
386 GLOBAL_CFG_JUMBO = (1 << 6),
387 GLOBAL_CFG_TX_STAT_EN = (1 << 10),
388 GLOBAL_CFG_RX_STAT_EN = (1 << 11),
389 TX_CFG = 0x0000010c,
390 TX_CFG_RESET = (1 << 0),
391 TX_CFG_EN = (1 << 1),
392 TX_CFG_PREAM = (1 << 2),
393 RX_CFG = 0x00000110,
394 RX_CFG_RESET = (1 << 0),
395 RX_CFG_EN = (1 << 1),
396 RX_CFG_PREAM = (1 << 2),
397 FLOW_CTL = 0x0000011c,
398 PAUSE_OPCODE = 0x00000120,
399 PAUSE_TIMER = 0x00000124,
400 PAUSE_FRM_DEST_LO = 0x00000128,
401 PAUSE_FRM_DEST_HI = 0x0000012c,
402 MAC_TX_PARAMS = 0x00000134,
403 MAC_TX_PARAMS_JUMBO = (1 << 31),
404 MAC_TX_PARAMS_SIZE_SHIFT = 16,
405 MAC_RX_PARAMS = 0x00000138,
406 MAC_SYS_INT = 0x00000144,
407 MAC_SYS_INT_MASK = 0x00000148,
408 MAC_MGMT_INT = 0x0000014c,
409 MAC_MGMT_IN_MASK = 0x00000150,
410 EXT_ARB_MODE = 0x000001fc,
411
412 /* XGMAC TX statistics registers */
413 TX_PKTS = 0x00000200,
414 TX_BYTES = 0x00000208,
415 TX_MCAST_PKTS = 0x00000210,
416 TX_BCAST_PKTS = 0x00000218,
417 TX_UCAST_PKTS = 0x00000220,
418 TX_CTL_PKTS = 0x00000228,
419 TX_PAUSE_PKTS = 0x00000230,
420 TX_64_PKT = 0x00000238,
421 TX_65_TO_127_PKT = 0x00000240,
422 TX_128_TO_255_PKT = 0x00000248,
423 TX_256_511_PKT = 0x00000250,
424 TX_512_TO_1023_PKT = 0x00000258,
425 TX_1024_TO_1518_PKT = 0x00000260,
426 TX_1519_TO_MAX_PKT = 0x00000268,
427 TX_UNDERSIZE_PKT = 0x00000270,
428 TX_OVERSIZE_PKT = 0x00000278,
429
430 /* XGMAC statistics control registers */
431 RX_HALF_FULL_DET = 0x000002a0,
432 TX_HALF_FULL_DET = 0x000002a4,
433 RX_OVERFLOW_DET = 0x000002a8,
434 TX_OVERFLOW_DET = 0x000002ac,
435 RX_HALF_FULL_MASK = 0x000002b0,
436 TX_HALF_FULL_MASK = 0x000002b4,
437 RX_OVERFLOW_MASK = 0x000002b8,
438 TX_OVERFLOW_MASK = 0x000002bc,
439 STAT_CNT_CTL = 0x000002c0,
440 STAT_CNT_CTL_CLEAR_TX = (1 << 0),
441 STAT_CNT_CTL_CLEAR_RX = (1 << 1),
442 AUX_RX_HALF_FULL_DET = 0x000002d0,
443 AUX_TX_HALF_FULL_DET = 0x000002d4,
444 AUX_RX_OVERFLOW_DET = 0x000002d8,
445 AUX_TX_OVERFLOW_DET = 0x000002dc,
446 AUX_RX_HALF_FULL_MASK = 0x000002f0,
447 AUX_TX_HALF_FULL_MASK = 0x000002f4,
448 AUX_RX_OVERFLOW_MASK = 0x000002f8,
449 AUX_TX_OVERFLOW_MASK = 0x000002fc,
450
451 /* XGMAC RX statistics registers */
452 RX_BYTES = 0x00000300,
453 RX_BYTES_OK = 0x00000308,
454 RX_PKTS = 0x00000310,
455 RX_PKTS_OK = 0x00000318,
456 RX_BCAST_PKTS = 0x00000320,
457 RX_MCAST_PKTS = 0x00000328,
458 RX_UCAST_PKTS = 0x00000330,
459 RX_UNDERSIZE_PKTS = 0x00000338,
460 RX_OVERSIZE_PKTS = 0x00000340,
461 RX_JABBER_PKTS = 0x00000348,
462 RX_UNDERSIZE_FCERR_PKTS = 0x00000350,
463 RX_DROP_EVENTS = 0x00000358,
464 RX_FCERR_PKTS = 0x00000360,
465 RX_ALIGN_ERR = 0x00000368,
466 RX_SYMBOL_ERR = 0x00000370,
467 RX_MAC_ERR = 0x00000378,
468 RX_CTL_PKTS = 0x00000380,
469 RX_PAUSE_PKTS = 0x00000388,
470 RX_64_PKTS = 0x00000390,
471 RX_65_TO_127_PKTS = 0x00000398,
472 RX_128_255_PKTS = 0x000003a0,
473 RX_256_511_PKTS = 0x000003a8,
474 RX_512_TO_1023_PKTS = 0x000003b0,
475 RX_1024_TO_1518_PKTS = 0x000003b8,
476 RX_1519_TO_MAX_PKTS = 0x000003c0,
477 RX_LEN_ERR_PKTS = 0x000003c8,
478
479 /* XGMAC MDIO control registers */
480 MDIO_TX_DATA = 0x00000400,
481 MDIO_RX_DATA = 0x00000410,
482 MDIO_CMD = 0x00000420,
483 MDIO_PHY_ADDR = 0x00000430,
484 MDIO_PORT = 0x00000440,
485 MDIO_STATUS = 0x00000450,
486
487 XGMAC_REGISTER_END = 0x00000740,
488};
489
490/*
491 * Enhanced Transmission Schedule Registers (NIC_ETS,CNA_ETS) bit definitions.
492 */
493enum {
494 ETS_QUEUE_SHIFT = 29,
495 ETS_REF = (1 << 26),
496 ETS_RS = (1 << 27),
497 ETS_P = (1 << 28),
498 ETS_FC_COS_SHIFT = 23,
499};
500
501/*
502 * Flash Address Register (FLASH_ADDR) bit definitions.
503 */
504enum {
505 FLASH_ADDR_RDY = (1 << 31),
506 FLASH_ADDR_R = (1 << 30),
507 FLASH_ADDR_ERR = (1 << 29),
508};
509
510/*
511 * Stop CQ Processing Register (CQ_STOP) bit definitions.
512 */
513enum {
514 CQ_STOP_QUEUE_MASK = (0x007f0000),
515 CQ_STOP_TYPE_MASK = (0x03000000),
516 CQ_STOP_TYPE_START = 0x00000100,
517 CQ_STOP_TYPE_STOP = 0x00000200,
518 CQ_STOP_TYPE_READ = 0x00000300,
519 CQ_STOP_EN = (1 << 15),
520};
521
522/*
523 * MAC Protocol Address Index Register (MAC_ADDR_IDX) bit definitions.
524 */
525enum {
526 MAC_ADDR_IDX_SHIFT = 4,
527 MAC_ADDR_TYPE_SHIFT = 16,
528 MAC_ADDR_TYPE_COUNT = 10,
529 MAC_ADDR_TYPE_MASK = 0x000f0000,
530 MAC_ADDR_TYPE_CAM_MAC = 0x00000000,
531 MAC_ADDR_TYPE_MULTI_MAC = 0x00010000,
532 MAC_ADDR_TYPE_VLAN = 0x00020000,
533 MAC_ADDR_TYPE_MULTI_FLTR = 0x00030000,
534 MAC_ADDR_TYPE_FC_MAC = 0x00040000,
535 MAC_ADDR_TYPE_MGMT_MAC = 0x00050000,
536 MAC_ADDR_TYPE_MGMT_VLAN = 0x00060000,
537 MAC_ADDR_TYPE_MGMT_V4 = 0x00070000,
538 MAC_ADDR_TYPE_MGMT_V6 = 0x00080000,
539 MAC_ADDR_TYPE_MGMT_TU_DP = 0x00090000,
540 MAC_ADDR_ADR = (1 << 25),
541 MAC_ADDR_RS = (1 << 26),
542 MAC_ADDR_E = (1 << 27),
543 MAC_ADDR_MR = (1 << 30),
544 MAC_ADDR_MW = (1 << 31),
545 MAX_MULTICAST_ENTRIES = 32,
546
547 /* Entry count and words per entry
548 * for each address type in the filter.
549 */
550 MAC_ADDR_MAX_CAM_ENTRIES = 512,
551 MAC_ADDR_MAX_CAM_WCOUNT = 3,
552 MAC_ADDR_MAX_MULTICAST_ENTRIES = 32,
553 MAC_ADDR_MAX_MULTICAST_WCOUNT = 2,
554 MAC_ADDR_MAX_VLAN_ENTRIES = 4096,
555 MAC_ADDR_MAX_VLAN_WCOUNT = 1,
556 MAC_ADDR_MAX_MCAST_FLTR_ENTRIES = 4096,
557 MAC_ADDR_MAX_MCAST_FLTR_WCOUNT = 1,
558 MAC_ADDR_MAX_FC_MAC_ENTRIES = 4,
559 MAC_ADDR_MAX_FC_MAC_WCOUNT = 2,
560 MAC_ADDR_MAX_MGMT_MAC_ENTRIES = 8,
561 MAC_ADDR_MAX_MGMT_MAC_WCOUNT = 2,
562 MAC_ADDR_MAX_MGMT_VLAN_ENTRIES = 16,
563 MAC_ADDR_MAX_MGMT_VLAN_WCOUNT = 1,
564 MAC_ADDR_MAX_MGMT_V4_ENTRIES = 4,
565 MAC_ADDR_MAX_MGMT_V4_WCOUNT = 1,
566 MAC_ADDR_MAX_MGMT_V6_ENTRIES = 4,
567 MAC_ADDR_MAX_MGMT_V6_WCOUNT = 4,
568 MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES = 4,
569 MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT = 1,
570};
571
572/*
573 * MAC Protocol Address Index Register (SPLT_HDR) bit definitions.
574 */
575enum {
576 SPLT_HDR_EP = (1 << 31),
577};
578
579/*
580 * FCoE Receive Configuration Register (FC_RCV_CFG) bit definitions.
581 */
582enum {
583 FC_RCV_CFG_ECT = (1 << 15),
584 FC_RCV_CFG_DFH = (1 << 20),
585 FC_RCV_CFG_DVF = (1 << 21),
586 FC_RCV_CFG_RCE = (1 << 27),
587 FC_RCV_CFG_RFE = (1 << 28),
588 FC_RCV_CFG_TEE = (1 << 29),
589 FC_RCV_CFG_TCE = (1 << 30),
590 FC_RCV_CFG_TFE = (1 << 31),
591};
592
593/*
594 * NIC Receive Configuration Register (NIC_RCV_CFG) bit definitions.
595 */
596enum {
597 NIC_RCV_CFG_PPE = (1 << 0),
598 NIC_RCV_CFG_VLAN_MASK = 0x00060000,
599 NIC_RCV_CFG_VLAN_ALL = 0x00000000,
600 NIC_RCV_CFG_VLAN_MATCH_ONLY = 0x00000002,
601 NIC_RCV_CFG_VLAN_MATCH_AND_NON = 0x00000004,
602 NIC_RCV_CFG_VLAN_NONE_AND_NON = 0x00000006,
603 NIC_RCV_CFG_RV = (1 << 3),
604 NIC_RCV_CFG_DFQ_MASK = (0x7f000000),
605 NIC_RCV_CFG_DFQ_SHIFT = 8,
606 NIC_RCV_CFG_DFQ = 0, /* HARDCODE default queue to 0. */
607};
608
609/*
610 * Mgmt Receive Configuration Register (MGMT_RCV_CFG) bit definitions.
611 */
612enum {
613 MGMT_RCV_CFG_ARP = (1 << 0),
614 MGMT_RCV_CFG_DHC = (1 << 1),
615 MGMT_RCV_CFG_DHS = (1 << 2),
616 MGMT_RCV_CFG_NP = (1 << 3),
617 MGMT_RCV_CFG_I6N = (1 << 4),
618 MGMT_RCV_CFG_I6R = (1 << 5),
619 MGMT_RCV_CFG_DH6 = (1 << 6),
620 MGMT_RCV_CFG_UD1 = (1 << 7),
621 MGMT_RCV_CFG_UD0 = (1 << 8),
622 MGMT_RCV_CFG_BCT = (1 << 9),
623 MGMT_RCV_CFG_MCT = (1 << 10),
624 MGMT_RCV_CFG_DM = (1 << 11),
625 MGMT_RCV_CFG_RM = (1 << 12),
626 MGMT_RCV_CFG_STL = (1 << 13),
627 MGMT_RCV_CFG_VLAN_MASK = 0xc0000000,
628 MGMT_RCV_CFG_VLAN_ALL = 0x00000000,
629 MGMT_RCV_CFG_VLAN_MATCH_ONLY = 0x00004000,
630 MGMT_RCV_CFG_VLAN_MATCH_AND_NON = 0x00008000,
631 MGMT_RCV_CFG_VLAN_NONE_AND_NON = 0x0000c000,
632};
633
634/*
635 * Routing Index Register (RT_IDX) bit definitions.
636 */
637enum {
638 RT_IDX_IDX_SHIFT = 8,
639 RT_IDX_TYPE_MASK = 0x000f0000,
640 RT_IDX_TYPE_SHIFT = 16,
641 RT_IDX_TYPE_RT = 0x00000000,
642 RT_IDX_TYPE_RT_INV = 0x00010000,
643 RT_IDX_TYPE_NICQ = 0x00020000,
644 RT_IDX_TYPE_NICQ_INV = 0x00030000,
645 RT_IDX_DST_MASK = 0x00700000,
646 RT_IDX_DST_RSS = 0x00000000,
647 RT_IDX_DST_CAM_Q = 0x00100000,
648 RT_IDX_DST_COS_Q = 0x00200000,
649 RT_IDX_DST_DFLT_Q = 0x00300000,
650 RT_IDX_DST_DEST_Q = 0x00400000,
651 RT_IDX_RS = (1 << 26),
652 RT_IDX_E = (1 << 27),
653 RT_IDX_MR = (1 << 30),
654 RT_IDX_MW = (1 << 31),
655
656 /* Nic Queue format - type 2 bits */
657 RT_IDX_BCAST = (1 << 0),
658 RT_IDX_MCAST = (1 << 1),
659 RT_IDX_MCAST_MATCH = (1 << 2),
660 RT_IDX_MCAST_REG_MATCH = (1 << 3),
661 RT_IDX_MCAST_HASH_MATCH = (1 << 4),
662 RT_IDX_FC_MACH = (1 << 5),
663 RT_IDX_ETH_FCOE = (1 << 6),
664 RT_IDX_CAM_HIT = (1 << 7),
665 RT_IDX_CAM_BIT0 = (1 << 8),
666 RT_IDX_CAM_BIT1 = (1 << 9),
667 RT_IDX_VLAN_TAG = (1 << 10),
668 RT_IDX_VLAN_MATCH = (1 << 11),
669 RT_IDX_VLAN_FILTER = (1 << 12),
670 RT_IDX_ETH_SKIP1 = (1 << 13),
671 RT_IDX_ETH_SKIP2 = (1 << 14),
672 RT_IDX_BCAST_MCAST_MATCH = (1 << 15),
673 RT_IDX_802_3 = (1 << 16),
674 RT_IDX_LLDP = (1 << 17),
675 RT_IDX_UNUSED018 = (1 << 18),
676 RT_IDX_UNUSED019 = (1 << 19),
677 RT_IDX_UNUSED20 = (1 << 20),
678 RT_IDX_UNUSED21 = (1 << 21),
679 RT_IDX_ERR = (1 << 22),
680 RT_IDX_VALID = (1 << 23),
681 RT_IDX_TU_CSUM_ERR = (1 << 24),
682 RT_IDX_IP_CSUM_ERR = (1 << 25),
683 RT_IDX_MAC_ERR = (1 << 26),
684 RT_IDX_RSS_TCP6 = (1 << 27),
685 RT_IDX_RSS_TCP4 = (1 << 28),
686 RT_IDX_RSS_IPV6 = (1 << 29),
687 RT_IDX_RSS_IPV4 = (1 << 30),
688 RT_IDX_RSS_MATCH = (1 << 31),
689
690 /* Hierarchy for the NIC Queue Mask */
691 RT_IDX_ALL_ERR_SLOT = 0,
692 RT_IDX_MAC_ERR_SLOT = 0,
693 RT_IDX_IP_CSUM_ERR_SLOT = 1,
694 RT_IDX_TCP_UDP_CSUM_ERR_SLOT = 2,
695 RT_IDX_BCAST_SLOT = 3,
696 RT_IDX_MCAST_MATCH_SLOT = 4,
697 RT_IDX_ALLMULTI_SLOT = 5,
698 RT_IDX_UNUSED6_SLOT = 6,
699 RT_IDX_UNUSED7_SLOT = 7,
700 RT_IDX_RSS_MATCH_SLOT = 8,
701 RT_IDX_RSS_IPV4_SLOT = 8,
702 RT_IDX_RSS_IPV6_SLOT = 9,
703 RT_IDX_RSS_TCP4_SLOT = 10,
704 RT_IDX_RSS_TCP6_SLOT = 11,
705 RT_IDX_CAM_HIT_SLOT = 12,
706 RT_IDX_UNUSED013 = 13,
707 RT_IDX_UNUSED014 = 14,
708 RT_IDX_PROMISCUOUS_SLOT = 15,
709 RT_IDX_MAX_RT_SLOTS = 8,
710 RT_IDX_MAX_NIC_SLOTS = 16,
711};
712
713/*
714 * Serdes Address Register (XG_SERDES_ADDR) bit definitions.
715 */
716enum {
717 XG_SERDES_ADDR_RDY = (1 << 31),
718 XG_SERDES_ADDR_R = (1 << 30),
719
720 XG_SERDES_ADDR_STS = 0x00001E06,
721 XG_SERDES_ADDR_XFI1_PWR_UP = 0x00000005,
722 XG_SERDES_ADDR_XFI2_PWR_UP = 0x0000000a,
723 XG_SERDES_ADDR_XAUI_PWR_DOWN = 0x00000001,
724
725 /* Serdes coredump definitions. */
726 XG_SERDES_XAUI_AN_START = 0x00000000,
727 XG_SERDES_XAUI_AN_END = 0x00000034,
728 XG_SERDES_XAUI_HSS_PCS_START = 0x00000800,
729 XG_SERDES_XAUI_HSS_PCS_END = 0x0000880,
730 XG_SERDES_XFI_AN_START = 0x00001000,
731 XG_SERDES_XFI_AN_END = 0x00001034,
732 XG_SERDES_XFI_TRAIN_START = 0x10001050,
733 XG_SERDES_XFI_TRAIN_END = 0x1000107C,
734 XG_SERDES_XFI_HSS_PCS_START = 0x00001800,
735 XG_SERDES_XFI_HSS_PCS_END = 0x00001838,
736 XG_SERDES_XFI_HSS_TX_START = 0x00001c00,
737 XG_SERDES_XFI_HSS_TX_END = 0x00001c1f,
738 XG_SERDES_XFI_HSS_RX_START = 0x00001c40,
739 XG_SERDES_XFI_HSS_RX_END = 0x00001c5f,
740 XG_SERDES_XFI_HSS_PLL_START = 0x00001e00,
741 XG_SERDES_XFI_HSS_PLL_END = 0x00001e1f,
742};
743
744/*
745 * NIC Probe Mux Address Register (PRB_MX_ADDR) bit definitions.
746 */
747enum {
748 PRB_MX_ADDR_ARE = (1 << 16),
749 PRB_MX_ADDR_UP = (1 << 15),
750 PRB_MX_ADDR_SWP = (1 << 14),
751
752 /* Module select values. */
753 PRB_MX_ADDR_MAX_MODS = 21,
754 PRB_MX_ADDR_MOD_SEL_SHIFT = 9,
755 PRB_MX_ADDR_MOD_SEL_TBD = 0,
756 PRB_MX_ADDR_MOD_SEL_IDE1 = 1,
757 PRB_MX_ADDR_MOD_SEL_IDE2 = 2,
758 PRB_MX_ADDR_MOD_SEL_FRB = 3,
759 PRB_MX_ADDR_MOD_SEL_ODE1 = 4,
760 PRB_MX_ADDR_MOD_SEL_ODE2 = 5,
761 PRB_MX_ADDR_MOD_SEL_DA1 = 6,
762 PRB_MX_ADDR_MOD_SEL_DA2 = 7,
763 PRB_MX_ADDR_MOD_SEL_IMP1 = 8,
764 PRB_MX_ADDR_MOD_SEL_IMP2 = 9,
765 PRB_MX_ADDR_MOD_SEL_OMP1 = 10,
766 PRB_MX_ADDR_MOD_SEL_OMP2 = 11,
767 PRB_MX_ADDR_MOD_SEL_ORS1 = 12,
768 PRB_MX_ADDR_MOD_SEL_ORS2 = 13,
769 PRB_MX_ADDR_MOD_SEL_REG = 14,
770 PRB_MX_ADDR_MOD_SEL_MAC1 = 16,
771 PRB_MX_ADDR_MOD_SEL_MAC2 = 17,
772 PRB_MX_ADDR_MOD_SEL_VQM1 = 18,
773 PRB_MX_ADDR_MOD_SEL_VQM2 = 19,
774 PRB_MX_ADDR_MOD_SEL_MOP = 20,
775 /* Bit fields indicating which modules
776 * are valid for each clock domain.
777 */
778 PRB_MX_ADDR_VALID_SYS_MOD = 0x000f7ff7,
779 PRB_MX_ADDR_VALID_PCI_MOD = 0x000040c1,
780 PRB_MX_ADDR_VALID_XGM_MOD = 0x00037309,
781 PRB_MX_ADDR_VALID_FC_MOD = 0x00003001,
782 PRB_MX_ADDR_VALID_TOTAL = 34,
783
784 /* Clock domain values. */
785 PRB_MX_ADDR_CLOCK_SHIFT = 6,
786 PRB_MX_ADDR_SYS_CLOCK = 0,
787 PRB_MX_ADDR_PCI_CLOCK = 2,
788 PRB_MX_ADDR_FC_CLOCK = 5,
789 PRB_MX_ADDR_XGM_CLOCK = 6,
790
791 PRB_MX_ADDR_MAX_MUX = 64,
792};
793
794/*
795 * Control Register Set Map
796 */
797enum {
798 PROC_ADDR = 0, /* Use semaphore */
799 PROC_DATA = 0x04, /* Use semaphore */
800 SYS = 0x08,
801 RST_FO = 0x0c,
802 FSC = 0x10,
803 CSR = 0x14,
804 LED = 0x18,
805 ICB_RID = 0x1c, /* Use semaphore */
806 ICB_L = 0x20, /* Use semaphore */
807 ICB_H = 0x24, /* Use semaphore */
808 CFG = 0x28,
809 BIOS_ADDR = 0x2c,
810 STS = 0x30,
811 INTR_EN = 0x34,
812 INTR_MASK = 0x38,
813 ISR1 = 0x3c,
814 ISR2 = 0x40,
815 ISR3 = 0x44,
816 ISR4 = 0x48,
817 REV_ID = 0x4c,
818 FRC_ECC_ERR = 0x50,
819 ERR_STS = 0x54,
820 RAM_DBG_ADDR = 0x58,
821 RAM_DBG_DATA = 0x5c,
822 ECC_ERR_CNT = 0x60,
823 SEM = 0x64,
824 GPIO_1 = 0x68, /* Use semaphore */
825 GPIO_2 = 0x6c, /* Use semaphore */
826 GPIO_3 = 0x70, /* Use semaphore */
827 RSVD2 = 0x74,
828 XGMAC_ADDR = 0x78, /* Use semaphore */
829 XGMAC_DATA = 0x7c, /* Use semaphore */
830 NIC_ETS = 0x80,
831 CNA_ETS = 0x84,
832 FLASH_ADDR = 0x88, /* Use semaphore */
833 FLASH_DATA = 0x8c, /* Use semaphore */
834 CQ_STOP = 0x90,
835 PAGE_TBL_RID = 0x94,
836 WQ_PAGE_TBL_LO = 0x98,
837 WQ_PAGE_TBL_HI = 0x9c,
838 CQ_PAGE_TBL_LO = 0xa0,
839 CQ_PAGE_TBL_HI = 0xa4,
840 MAC_ADDR_IDX = 0xa8, /* Use semaphore */
841 MAC_ADDR_DATA = 0xac, /* Use semaphore */
842 COS_DFLT_CQ1 = 0xb0,
843 COS_DFLT_CQ2 = 0xb4,
844 ETYPE_SKIP1 = 0xb8,
845 ETYPE_SKIP2 = 0xbc,
846 SPLT_HDR = 0xc0,
847 FC_PAUSE_THRES = 0xc4,
848 NIC_PAUSE_THRES = 0xc8,
849 FC_ETHERTYPE = 0xcc,
850 FC_RCV_CFG = 0xd0,
851 NIC_RCV_CFG = 0xd4,
852 FC_COS_TAGS = 0xd8,
853 NIC_COS_TAGS = 0xdc,
854 MGMT_RCV_CFG = 0xe0,
855 RT_IDX = 0xe4,
856 RT_DATA = 0xe8,
857 RSVD7 = 0xec,
858 XG_SERDES_ADDR = 0xf0,
859 XG_SERDES_DATA = 0xf4,
860 PRB_MX_ADDR = 0xf8, /* Use semaphore */
861 PRB_MX_DATA = 0xfc, /* Use semaphore */
862};
863
864#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
865#define SMALL_BUFFER_SIZE 256
866#define SMALL_BUF_MAP_SIZE SMALL_BUFFER_SIZE
867#define SPLT_SETTING FSC_DBRST_1024
868#define SPLT_LEN 0
869#define QLGE_SB_PAD 0
870#else
871#define SMALL_BUFFER_SIZE 512
872#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
873#define SPLT_SETTING FSC_SH
874#define SPLT_LEN (SPLT_HDR_EP | \
875 min(SMALL_BUF_MAP_SIZE, 1023))
876#define QLGE_SB_PAD 32
877#endif
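/* Worked example (illustrative): in the #else branch above,
 *	SMALL_BUF_MAP_SIZE = 512 / 2 = 256
 *	SPLT_LEN = SPLT_HDR_EP | min(256, 1023) = (1 << 31) | 256
 * whereas the CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS branch maps the
 * full 256-byte small buffer and leaves SPLT_LEN at 0.
 */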
878
879/*
880 * CAM output format.
881 */
882enum {
883 CAM_OUT_ROUTE_FC = 0,
884 CAM_OUT_ROUTE_NIC = 1,
885 CAM_OUT_FUNC_SHIFT = 2,
886 CAM_OUT_RV = (1 << 4),
887 CAM_OUT_SH = (1 << 15),
888 CAM_OUT_CQ_ID_SHIFT = 5,
889};
890
891/*
892 * Mailbox definitions
893 */
894enum {
895 /* Asynchronous Event Notifications */
896 AEN_SYS_ERR = 0x00008002,
897 AEN_LINK_UP = 0x00008011,
898 AEN_LINK_DOWN = 0x00008012,
899 AEN_IDC_CMPLT = 0x00008100,
900 AEN_IDC_REQ = 0x00008101,
901 AEN_IDC_EXT = 0x00008102,
902 AEN_DCBX_CHG = 0x00008110,
903 AEN_AEN_LOST = 0x00008120,
904 AEN_AEN_SFP_IN = 0x00008130,
905 AEN_AEN_SFP_OUT = 0x00008131,
906 AEN_FW_INIT_DONE = 0x00008400,
907 AEN_FW_INIT_FAIL = 0x00008401,
908
909 /* Mailbox Command Opcodes. */
910 MB_CMD_NOP = 0x00000000,
911 MB_CMD_EX_FW = 0x00000002,
912 MB_CMD_MB_TEST = 0x00000006,
913 MB_CMD_CSUM_TEST = 0x00000007, /* Verify Checksum */
914 MB_CMD_ABOUT_FW = 0x00000008,
915 MB_CMD_COPY_RISC_RAM = 0x0000000a,
916 MB_CMD_LOAD_RISC_RAM = 0x0000000b,
917 MB_CMD_DUMP_RISC_RAM = 0x0000000c,
918 MB_CMD_WRITE_RAM = 0x0000000d,
919 MB_CMD_INIT_RISC_RAM = 0x0000000e,
920 MB_CMD_READ_RAM = 0x0000000f,
921 MB_CMD_STOP_FW = 0x00000014,
922 MB_CMD_MAKE_SYS_ERR = 0x0000002a,
923 MB_CMD_WRITE_SFP = 0x00000030,
924 MB_CMD_READ_SFP = 0x00000031,
925 MB_CMD_INIT_FW = 0x00000060,
926 MB_CMD_GET_IFCB = 0x00000061,
927 MB_CMD_GET_FW_STATE = 0x00000069,
928 MB_CMD_IDC_REQ = 0x00000100, /* Inter-Driver Communication */
929 MB_CMD_IDC_ACK = 0x00000101, /* Inter-Driver Communication */
930 MB_CMD_SET_WOL_MODE = 0x00000110, /* Wake On Lan */
931 MB_WOL_DISABLE = 0,
932 MB_WOL_MAGIC_PKT = (1 << 1),
933 MB_WOL_FLTR = (1 << 2),
934 MB_WOL_UCAST = (1 << 3),
935 MB_WOL_MCAST = (1 << 4),
936 MB_WOL_BCAST = (1 << 5),
937 MB_WOL_LINK_UP = (1 << 6),
938 MB_WOL_LINK_DOWN = (1 << 7),
939 MB_WOL_MODE_ON = (1 << 16), /* Wake on Lan Mode on */
940 MB_CMD_SET_WOL_FLTR = 0x00000111, /* Wake On Lan Filter */
941 MB_CMD_CLEAR_WOL_FLTR = 0x00000112, /* Wake On Lan Filter */
942 MB_CMD_SET_WOL_MAGIC = 0x00000113, /* Wake On Lan Magic Packet */
943 MB_CMD_CLEAR_WOL_MAGIC = 0x00000114,/* Wake On Lan Magic Packet */
944 MB_CMD_SET_WOL_IMMED = 0x00000115,
945 MB_CMD_PORT_RESET = 0x00000120,
946 MB_CMD_SET_PORT_CFG = 0x00000122,
947 MB_CMD_GET_PORT_CFG = 0x00000123,
948 MB_CMD_GET_LINK_STS = 0x00000124,
949 MB_CMD_SET_LED_CFG = 0x00000125, /* Set LED Configuration Register */
950 QL_LED_BLINK = 0x03e803e8,
951 MB_CMD_GET_LED_CFG = 0x00000126, /* Get LED Configuration Register */
952 MB_CMD_SET_MGMNT_TFK_CTL = 0x00000160, /* Set Mgmnt Traffic Control */
953 MB_SET_MPI_TFK_STOP = (1 << 0),
954 MB_SET_MPI_TFK_RESUME = (1 << 1),
955 MB_CMD_GET_MGMNT_TFK_CTL = 0x00000161, /* Get Mgmnt Traffic Control */
956 MB_GET_MPI_TFK_STOPPED = (1 << 0),
957 MB_GET_MPI_TFK_FIFO_EMPTY = (1 << 1),
958 /* Sub-commands for IDC request.
959 * This describes the reason for the
960 * IDC request.
961 */
962 MB_CMD_IOP_NONE = 0x0000,
963 MB_CMD_IOP_PREP_UPDATE_MPI = 0x0001,
964 MB_CMD_IOP_COMP_UPDATE_MPI = 0x0002,
965 MB_CMD_IOP_PREP_LINK_DOWN = 0x0010,
966 MB_CMD_IOP_DVR_START = 0x0100,
967 MB_CMD_IOP_FLASH_ACC = 0x0101,
968 MB_CMD_IOP_RESTART_MPI = 0x0102,
969 MB_CMD_IOP_CORE_DUMP_MPI = 0x0103,
970
971 /* Mailbox Command Status. */
972 MB_CMD_STS_GOOD = 0x00004000, /* Success. */
973 MB_CMD_STS_INTRMDT = 0x00001000, /* Intermediate Complete. */
974 MB_CMD_STS_INVLD_CMD = 0x00004001, /* Invalid. */
975 MB_CMD_STS_XFC_ERR = 0x00004002, /* Interface Error. */
976 MB_CMD_STS_CSUM_ERR = 0x00004003, /* Csum Error. */
977 MB_CMD_STS_ERR = 0x00004005, /* System Error. */
978 MB_CMD_STS_PARAM_ERR = 0x00004006, /* Parameter Error. */
979};
980
981struct mbox_params {
982 u32 mbox_in[MAILBOX_COUNT];
983 u32 mbox_out[MAILBOX_COUNT];
984 int in_count;
985 int out_count;
986};
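/* Minimal usage sketch (hypothetical helper, not from the original
 * driver; the out_count of 3 is an assumption about how many reply words
 * the firmware returns for this opcode):
 */
static inline void qlge_sketch_prep_about_fw(struct mbox_params *mbcp)
{
	mbcp->mbox_in[0] = MB_CMD_ABOUT_FW;	/* opcode goes in mailbox 0 */
	mbcp->in_count = 1;			/* one request word */
	mbcp->out_count = 3;			/* expected reply words (assumed) */
}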
987
988struct flash_params_8012 {
989 u8 dev_id_str[4];
990 __le16 size;
991 __le16 csum;
992 __le16 ver;
993 __le16 sub_dev_id;
994 u8 mac_addr[6];
995 __le16 res;
996};
997
998/* 8000 device's flash is a different structure
999 * at a different offset in flash.
1000 */
1001#define FUNC0_FLASH_OFFSET 0x140200
1002#define FUNC1_FLASH_OFFSET 0x140600
1003
1004/* Flash related data structures. */
1005struct flash_params_8000 {
1006 u8 dev_id_str[4]; /* "8000" */
1007 __le16 ver;
1008 __le16 size;
1009 __le16 csum;
1010 __le16 reserved0;
1011 __le16 total_size;
1012 __le16 entry_count;
1013 u8 data_type0;
1014 u8 data_size0;
1015 u8 mac_addr[6];
1016 u8 data_type1;
1017 u8 data_size1;
1018 u8 mac_addr1[6];
1019 u8 data_type2;
1020 u8 data_size2;
1021 __le16 vlan_id;
1022 u8 data_type3;
1023 u8 data_size3;
1024 __le16 last;
1025 u8 reserved1[464];
1026 __le16 subsys_ven_id;
1027 __le16 subsys_dev_id;
1028 u8 reserved2[4];
1029};
1030
1031union flash_params {
1032 struct flash_params_8012 flash_params_8012;
1033 struct flash_params_8000 flash_params_8000;
1034};
1035
1036/*
1037 * doorbell space for the rx ring context
1038 */
1039struct rx_doorbell_context {
1040 u32 cnsmr_idx; /* 0x00 */
1041 u32 valid; /* 0x04 */
1042 u32 reserved[4]; /* 0x08-0x14 */
1043 u32 lbq_prod_idx; /* 0x18 */
1044 u32 sbq_prod_idx; /* 0x1c */
1045};
1046
1047/*
1048 * doorbell space for the tx ring context
1049 */
1050struct tx_doorbell_context {
1051 u32 prod_idx; /* 0x00 */
1052 u32 valid; /* 0x04 */
1053 u32 reserved[4]; /* 0x08-0x14 */
1054 u32 lbq_prod_idx; /* 0x18 */
1055 u32 sbq_prod_idx; /* 0x1c */
1056};
1057
1058/* DATA STRUCTURES SHARED WITH HARDWARE. */
1059struct tx_buf_desc {
1060 __le64 addr;
1061 __le32 len;
1062#define TX_DESC_LEN_MASK 0x000fffff
1063#define TX_DESC_C 0x40000000
1064#define TX_DESC_E 0x80000000
1065} __packed;
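/* Minimal sketch (hypothetical helper, not from the original driver):
 * filling one shared tx_buf_desc for a DMA-mapped segment; the E bit is
 * assumed to mark the final descriptor of a frame.
 */
static inline void qlge_sketch_fill_tbd(struct tx_buf_desc *tbd,
					dma_addr_t dma, u32 len, int last)
{
	tbd->addr = cpu_to_le64(dma);
	tbd->len = cpu_to_le32((len & TX_DESC_LEN_MASK) |
			       (last ? TX_DESC_E : 0));
}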
1066
1067/*
1068 * IOCB Definitions...
1069 */
1070
1071#define OPCODE_OB_MAC_IOCB 0x01
1072#define OPCODE_OB_MAC_TSO_IOCB 0x02
1073#define OPCODE_IB_MAC_IOCB 0x20
1074#define OPCODE_IB_MPI_IOCB 0x21
1075#define OPCODE_IB_AE_IOCB 0x3f
1076
1077struct ob_mac_iocb_req {
1078 u8 opcode;
1079 u8 flags1;
1080#define OB_MAC_IOCB_REQ_OI 0x01
1081#define OB_MAC_IOCB_REQ_I 0x02
1082#define OB_MAC_IOCB_REQ_D 0x08
1083#define OB_MAC_IOCB_REQ_F 0x10
1084 u8 flags2;
1085 u8 flags3;
1086#define OB_MAC_IOCB_DFP 0x02
1087#define OB_MAC_IOCB_V 0x04
1088 __le32 reserved1[2];
1089 __le16 frame_len;
1090#define OB_MAC_IOCB_LEN_MASK 0x3ffff
1091 __le16 reserved2;
1092 u32 tid;
1093 u32 txq_idx;
1094 __le32 reserved3;
1095 __le16 vlan_tci;
1096 __le16 reserved4;
1097 struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
1098} __packed;
1099
1100struct ob_mac_iocb_rsp {
1101 u8 opcode; /* */
1102 u8 flags1; /* */
1103#define OB_MAC_IOCB_RSP_OI 0x01 /* */
1104#define OB_MAC_IOCB_RSP_I 0x02 /* */
1105#define OB_MAC_IOCB_RSP_E 0x08 /* */
1106#define OB_MAC_IOCB_RSP_S 0x10 /* too Short */
1107#define OB_MAC_IOCB_RSP_L 0x20 /* too Large */
1108#define OB_MAC_IOCB_RSP_P 0x40 /* Padded */
1109 u8 flags2; /* */
1110 u8 flags3; /* */
1111#define OB_MAC_IOCB_RSP_B 0x80 /* */
1112 u32 tid;
1113 u32 txq_idx;
1114 __le32 reserved[13];
1115} __packed;
1116
1117struct ob_mac_tso_iocb_req {
1118 u8 opcode;
1119 u8 flags1;
1120#define OB_MAC_TSO_IOCB_OI 0x01
1121#define OB_MAC_TSO_IOCB_I 0x02
1122#define OB_MAC_TSO_IOCB_D 0x08
1123#define OB_MAC_TSO_IOCB_IP4 0x40
1124#define OB_MAC_TSO_IOCB_IP6 0x80
1125 u8 flags2;
1126#define OB_MAC_TSO_IOCB_LSO 0x20
1127#define OB_MAC_TSO_IOCB_UC 0x40
1128#define OB_MAC_TSO_IOCB_TC 0x80
1129 u8 flags3;
1130#define OB_MAC_TSO_IOCB_IC 0x01
1131#define OB_MAC_TSO_IOCB_DFP 0x02
1132#define OB_MAC_TSO_IOCB_V 0x04
1133 __le32 reserved1[2];
1134 __le32 frame_len;
1135 u32 tid;
1136 u32 txq_idx;
1137 __le16 total_hdrs_len;
1138 __le16 net_trans_offset;
1139#define OB_MAC_TRANSPORT_HDR_SHIFT 6
1140 __le16 vlan_tci;
1141 __le16 mss;
1142 struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
1143} __packed;
1144
1145struct ob_mac_tso_iocb_rsp {
1146 u8 opcode;
1147 u8 flags1;
1148#define OB_MAC_TSO_IOCB_RSP_OI 0x01
1149#define OB_MAC_TSO_IOCB_RSP_I 0x02
1150#define OB_MAC_TSO_IOCB_RSP_E 0x08
1151#define OB_MAC_TSO_IOCB_RSP_S 0x10
1152#define OB_MAC_TSO_IOCB_RSP_L 0x20
1153#define OB_MAC_TSO_IOCB_RSP_P 0x40
1154 u8 flags2; /* */
1155 u8 flags3; /* */
1156#define OB_MAC_TSO_IOCB_RSP_B 0x8000
1157 u32 tid;
1158 u32 txq_idx;
1159 __le32 reserved2[13];
1160} __packed;
1161
1162struct ib_mac_iocb_rsp {
1163 u8 opcode; /* 0x20 */
1164 u8 flags1;
 1165#define IB_MAC_IOCB_RSP_OI 0x01 /* Override intr delay */
 1166#define IB_MAC_IOCB_RSP_I 0x02 /* Disable Intr Generation */
1167#define IB_MAC_CSUM_ERR_MASK 0x1c /* A mask to use for csum errs */
1168#define IB_MAC_IOCB_RSP_TE 0x04 /* Checksum error */
1169#define IB_MAC_IOCB_RSP_NU 0x08 /* No checksum rcvd */
1170#define IB_MAC_IOCB_RSP_IE 0x10 /* IPv4 checksum error */
1171#define IB_MAC_IOCB_RSP_M_MASK 0x60 /* Multicast info */
1172#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* Not mcast frame */
1173#define IB_MAC_IOCB_RSP_M_HASH 0x20 /* HASH mcast frame */
1174#define IB_MAC_IOCB_RSP_M_REG 0x40 /* Registered mcast frame */
1175#define IB_MAC_IOCB_RSP_M_PROM 0x60 /* Promiscuous mcast frame */
1176#define IB_MAC_IOCB_RSP_B 0x80 /* Broadcast frame */
1177 u8 flags2;
1178#define IB_MAC_IOCB_RSP_P 0x01 /* Promiscuous frame */
1179#define IB_MAC_IOCB_RSP_V 0x02 /* Vlan tag present */
1180#define IB_MAC_IOCB_RSP_ERR_MASK 0x1c /* */
1181#define IB_MAC_IOCB_RSP_ERR_CODE_ERR 0x04
1182#define IB_MAC_IOCB_RSP_ERR_OVERSIZE 0x08
1183#define IB_MAC_IOCB_RSP_ERR_UNDERSIZE 0x10
1184#define IB_MAC_IOCB_RSP_ERR_PREAMBLE 0x14
1185#define IB_MAC_IOCB_RSP_ERR_FRAME_LEN 0x18
1186#define IB_MAC_IOCB_RSP_ERR_CRC 0x1c
1187#define IB_MAC_IOCB_RSP_U 0x20 /* UDP packet */
1188#define IB_MAC_IOCB_RSP_T 0x40 /* TCP packet */
1189#define IB_MAC_IOCB_RSP_FO 0x80 /* Failover port */
1190 u8 flags3;
1191#define IB_MAC_IOCB_RSP_RSS_MASK 0x07 /* RSS mask */
1192#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* No RSS match */
1193#define IB_MAC_IOCB_RSP_M_IPV4 0x04 /* IPv4 RSS match */
1194#define IB_MAC_IOCB_RSP_M_IPV6 0x02 /* IPv6 RSS match */
1195#define IB_MAC_IOCB_RSP_M_TCP_V4 0x05 /* TCP with IPv4 */
1196#define IB_MAC_IOCB_RSP_M_TCP_V6 0x03 /* TCP with IPv6 */
1197#define IB_MAC_IOCB_RSP_V4 0x08 /* IPV4 */
1198#define IB_MAC_IOCB_RSP_V6 0x10 /* IPV6 */
1199#define IB_MAC_IOCB_RSP_IH 0x20 /* Split after IP header */
1200#define IB_MAC_IOCB_RSP_DS 0x40 /* data is in small buffer */
1201#define IB_MAC_IOCB_RSP_DL 0x80 /* data is in large buffer */
1202 __le32 data_len; /* */
1203 __le64 data_addr; /* */
1204 __le32 rss; /* */
1205 __le16 vlan_id; /* 12 bits */
1206#define IB_MAC_IOCB_RSP_C 0x1000 /* VLAN CFI bit */
1207#define IB_MAC_IOCB_RSP_COS_SHIFT 12 /* class of service value */
1208#define IB_MAC_IOCB_RSP_VLAN_MASK 0x0ffff
1209
1210 __le16 reserved1;
1211 __le32 reserved2[6];
1212 u8 reserved3[3];
1213 u8 flags4;
1214#define IB_MAC_IOCB_RSP_HV 0x20
1215#define IB_MAC_IOCB_RSP_HS 0x40
1216#define IB_MAC_IOCB_RSP_HL 0x80
1217 __le32 hdr_len; /* */
1218 __le64 hdr_addr; /* */
1219} __packed;
1220
1221struct ib_ae_iocb_rsp {
1222 u8 opcode;
1223 u8 flags1;
1224#define IB_AE_IOCB_RSP_OI 0x01
1225#define IB_AE_IOCB_RSP_I 0x02
1226 u8 event;
1227#define LINK_UP_EVENT 0x00
1228#define LINK_DOWN_EVENT 0x01
1229#define CAM_LOOKUP_ERR_EVENT 0x06
1230#define SOFT_ECC_ERROR_EVENT 0x07
1231#define MGMT_ERR_EVENT 0x08
1232#define TEN_GIG_MAC_EVENT 0x09
1233#define GPI0_H2L_EVENT 0x10
1234#define GPI0_L2H_EVENT 0x20
1235#define GPI1_H2L_EVENT 0x11
1236#define GPI1_L2H_EVENT 0x21
1237#define PCI_ERR_ANON_BUF_RD 0x40
1238 u8 q_id;
1239 __le32 reserved[15];
1240} __packed;
1241
1242/*
1243 * These three structures are for generic
1244 * handling of ib and ob iocbs.
1245 */
1246struct ql_net_rsp_iocb {
1247 u8 opcode;
1248 u8 flags0;
1249 __le16 length;
1250 __le32 tid;
1251 __le32 reserved[14];
1252} __packed;
1253
1254struct net_req_iocb {
1255 u8 opcode;
1256 u8 flags0;
1257 __le16 flags1;
1258 __le32 tid;
1259 __le32 reserved1[30];
1260} __packed;
1261
1262/*
1263 * tx ring initialization control block for chip.
1264 * It is defined as:
1265 * "Work Queue Initialization Control Block"
1266 */
1267struct wqicb {
1268 __le16 len;
1269#define Q_LEN_V (1 << 4)
1270#define Q_LEN_CPP_CONT 0x0000
1271#define Q_LEN_CPP_16 0x0001
1272#define Q_LEN_CPP_32 0x0002
1273#define Q_LEN_CPP_64 0x0003
1274#define Q_LEN_CPP_512 0x0006
1275 __le16 flags;
1276#define Q_PRI_SHIFT 1
1277#define Q_FLAGS_LC 0x1000
1278#define Q_FLAGS_LB 0x2000
1279#define Q_FLAGS_LI 0x4000
1280#define Q_FLAGS_LO 0x8000
1281 __le16 cq_id_rss;
1282#define Q_CQ_ID_RSS_RV 0x8000
1283 __le16 rid;
1284 __le64 addr;
1285 __le64 cnsmr_idx_addr;
1286} __packed;
1287
1288/*
1289 * rx ring initialization control block for chip.
1290 * It is defined as:
1291 * "Completion Queue Initialization Control Block"
1292 */
1293struct cqicb {
1294 u8 msix_vect;
1295 u8 reserved1;
1296 u8 reserved2;
1297 u8 flags;
1298#define FLAGS_LV 0x08
1299#define FLAGS_LS 0x10
1300#define FLAGS_LL 0x20
1301#define FLAGS_LI 0x40
1302#define FLAGS_LC 0x80
1303 __le16 len;
1304#define LEN_V (1 << 4)
1305#define LEN_CPP_CONT 0x0000
1306#define LEN_CPP_32 0x0001
1307#define LEN_CPP_64 0x0002
1308#define LEN_CPP_128 0x0003
1309 __le16 rid;
1310 __le64 addr;
1311 __le64 prod_idx_addr;
1312 __le16 pkt_delay;
1313 __le16 irq_delay;
1314 __le64 lbq_addr;
1315 __le16 lbq_buf_size;
1316 __le16 lbq_len; /* entry count */
1317 __le64 sbq_addr;
1318 __le16 sbq_buf_size;
1319 __le16 sbq_len; /* entry count */
1320} __packed;
1321
1322struct ricb {
1323 u8 base_cq;
1324#define RSS_L4K 0x80
1325 u8 flags;
1326#define RSS_L6K 0x01
1327#define RSS_LI 0x02
1328#define RSS_LB 0x04
1329#define RSS_LM 0x08
1330#define RSS_RI4 0x10
1331#define RSS_RT4 0x20
1332#define RSS_RI6 0x40
1333#define RSS_RT6 0x80
1334 __le16 mask;
1335 u8 hash_cq_id[1024];
1336 __le32 ipv6_hash_key[10];
1337 __le32 ipv4_hash_key[4];
1338} __packed;
1339
1340/* SOFTWARE/DRIVER DATA STRUCTURES. */
1341
1342struct oal {
1343 struct tx_buf_desc oal[TX_DESC_PER_OAL];
1344};
1345
1346struct map_list {
1347 DEFINE_DMA_UNMAP_ADDR(mapaddr);
1348 DEFINE_DMA_UNMAP_LEN(maplen);
1349};
1350
1351struct tx_ring_desc {
1352 struct sk_buff *skb;
1353 struct ob_mac_iocb_req *queue_entry;
1354 u32 index;
1355 struct oal oal;
1356 struct map_list map[MAX_SKB_FRAGS + 1];
1357 int map_cnt;
1358 struct tx_ring_desc *next;
1359};
1360
1361struct page_chunk {
1362 struct page *page; /* master page */
1363 char *va; /* virt addr for this chunk */
1364 u64 map; /* mapping for master */
1365 unsigned int offset; /* offset for this chunk */
1366 unsigned int last_flag; /* flag set for last chunk in page */
1367};
1368
1369struct bq_desc {
1370 union {
1371 struct page_chunk pg_chunk;
1372 struct sk_buff *skb;
1373 } p;
1374 __le64 *addr;
1375 u32 index;
1376 DEFINE_DMA_UNMAP_ADDR(mapaddr);
1377 DEFINE_DMA_UNMAP_LEN(maplen);
1378};
1379
1380#define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count))
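/* Worked example (illustrative): QL_TXQ_IDX() just folds the submitting
 * CPU onto the available tx rings, e.g. with qdev->tx_ring_count == 4 a
 * call from CPU 6 selects ring 6 % 4 == 2; the skb argument is unused.
 */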
1381
1382struct tx_ring {
1383 /*
1384 * queue info.
1385 */
1386 struct wqicb wqicb; /* structure used to inform chip of new queue */
1387 void *wq_base; /* pci_alloc:virtual addr for tx */
1388 dma_addr_t wq_base_dma; /* pci_alloc:dma addr for tx */
1389 __le32 *cnsmr_idx_sh_reg; /* shadow copy of consumer idx */
1390 dma_addr_t cnsmr_idx_sh_reg_dma; /* dma-shadow copy of consumer */
1391 u32 wq_size; /* size in bytes of queue area */
1392 u32 wq_len; /* number of entries in queue */
1393 void __iomem *prod_idx_db_reg; /* doorbell area index reg at offset 0x00 */
1394 void __iomem *valid_db_reg; /* doorbell area valid reg at offset 0x04 */
1395 u16 prod_idx; /* current value for prod idx */
1396 u16 cq_id; /* completion (rx) queue for tx completions */
1397 u8 wq_id; /* queue id for this entry */
1398 u8 reserved1[3];
1399 struct tx_ring_desc *q; /* descriptor list for the queue */
1400 spinlock_t lock;
1401 atomic_t tx_count; /* counts down for every outstanding IO */
1402 atomic_t queue_stopped; /* Turns queue off when full. */
1403 struct delayed_work tx_work;
1404 struct ql_adapter *qdev;
1405 u64 tx_packets;
1406 u64 tx_bytes;
1407 u64 tx_errors;
1408};
1409
1410/*
1411 * Type of inbound queue.
1412 */
1413enum {
1414 DEFAULT_Q = 2, /* Handles slow queue and chip/MPI events. */
1415 TX_Q = 3, /* Handles outbound completions. */
1416 RX_Q = 4, /* Handles inbound completions. */
1417};
1418
1419struct rx_ring {
1420 struct cqicb cqicb; /* The chip's completion queue init control block. */
1421
1422 /* Completion queue elements. */
1423 void *cq_base;
1424 dma_addr_t cq_base_dma;
1425 u32 cq_size;
1426 u32 cq_len;
1427 u16 cq_id;
1428 __le32 *prod_idx_sh_reg; /* Shadowed producer register. */
1429 dma_addr_t prod_idx_sh_reg_dma;
1430 void __iomem *cnsmr_idx_db_reg; /* PCI doorbell mem area + 0 */
1431 u32 cnsmr_idx; /* current sw idx */
1432 struct ql_net_rsp_iocb *curr_entry; /* next entry on queue */
1433 void __iomem *valid_db_reg; /* PCI doorbell mem area + 0x04 */
1434
1435 /* Large buffer queue elements. */
1436 u32 lbq_len; /* entry count */
1437 u32 lbq_size; /* size in bytes of queue */
1438 u32 lbq_buf_size;
1439 void *lbq_base;
1440 dma_addr_t lbq_base_dma;
1441 void *lbq_base_indirect;
1442 dma_addr_t lbq_base_indirect_dma;
1443 struct page_chunk pg_chunk; /* current page for chunks */
1444 struct bq_desc *lbq; /* array of control blocks */
1445 void __iomem *lbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x18 */
1446 u32 lbq_prod_idx; /* current sw prod idx */
1447 u32 lbq_curr_idx; /* next entry we expect */
1448 u32 lbq_clean_idx; /* beginning of new descs */
1449 u32 lbq_free_cnt; /* free buffer desc cnt */
1450
1451 /* Small buffer queue elements. */
1452 u32 sbq_len; /* entry count */
1453 u32 sbq_size; /* size in bytes of queue */
1454 u32 sbq_buf_size;
1455 void *sbq_base;
1456 dma_addr_t sbq_base_dma;
1457 void *sbq_base_indirect;
1458 dma_addr_t sbq_base_indirect_dma;
1459 struct bq_desc *sbq; /* array of control blocks */
1460 void __iomem *sbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x1c */
1461 u32 sbq_prod_idx; /* current sw prod idx */
1462 u32 sbq_curr_idx; /* next entry we expect */
1463 u32 sbq_clean_idx; /* beginning of new descs */
1464 u32 sbq_free_cnt; /* free buffer desc cnt */
1465
1466 /* Misc. handler elements. */
1467 u32 type; /* Type of queue, tx, rx. */
1468 u32 irq; /* Which vector this ring is assigned. */
1469 u32 cpu; /* Which CPU this should run on. */
1470 char name[IFNAMSIZ + 5];
1471 struct napi_struct napi;
1472 u8 reserved;
1473 struct ql_adapter *qdev;
1474 u64 rx_packets;
1475 u64 rx_multicast;
1476 u64 rx_bytes;
1477 u64 rx_dropped;
1478 u64 rx_errors;
1479};
1480
1481/*
1482 * RSS Initialization Control Block
1483 */
1484struct hash_id {
1485 u8 value[4];
1486};
1487
1488struct nic_stats {
1489 /*
1490 * These stats come from offset 200h to 278h
1491 * in the XGMAC register.
1492 */
1493 u64 tx_pkts;
1494 u64 tx_bytes;
1495 u64 tx_mcast_pkts;
1496 u64 tx_bcast_pkts;
1497 u64 tx_ucast_pkts;
1498 u64 tx_ctl_pkts;
1499 u64 tx_pause_pkts;
1500 u64 tx_64_pkt;
1501 u64 tx_65_to_127_pkt;
1502 u64 tx_128_to_255_pkt;
1503 u64 tx_256_511_pkt;
1504 u64 tx_512_to_1023_pkt;
1505 u64 tx_1024_to_1518_pkt;
1506 u64 tx_1519_to_max_pkt;
1507 u64 tx_undersize_pkt;
1508 u64 tx_oversize_pkt;
1509
1510 /*
1511 * These stats come from offset 300h to 3C8h
1512 * in the XGMAC register.
1513 */
1514 u64 rx_bytes;
1515 u64 rx_bytes_ok;
1516 u64 rx_pkts;
1517 u64 rx_pkts_ok;
1518 u64 rx_bcast_pkts;
1519 u64 rx_mcast_pkts;
1520 u64 rx_ucast_pkts;
1521 u64 rx_undersize_pkts;
1522 u64 rx_oversize_pkts;
1523 u64 rx_jabber_pkts;
1524 u64 rx_undersize_fcerr_pkts;
1525 u64 rx_drop_events;
1526 u64 rx_fcerr_pkts;
1527 u64 rx_align_err;
1528 u64 rx_symbol_err;
1529 u64 rx_mac_err;
1530 u64 rx_ctl_pkts;
1531 u64 rx_pause_pkts;
1532 u64 rx_64_pkts;
1533 u64 rx_65_to_127_pkts;
1534 u64 rx_128_255_pkts;
1535 u64 rx_256_511_pkts;
1536 u64 rx_512_to_1023_pkts;
1537 u64 rx_1024_to_1518_pkts;
1538 u64 rx_1519_to_max_pkts;
1539 u64 rx_len_err_pkts;
1540 /*
1541 * These stats come from offset 500h to 5C8h
1542 * in the XGMAC register.
1543 */
1544 u64 tx_cbfc_pause_frames0;
1545 u64 tx_cbfc_pause_frames1;
1546 u64 tx_cbfc_pause_frames2;
1547 u64 tx_cbfc_pause_frames3;
1548 u64 tx_cbfc_pause_frames4;
1549 u64 tx_cbfc_pause_frames5;
1550 u64 tx_cbfc_pause_frames6;
1551 u64 tx_cbfc_pause_frames7;
1552 u64 rx_cbfc_pause_frames0;
1553 u64 rx_cbfc_pause_frames1;
1554 u64 rx_cbfc_pause_frames2;
1555 u64 rx_cbfc_pause_frames3;
1556 u64 rx_cbfc_pause_frames4;
1557 u64 rx_cbfc_pause_frames5;
1558 u64 rx_cbfc_pause_frames6;
1559 u64 rx_cbfc_pause_frames7;
1560 u64 rx_nic_fifo_drop;
1561};
1562
1563/* Firmware coredump internal register address/length pairs. */
1564enum {
1565 MPI_CORE_REGS_ADDR = 0x00030000,
1566 MPI_CORE_REGS_CNT = 127,
1567 MPI_CORE_SH_REGS_CNT = 16,
1568 TEST_REGS_ADDR = 0x00001000,
1569 TEST_REGS_CNT = 23,
1570 RMII_REGS_ADDR = 0x00001040,
1571 RMII_REGS_CNT = 64,
1572 FCMAC1_REGS_ADDR = 0x00001080,
1573 FCMAC2_REGS_ADDR = 0x000010c0,
1574 FCMAC_REGS_CNT = 64,
1575 FC1_MBX_REGS_ADDR = 0x00001100,
1576 FC2_MBX_REGS_ADDR = 0x00001240,
1577 FC_MBX_REGS_CNT = 64,
1578 IDE_REGS_ADDR = 0x00001140,
1579 IDE_REGS_CNT = 64,
1580 NIC1_MBX_REGS_ADDR = 0x00001180,
1581 NIC2_MBX_REGS_ADDR = 0x00001280,
1582 NIC_MBX_REGS_CNT = 64,
1583 SMBUS_REGS_ADDR = 0x00001200,
1584 SMBUS_REGS_CNT = 64,
1585 I2C_REGS_ADDR = 0x00001fc0,
1586 I2C_REGS_CNT = 64,
1587 MEMC_REGS_ADDR = 0x00003000,
1588 MEMC_REGS_CNT = 256,
1589 PBUS_REGS_ADDR = 0x00007c00,
1590 PBUS_REGS_CNT = 256,
1591 MDE_REGS_ADDR = 0x00010000,
1592 MDE_REGS_CNT = 6,
1593 CODE_RAM_ADDR = 0x00020000,
1594 CODE_RAM_CNT = 0x2000,
1595 MEMC_RAM_ADDR = 0x00100000,
1596 MEMC_RAM_CNT = 0x2000,
1597};
1598
1599#define MPI_COREDUMP_COOKIE 0x5555aaaa
1600struct mpi_coredump_global_header {
1601 u32 cookie;
1602 u8 idString[16];
1603 u32 timeLo;
1604 u32 timeHi;
1605 u32 imageSize;
1606 u32 headerSize;
1607 u8 info[220];
1608};
1609
1610struct mpi_coredump_segment_header {
1611 u32 cookie;
1612 u32 segNum;
1613 u32 segSize;
1614 u32 extra;
1615 u8 description[16];
1616};
1617
1618/* Firmware coredump header segment numbers. */
1619enum {
1620 CORE_SEG_NUM = 1,
1621 TEST_LOGIC_SEG_NUM = 2,
1622 RMII_SEG_NUM = 3,
1623 FCMAC1_SEG_NUM = 4,
1624 FCMAC2_SEG_NUM = 5,
1625 FC1_MBOX_SEG_NUM = 6,
1626 IDE_SEG_NUM = 7,
1627 NIC1_MBOX_SEG_NUM = 8,
1628 SMBUS_SEG_NUM = 9,
1629 FC2_MBOX_SEG_NUM = 10,
1630 NIC2_MBOX_SEG_NUM = 11,
1631 I2C_SEG_NUM = 12,
1632 MEMC_SEG_NUM = 13,
1633 PBUS_SEG_NUM = 14,
1634 MDE_SEG_NUM = 15,
1635 NIC1_CONTROL_SEG_NUM = 16,
1636 NIC2_CONTROL_SEG_NUM = 17,
1637 NIC1_XGMAC_SEG_NUM = 18,
1638 NIC2_XGMAC_SEG_NUM = 19,
1639 WCS_RAM_SEG_NUM = 20,
1640 MEMC_RAM_SEG_NUM = 21,
1641 XAUI_AN_SEG_NUM = 22,
1642 XAUI_HSS_PCS_SEG_NUM = 23,
1643 XFI_AN_SEG_NUM = 24,
1644 XFI_TRAIN_SEG_NUM = 25,
1645 XFI_HSS_PCS_SEG_NUM = 26,
1646 XFI_HSS_TX_SEG_NUM = 27,
1647 XFI_HSS_RX_SEG_NUM = 28,
1648 XFI_HSS_PLL_SEG_NUM = 29,
1649 MISC_NIC_INFO_SEG_NUM = 30,
1650 INTR_STATES_SEG_NUM = 31,
1651 CAM_ENTRIES_SEG_NUM = 32,
1652 ROUTING_WORDS_SEG_NUM = 33,
1653 ETS_SEG_NUM = 34,
1654 PROBE_DUMP_SEG_NUM = 35,
1655 ROUTING_INDEX_SEG_NUM = 36,
1656 MAC_PROTOCOL_SEG_NUM = 37,
1657 XAUI2_AN_SEG_NUM = 38,
1658 XAUI2_HSS_PCS_SEG_NUM = 39,
1659 XFI2_AN_SEG_NUM = 40,
1660 XFI2_TRAIN_SEG_NUM = 41,
1661 XFI2_HSS_PCS_SEG_NUM = 42,
1662 XFI2_HSS_TX_SEG_NUM = 43,
1663 XFI2_HSS_RX_SEG_NUM = 44,
1664 XFI2_HSS_PLL_SEG_NUM = 45,
1665 SEM_REGS_SEG_NUM = 50
1666
1667};
1668
1669/* There are 64 generic NIC registers. */
1670#define NIC_REGS_DUMP_WORD_COUNT 64
1671/* XGMAC word count. */
1672#define XGMAC_DUMP_WORD_COUNT (XGMAC_REGISTER_END / 4)
1673/* Word counts for the SERDES blocks. */
1674#define XG_SERDES_XAUI_AN_COUNT 14
1675#define XG_SERDES_XAUI_HSS_PCS_COUNT 33
1676#define XG_SERDES_XFI_AN_COUNT 14
1677#define XG_SERDES_XFI_TRAIN_COUNT 12
1678#define XG_SERDES_XFI_HSS_PCS_COUNT 15
1679#define XG_SERDES_XFI_HSS_TX_COUNT 32
1680#define XG_SERDES_XFI_HSS_RX_COUNT 32
1681#define XG_SERDES_XFI_HSS_PLL_COUNT 32
1682
1683/* There are 2 CNA ETS and 8 NIC ETS registers. */
1684#define ETS_REGS_DUMP_WORD_COUNT 10
1685
1686/* Each probe mux entry stores the probe type plus 64 entries
 1687 * that are each 64 bits in length. There are a total of
1688 * 34 (PRB_MX_ADDR_VALID_TOTAL) valid probes.
1689 */
1690#define PRB_MX_ADDR_PRB_WORD_COUNT (1 + (PRB_MX_ADDR_MAX_MUX * 2))
1691#define PRB_MX_DUMP_TOT_COUNT (PRB_MX_ADDR_PRB_WORD_COUNT * \
1692 PRB_MX_ADDR_VALID_TOTAL)
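/* Worked arithmetic (illustrative):
 *	PRB_MX_ADDR_PRB_WORD_COUNT = 1 + (64 * 2) = 129 words per probe
 *	PRB_MX_DUMP_TOT_COUNT      = 129 * 34     = 4386 words total
 */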
1693/* Each routing entry consists of 4 32-bit words.
1694 * They are route type, index, index word, and result.
1695 * There are 2 route blocks with 8 entries each and
1696 * 2 NIC blocks with 16 entries each.
 1697 * The total entry count is 48, with 4 words each.
1698 */
1699#define RT_IDX_DUMP_ENTRIES 48
1700#define RT_IDX_DUMP_WORDS_PER_ENTRY 4
1701#define RT_IDX_DUMP_TOT_WORDS (RT_IDX_DUMP_ENTRIES * \
1702 RT_IDX_DUMP_WORDS_PER_ENTRY)
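/* Worked arithmetic (illustrative): (2 * 8) + (2 * 16) = 48 entries at
 * 4 words apiece, so RT_IDX_DUMP_TOT_WORDS = 48 * 4 = 192 words.
 */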
1703/* There are 10 address blocks in filter, each with
1704 * different entry counts and different word-count-per-entry.
1705 */
1706#define MAC_ADDR_DUMP_ENTRIES \
1707 ((MAC_ADDR_MAX_CAM_ENTRIES * MAC_ADDR_MAX_CAM_WCOUNT) + \
1708 (MAC_ADDR_MAX_MULTICAST_ENTRIES * MAC_ADDR_MAX_MULTICAST_WCOUNT) + \
1709 (MAC_ADDR_MAX_VLAN_ENTRIES * MAC_ADDR_MAX_VLAN_WCOUNT) + \
1710 (MAC_ADDR_MAX_MCAST_FLTR_ENTRIES * MAC_ADDR_MAX_MCAST_FLTR_WCOUNT) + \
1711 (MAC_ADDR_MAX_FC_MAC_ENTRIES * MAC_ADDR_MAX_FC_MAC_WCOUNT) + \
1712 (MAC_ADDR_MAX_MGMT_MAC_ENTRIES * MAC_ADDR_MAX_MGMT_MAC_WCOUNT) + \
1713 (MAC_ADDR_MAX_MGMT_VLAN_ENTRIES * MAC_ADDR_MAX_MGMT_VLAN_WCOUNT) + \
1714 (MAC_ADDR_MAX_MGMT_V4_ENTRIES * MAC_ADDR_MAX_MGMT_V4_WCOUNT) + \
1715 (MAC_ADDR_MAX_MGMT_V6_ENTRIES * MAC_ADDR_MAX_MGMT_V6_WCOUNT) + \
1716 (MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES * MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT))
1717#define MAC_ADDR_DUMP_WORDS_PER_ENTRY 2
1718#define MAC_ADDR_DUMP_TOT_WORDS (MAC_ADDR_DUMP_ENTRIES * \
1719 MAC_ADDR_DUMP_WORDS_PER_ENTRY)
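/* Worked arithmetic (illustrative): the sum above comes to 9856, so
 * MAC_ADDR_DUMP_TOT_WORDS = 9856 * 2 = 19712 words in the coredump.
 */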
 1720/* Maximum of 4 functions whose semaphore registers are
1721 * in the coredump.
1722 */
1723#define MAX_SEMAPHORE_FUNCTIONS 4
1724/* Defines for access the MPI shadow registers. */
1725#define RISC_124 0x0003007c
1726#define RISC_127 0x0003007f
1727#define SHADOW_OFFSET 0xb0000000
1728#define SHADOW_REG_SHIFT 20
1729
1730struct ql_nic_misc {
1731 u32 rx_ring_count;
1732 u32 tx_ring_count;
1733 u32 intr_count;
1734 u32 function;
1735};
1736
1737struct ql_reg_dump {
1738
1739 /* segment 0 */
1740 struct mpi_coredump_global_header mpi_global_header;
1741
1742 /* segment 16 */
1743 struct mpi_coredump_segment_header nic_regs_seg_hdr;
1744 u32 nic_regs[64];
1745
1746 /* segment 30 */
1747 struct mpi_coredump_segment_header misc_nic_seg_hdr;
1748 struct ql_nic_misc misc_nic_info;
1749
1750 /* segment 31 */
1751 /* one interrupt state for each CQ */
1752 struct mpi_coredump_segment_header intr_states_seg_hdr;
1753 u32 intr_states[MAX_CPUS];
1754
1755 /* segment 32 */
1756 /* 3 cam words each for 16 unicast,
1757 * 2 cam words for each of 32 multicast.
1758 */
1759 struct mpi_coredump_segment_header cam_entries_seg_hdr;
1760 u32 cam_entries[(16 * 3) + (32 * 3)];
1761
1762 /* segment 33 */
1763 struct mpi_coredump_segment_header nic_routing_words_seg_hdr;
1764 u32 nic_routing_words[16];
1765
1766 /* segment 34 */
1767 struct mpi_coredump_segment_header ets_seg_hdr;
1768 u32 ets[8+2];
1769};
1770
1771struct ql_mpi_coredump {
1772 /* segment 0 */
1773 struct mpi_coredump_global_header mpi_global_header;
1774
1775 /* segment 1 */
1776 struct mpi_coredump_segment_header core_regs_seg_hdr;
1777 u32 mpi_core_regs[MPI_CORE_REGS_CNT];
1778 u32 mpi_core_sh_regs[MPI_CORE_SH_REGS_CNT];
1779
1780 /* segment 2 */
1781 struct mpi_coredump_segment_header test_logic_regs_seg_hdr;
1782 u32 test_logic_regs[TEST_REGS_CNT];
1783
1784 /* segment 3 */
1785 struct mpi_coredump_segment_header rmii_regs_seg_hdr;
1786 u32 rmii_regs[RMII_REGS_CNT];
1787
1788 /* segment 4 */
1789 struct mpi_coredump_segment_header fcmac1_regs_seg_hdr;
1790 u32 fcmac1_regs[FCMAC_REGS_CNT];
1791
1792 /* segment 5 */
1793 struct mpi_coredump_segment_header fcmac2_regs_seg_hdr;
1794 u32 fcmac2_regs[FCMAC_REGS_CNT];
1795
1796 /* segment 6 */
1797 struct mpi_coredump_segment_header fc1_mbx_regs_seg_hdr;
1798 u32 fc1_mbx_regs[FC_MBX_REGS_CNT];
1799
1800 /* segment 7 */
1801 struct mpi_coredump_segment_header ide_regs_seg_hdr;
1802 u32 ide_regs[IDE_REGS_CNT];
1803
1804 /* segment 8 */
1805 struct mpi_coredump_segment_header nic1_mbx_regs_seg_hdr;
1806 u32 nic1_mbx_regs[NIC_MBX_REGS_CNT];
1807
1808 /* segment 9 */
1809 struct mpi_coredump_segment_header smbus_regs_seg_hdr;
1810 u32 smbus_regs[SMBUS_REGS_CNT];
1811
1812 /* segment 10 */
1813 struct mpi_coredump_segment_header fc2_mbx_regs_seg_hdr;
1814 u32 fc2_mbx_regs[FC_MBX_REGS_CNT];
1815
1816 /* segment 11 */
1817 struct mpi_coredump_segment_header nic2_mbx_regs_seg_hdr;
1818 u32 nic2_mbx_regs[NIC_MBX_REGS_CNT];
1819
1820 /* segment 12 */
1821 struct mpi_coredump_segment_header i2c_regs_seg_hdr;
1822 u32 i2c_regs[I2C_REGS_CNT];
1823 /* segment 13 */
1824 struct mpi_coredump_segment_header memc_regs_seg_hdr;
1825 u32 memc_regs[MEMC_REGS_CNT];
1826
1827 /* segment 14 */
1828 struct mpi_coredump_segment_header pbus_regs_seg_hdr;
1829 u32 pbus_regs[PBUS_REGS_CNT];
1830
1831 /* segment 15 */
1832 struct mpi_coredump_segment_header mde_regs_seg_hdr;
1833 u32 mde_regs[MDE_REGS_CNT];
1834
1835 /* segment 16 */
1836 struct mpi_coredump_segment_header nic_regs_seg_hdr;
1837 u32 nic_regs[NIC_REGS_DUMP_WORD_COUNT];
1838
1839 /* segment 17 */
1840 struct mpi_coredump_segment_header nic2_regs_seg_hdr;
1841 u32 nic2_regs[NIC_REGS_DUMP_WORD_COUNT];
1842
1843 /* segment 18 */
1844 struct mpi_coredump_segment_header xgmac1_seg_hdr;
1845 u32 xgmac1[XGMAC_DUMP_WORD_COUNT];
1846
1847 /* segment 19 */
1848 struct mpi_coredump_segment_header xgmac2_seg_hdr;
1849 u32 xgmac2[XGMAC_DUMP_WORD_COUNT];
1850
1851 /* segment 20 */
1852 struct mpi_coredump_segment_header code_ram_seg_hdr;
1853 u32 code_ram[CODE_RAM_CNT];
1854
1855 /* segment 21 */
1856 struct mpi_coredump_segment_header memc_ram_seg_hdr;
1857 u32 memc_ram[MEMC_RAM_CNT];
1858
1859 /* segment 22 */
1860 struct mpi_coredump_segment_header xaui_an_hdr;
1861 u32 serdes_xaui_an[XG_SERDES_XAUI_AN_COUNT];
1862
1863 /* segment 23 */
1864 struct mpi_coredump_segment_header xaui_hss_pcs_hdr;
1865 u32 serdes_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];
1866
1867 /* segment 24 */
1868 struct mpi_coredump_segment_header xfi_an_hdr;
1869 u32 serdes_xfi_an[XG_SERDES_XFI_AN_COUNT];
1870
1871 /* segment 25 */
1872 struct mpi_coredump_segment_header xfi_train_hdr;
1873 u32 serdes_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];
1874
1875 /* segment 26 */
1876 struct mpi_coredump_segment_header xfi_hss_pcs_hdr;
1877 u32 serdes_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];
1878
1879 /* segment 27 */
1880 struct mpi_coredump_segment_header xfi_hss_tx_hdr;
1881 u32 serdes_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];
1882
1883 /* segment 28 */
1884 struct mpi_coredump_segment_header xfi_hss_rx_hdr;
1885 u32 serdes_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];
1886
1887 /* segment 29 */
1888 struct mpi_coredump_segment_header xfi_hss_pll_hdr;
1889 u32 serdes_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];
1890
1891 /* segment 30 */
1892 struct mpi_coredump_segment_header misc_nic_seg_hdr;
1893 struct ql_nic_misc misc_nic_info;
1894
1895 /* segment 31 */
1896 /* one interrupt state for each CQ */
1897 struct mpi_coredump_segment_header intr_states_seg_hdr;
1898 u32 intr_states[MAX_RX_RINGS];
1899
1900 /* segment 32 */
1901 /* 3 cam words each for 16 unicast,
1902 * 2 cam words for each of 32 multicast.
1903 */
1904 struct mpi_coredump_segment_header cam_entries_seg_hdr;
1905 u32 cam_entries[(16 * 3) + (32 * 3)];
1906
1907 /* segment 33 */
1908 struct mpi_coredump_segment_header nic_routing_words_seg_hdr;
1909 u32 nic_routing_words[16];
1910 /* segment 34 */
1911 struct mpi_coredump_segment_header ets_seg_hdr;
1912 u32 ets[ETS_REGS_DUMP_WORD_COUNT];
1913
1914 /* segment 35 */
1915 struct mpi_coredump_segment_header probe_dump_seg_hdr;
1916 u32 probe_dump[PRB_MX_DUMP_TOT_COUNT];
1917
1918 /* segment 36 */
1919 struct mpi_coredump_segment_header routing_reg_seg_hdr;
1920 u32 routing_regs[RT_IDX_DUMP_TOT_WORDS];
1921
1922 /* segment 37 */
1923 struct mpi_coredump_segment_header mac_prot_reg_seg_hdr;
1924 u32 mac_prot_regs[MAC_ADDR_DUMP_TOT_WORDS];
1925
1926 /* segment 38 */
1927 struct mpi_coredump_segment_header xaui2_an_hdr;
1928 u32 serdes2_xaui_an[XG_SERDES_XAUI_AN_COUNT];
1929
1930 /* segment 39 */
1931 struct mpi_coredump_segment_header xaui2_hss_pcs_hdr;
1932 u32 serdes2_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];
1933
1934 /* segment 40 */
1935 struct mpi_coredump_segment_header xfi2_an_hdr;
1936 u32 serdes2_xfi_an[XG_SERDES_XFI_AN_COUNT];
1937
1938 /* segment 41 */
1939 struct mpi_coredump_segment_header xfi2_train_hdr;
1940 u32 serdes2_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];
1941
1942 /* segment 42 */
1943 struct mpi_coredump_segment_header xfi2_hss_pcs_hdr;
1944 u32 serdes2_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];
1945
1946 /* segment 43 */
1947 struct mpi_coredump_segment_header xfi2_hss_tx_hdr;
1948 u32 serdes2_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];
1949
1950 /* segment 44 */
1951 struct mpi_coredump_segment_header xfi2_hss_rx_hdr;
1952 u32 serdes2_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];
1953
1954 /* segment 45 */
1955 struct mpi_coredump_segment_header xfi2_hss_pll_hdr;
1956 u32 serdes2_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];
1957
1958 /* segment 50 */
1959 /* semaphore register for all 5 functions */
1960 struct mpi_coredump_segment_header sem_regs_seg_hdr;
1961 u32 sem_regs[MAX_SEMAPHORE_FUNCTIONS];
1962};
1963
1964/*
1965 * intr_context structure is used during initialization
1966 * to hook the interrupts. It is also used in a single
1967 * irq environment as a context to the ISR.
1968 */
1969struct intr_context {
1970 struct ql_adapter *qdev;
1971 u32 intr;
1972 u32 irq_mask; /* Mask of which rings the vector services. */
1973 u32 hooked;
1974 u32 intr_en_mask; /* value/mask used to enable this intr */
1975 u32 intr_dis_mask; /* value/mask used to disable this intr */
1976 u32 intr_read_mask; /* value/mask used to read this intr */
1977 char name[IFNAMSIZ * 2];
1978 atomic_t irq_cnt; /* irq_cnt is used in single vector
1979 * environment. It's incremented for each
1980 * irq handler that is scheduled. When each
1981 * handler finishes it decrements irq_cnt and
1982 * enables interrupts if it's zero. */
1983 irq_handler_t handler;
1984};
1985
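The irq_cnt comment above describes the single-vector bookkeeping: every scheduled handler bumps the count, and the last one to finish re-enables the interrupt. A minimal sketch of that idea follows; it is illustrative only and not part of this patch (the driver's real accounting lives in its enable/disable helpers in qlge_main.c and may differ in detail):

	/* Illustrative: the last handler running on a shared vector re-enables it. */
	static inline void example_vector_handler_done(struct intr_context *ctx)
	{
		if (atomic_dec_and_test(&ctx->irq_cnt))
			ql_enable_completion_interrupt(ctx->qdev, ctx->intr);
	}
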
1986/* adapter flags definitions. */
1987enum {
1988 QL_ADAPTER_UP = 0, /* Adapter has been brought up. */
1989 QL_LEGACY_ENABLED = 1,
1990 QL_MSI_ENABLED = 2,
1991 QL_MSIX_ENABLED = 3,
1992 QL_DMA64 = 4,
1993 QL_PROMISCUOUS = 5,
1994 QL_ALLMULTI = 6,
1995 QL_PORT_CFG = 7,
1996 QL_CAM_RT_SET = 8,
1997 QL_SELFTEST = 9,
1998 QL_LB_LINK_UP = 10,
1999 QL_FRC_COREDUMP = 11,
2000 QL_EEH_FATAL = 12,
2001	QL_ASIC_RECOVERY = 14, /* We are in ASIC recovery. */
2002};
2003
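These flag values are bit numbers, not masks; they are used with the kernel bitops against qdev->flags (see, for example, the test_bit(QL_FRC_COREDUMP, &qdev->flags) call in qlge_dbg.c below). A hedged illustration, not part of this patch:

	/* Illustrative: adapter flags are bit numbers for set_bit()/test_bit(). */
	static inline bool example_adapter_is_up(unsigned long *flags)
	{
		return test_bit(QL_ADAPTER_UP, flags);
	}
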
2004/* link_status bit definitions */
2005enum {
2006 STS_LOOPBACK_MASK = 0x00000700,
2007 STS_LOOPBACK_PCS = 0x00000100,
2008 STS_LOOPBACK_HSS = 0x00000200,
2009 STS_LOOPBACK_EXT = 0x00000300,
2010 STS_PAUSE_MASK = 0x000000c0,
2011 STS_PAUSE_STD = 0x00000040,
2012 STS_PAUSE_PRI = 0x00000080,
2013 STS_SPEED_MASK = 0x00000038,
2014 STS_SPEED_100Mb = 0x00000000,
2015 STS_SPEED_1Gb = 0x00000008,
2016 STS_SPEED_10Gb = 0x00000010,
2017 STS_LINK_TYPE_MASK = 0x00000007,
2018 STS_LINK_TYPE_XFI = 0x00000001,
2019 STS_LINK_TYPE_XAUI = 0x00000002,
2020 STS_LINK_TYPE_XFI_BP = 0x00000003,
2021 STS_LINK_TYPE_XAUI_BP = 0x00000004,
2022 STS_LINK_TYPE_10GBASET = 0x00000005,
2023};
2024
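The masks above decode fields of the adapter's link_status word. A small sketch of such a decode (illustrative only, not part of this patch):

	/* Illustrative: extract the speed field from a link_status word. */
	static inline const char *example_link_speed(u32 link_status)
	{
		switch (link_status & STS_SPEED_MASK) {
		case STS_SPEED_100Mb:
			return "100Mb";
		case STS_SPEED_1Gb:
			return "1Gb";
		case STS_SPEED_10Gb:
			return "10Gb";
		default:
			return "unknown";
		}
	}
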
2025/* link_config bit definitions */
2026enum {
2027 CFG_JUMBO_FRAME_SIZE = 0x00010000,
2028 CFG_PAUSE_MASK = 0x00000060,
2029 CFG_PAUSE_STD = 0x00000020,
2030 CFG_PAUSE_PRI = 0x00000040,
2031 CFG_DCBX = 0x00000010,
2032 CFG_LOOPBACK_MASK = 0x00000007,
2033 CFG_LOOPBACK_PCS = 0x00000002,
2034 CFG_LOOPBACK_HSS = 0x00000004,
2035 CFG_LOOPBACK_EXT = 0x00000006,
2036 CFG_DEFAULT_MAX_FRAME_SIZE = 0x00002580,
2037};
2038
2039struct nic_operations {
2040
2041 int (*get_flash) (struct ql_adapter *);
2042 int (*port_initialize) (struct ql_adapter *);
2043};
2044
2045/*
2046 * The main Adapter structure definition.
2047 * This structure has all fields relevant to the hardware.
2048 */
2049struct ql_adapter {
2050 struct ricb ricb;
2051 unsigned long flags;
2052 u32 wol;
2053
2054 struct nic_stats nic_stats;
2055
2056 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
2057
2058 /* PCI Configuration information for this device */
2059 struct pci_dev *pdev;
2060 struct net_device *ndev; /* Parent NET device */
2061
2062 /* Hardware information */
2063 u32 chip_rev_id;
2064 u32 fw_rev_id;
2065 u32 func; /* PCI function for this adapter */
2066 u32 alt_func; /* PCI function for alternate adapter */
2067	u32 port; /* Port number for this adapter */
2068
2069 spinlock_t adapter_lock;
2070 spinlock_t hw_lock;
2071 spinlock_t stats_lock;
2072
2073 /* PCI Bus Relative Register Addresses */
2074 void __iomem *reg_base;
2075 void __iomem *doorbell_area;
2076 u32 doorbell_area_size;
2077
2078 u32 msg_enable;
2079
2080 /* Page for Shadow Registers */
2081 void *rx_ring_shadow_reg_area;
2082 dma_addr_t rx_ring_shadow_reg_dma;
2083 void *tx_ring_shadow_reg_area;
2084 dma_addr_t tx_ring_shadow_reg_dma;
2085
2086 u32 mailbox_in;
2087 u32 mailbox_out;
2088 struct mbox_params idc_mbc;
2089 struct mutex mpi_mutex;
2090
2091 int tx_ring_size;
2092 int rx_ring_size;
2093 u32 intr_count;
2094 struct msix_entry *msi_x_entry;
2095 struct intr_context intr_context[MAX_RX_RINGS];
2096
2097 int tx_ring_count; /* One per online CPU. */
2098 u32 rss_ring_count; /* One per irq vector. */
2099 /*
2100 * rx_ring_count =
2101 * (CPU count * outbound completion rx_ring) +
2102 * (irq_vector_cnt * inbound (RSS) completion rx_ring)
2103 */
2104 int rx_ring_count;
2105 int ring_mem_size;
2106 void *ring_mem;
2107
2108 struct rx_ring rx_ring[MAX_RX_RINGS];
2109 struct tx_ring tx_ring[MAX_TX_RINGS];
2110 unsigned int lbq_buf_order;
2111
2112 int rx_csum;
2113 u32 default_rx_queue;
2114
2115 u16 rx_coalesce_usecs; /* cqicb->int_delay */
2116 u16 rx_max_coalesced_frames; /* cqicb->pkt_int_delay */
2117 u16 tx_coalesce_usecs; /* cqicb->int_delay */
2118 u16 tx_max_coalesced_frames; /* cqicb->pkt_int_delay */
2119
2120 u32 xg_sem_mask;
2121 u32 port_link_up;
2122 u32 port_init;
2123 u32 link_status;
2124 struct ql_mpi_coredump *mpi_coredump;
2125 u32 core_is_dumped;
2126 u32 link_config;
2127 u32 led_config;
2128 u32 max_frame_size;
2129
2130 union flash_params flash;
2131
2132 struct workqueue_struct *workqueue;
2133 struct delayed_work asic_reset_work;
2134 struct delayed_work mpi_reset_work;
2135 struct delayed_work mpi_work;
2136 struct delayed_work mpi_port_cfg_work;
2137 struct delayed_work mpi_idc_work;
2138 struct delayed_work mpi_core_to_log;
2139 struct completion ide_completion;
2140 const struct nic_operations *nic_ops;
2141 u16 device_id;
2142 struct timer_list timer;
2143 atomic_t lb_count;
2144 /* Keep local copy of current mac address. */
2145 char current_mac_addr[6];
2146};
2147
2148/*
2149 * Typical Register accessor for memory mapped device.
2150 */
2151static inline u32 ql_read32(const struct ql_adapter *qdev, int reg)
2152{
2153 return readl(qdev->reg_base + reg);
2154}
2155
2156/*
2157 * Typical Register accessor for memory mapped device.
2158 */
2159static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val)
2160{
2161 writel(val, qdev->reg_base + reg);
2162}
2163
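Built on these two accessors, a read-modify-write of a BAR 0 register looks like the sketch below (illustrative only, not part of this patch; whether a plain read-modify-write is appropriate depends on the particular register):

	/* Illustrative: set a bit in a memory-mapped control register. */
	static inline void example_set_reg_bit(struct ql_adapter *qdev, int reg, u32 bit)
	{
		ql_write32(qdev, reg, ql_read32(qdev, reg) | bit);
	}
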
2164/*
2165 * Doorbell Registers:
2166 * Doorbell registers are virtual registers in the PCI memory space.
2167 * The space is allocated by the chip during PCI initialization. The
2168 * device driver finds the doorbell address in BAR 3 in PCI config space.
2169 * The registers are used to control outbound and inbound queues. For
2170 * example, the producer index for an outbound queue. Each queue uses
2171 * 1 4k chunk of memory. The lower half of the space is for outbound
2172 * one 4k chunk of memory. The lower half of the space is for outbound
2173 */
2174static inline void ql_write_db_reg(u32 val, void __iomem *addr)
2175{
2176 writel(val, addr);
2177 mmiowb();
2178}
2179
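A typical caller posts a queue's producer index through this helper. A hedged sketch follows; the field names prod_idx and prod_idx_db_reg are assumed from the ring structures defined earlier in this header and are not verified here (illustrative only, not part of this patch):

	/* Illustrative: ring an outbound queue's producer-index doorbell.
	 * Field names are assumptions, not verified against struct tx_ring.
	 */
	static inline void example_kick_tx(struct tx_ring *tx_ring)
	{
		ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
	}
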
2180/*
2181 * Shadow Registers:
2182 * Outbound queues have a consumer index that is maintained by the chip.
2183 * Inbound queues have a producer index that is maintained by the chip.
2184 * For lower overhead, these registers are "shadowed" to host memory
2185 * which allows the device driver to track the queue progress without
2186 * PCI reads. When an entry is placed on an inbound queue, the chip will
2187 * update the relevant index register and then copy the value to the
2188 * shadow register in host memory.
2189 */
2190static inline u32 ql_read_sh_reg(__le32 *addr)
2191{
2192 u32 reg;
2193 reg = le32_to_cpu(*addr);
2194 rmb();
2195 return reg;
2196}
2197
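Callers poll the shadow copy in host memory instead of issuing a PCI read. A hedged sketch (the pointer name below is an assumption; the real ring structures carry their own shadow-register pointers):

	/* Illustrative: check a shadowed consumer index without a PCI read. */
	static inline bool example_work_pending(__le32 *cnsmr_idx_sh_reg, u32 last_idx)
	{
		return ql_read_sh_reg(cnsmr_idx_sh_reg) != last_idx;
	}
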
2198extern char qlge_driver_name[];
2199extern const char qlge_driver_version[];
2200extern const struct ethtool_ops qlge_ethtool_ops;
2201
2202extern int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
2203extern void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
2204extern int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
2205extern int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
2206 u32 *value);
2207extern int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
2208extern int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
2209 u16 q_id);
2210void ql_queue_fw_error(struct ql_adapter *qdev);
2211void ql_mpi_work(struct work_struct *work);
2212void ql_mpi_reset_work(struct work_struct *work);
2213void ql_mpi_core_to_log(struct work_struct *work);
2214int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
2215void ql_queue_asic_error(struct ql_adapter *qdev);
2216u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
2217void ql_set_ethtool_ops(struct net_device *ndev);
2218int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data);
2219void ql_mpi_idc_work(struct work_struct *work);
2220void ql_mpi_port_cfg_work(struct work_struct *work);
2221int ql_mb_get_fw_state(struct ql_adapter *qdev);
2222int ql_cam_route_initialize(struct ql_adapter *qdev);
2223int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
2224int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data);
2225int ql_unpause_mpi_risc(struct ql_adapter *qdev);
2226int ql_pause_mpi_risc(struct ql_adapter *qdev);
2227int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
2228int ql_soft_reset_mpi_risc(struct ql_adapter *qdev);
2229int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
2230 u32 ram_addr, int word_count);
2231int ql_core_dump(struct ql_adapter *qdev,
2232 struct ql_mpi_coredump *mpi_coredump);
2233int ql_mb_about_fw(struct ql_adapter *qdev);
2234int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
2235int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
2236int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config);
2237int ql_mb_get_led_cfg(struct ql_adapter *qdev);
2238void ql_link_on(struct ql_adapter *qdev);
2239void ql_link_off(struct ql_adapter *qdev);
2240int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control);
2241int ql_mb_get_port_cfg(struct ql_adapter *qdev);
2242int ql_mb_set_port_cfg(struct ql_adapter *qdev);
2243int ql_wait_fifo_empty(struct ql_adapter *qdev);
2244void ql_get_dump(struct ql_adapter *qdev, void *buff);
2245void ql_gen_reg_dump(struct ql_adapter *qdev,
2246 struct ql_reg_dump *mpi_coredump);
2247netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
2248void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
2249int ql_own_firmware(struct ql_adapter *qdev);
2250int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
2251
2252/* #define QL_ALL_DUMP */
2253/* #define QL_REG_DUMP */
2254/* #define QL_DEV_DUMP */
2255/* #define QL_CB_DUMP */
2256/* #define QL_IB_DUMP */
2257/* #define QL_OB_DUMP */
2258
2259#ifdef QL_REG_DUMP
2260extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
2261extern void ql_dump_routing_entries(struct ql_adapter *qdev);
2262extern void ql_dump_regs(struct ql_adapter *qdev);
2263#define QL_DUMP_REGS(qdev) ql_dump_regs(qdev)
2264#define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev)
2265#define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev)
2266#else
2267#define QL_DUMP_REGS(qdev)
2268#define QL_DUMP_ROUTE(qdev)
2269#define QL_DUMP_XGMAC_CONTROL_REGS(qdev)
2270#endif
2271
2272#ifdef QL_STAT_DUMP
2273extern void ql_dump_stat(struct ql_adapter *qdev);
2274#define QL_DUMP_STAT(qdev) ql_dump_stat(qdev)
2275#else
2276#define QL_DUMP_STAT(qdev)
2277#endif
2278
2279#ifdef QL_DEV_DUMP
2280extern void ql_dump_qdev(struct ql_adapter *qdev);
2281#define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev)
2282#else
2283#define QL_DUMP_QDEV(qdev)
2284#endif
2285
2286#ifdef QL_CB_DUMP
2287extern void ql_dump_wqicb(struct wqicb *wqicb);
2288extern void ql_dump_tx_ring(struct tx_ring *tx_ring);
2289extern void ql_dump_ricb(struct ricb *ricb);
2290extern void ql_dump_cqicb(struct cqicb *cqicb);
2291extern void ql_dump_rx_ring(struct rx_ring *rx_ring);
2292extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
2293#define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb)
2294#define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb)
2295#define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring)
2296#define QL_DUMP_CQICB(cqicb) ql_dump_cqicb(cqicb)
2297#define QL_DUMP_RX_RING(rx_ring) ql_dump_rx_ring(rx_ring)
2298#define QL_DUMP_HW_CB(qdev, size, bit, q_id) \
2299 ql_dump_hw_cb(qdev, size, bit, q_id)
2300#else
2301#define QL_DUMP_RICB(ricb)
2302#define QL_DUMP_WQICB(wqicb)
2303#define QL_DUMP_TX_RING(tx_ring)
2304#define QL_DUMP_CQICB(cqicb)
2305#define QL_DUMP_RX_RING(rx_ring)
2306#define QL_DUMP_HW_CB(qdev, size, bit, q_id)
2307#endif
2308
2309#ifdef QL_OB_DUMP
2310extern void ql_dump_tx_desc(struct tx_buf_desc *tbd);
2311extern void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
2312extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
2313#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb)
2314#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp)
2315#else
2316#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb)
2317#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp)
2318#endif
2319
2320#ifdef QL_IB_DUMP
2321extern void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
2322#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp)
2323#else
2324#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp)
2325#endif
2326
2327#ifdef QL_ALL_DUMP
2328extern void ql_dump_all(struct ql_adapter *qdev);
2329#define QL_DUMP_ALL(qdev) ql_dump_all(qdev)
2330#else
2331#define QL_DUMP_ALL(qdev)
2332#endif
2333
2334#endif /* _QLGE_H_ */
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
new file mode 100644
index 00000000000..fca804f36d6
--- /dev/null
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -0,0 +1,2044 @@
1#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2
3#include <linux/slab.h>
4
5#include "qlge.h"
6
7/* Read a NIC register from the alternate function. */
8static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
9 u32 reg)
10{
11 u32 register_to_read;
12 u32 reg_val;
13 unsigned int status = 0;
14
15 register_to_read = MPI_NIC_REG_BLOCK
16 | MPI_NIC_READ
17 | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
18 | reg;
19 status = ql_read_mpi_reg(qdev, register_to_read, &reg_val);
20 if (status != 0)
21 return 0xffffffff;
22
23 return reg_val;
24}
25
26/* Write a NIC register on the alternate function. */
27static int ql_write_other_func_reg(struct ql_adapter *qdev,
28 u32 reg, u32 reg_val)
29{
30 u32 register_to_read;
31 int status = 0;
32
33 register_to_read = MPI_NIC_REG_BLOCK
34 | MPI_NIC_READ
35 | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
36 | reg;
37 status = ql_write_mpi_reg(qdev, register_to_read, reg_val);
38
39 return status;
40}
41
42static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
43 u32 bit, u32 err_bit)
44{
45 u32 temp;
46 int count = 10;
47
48 while (count) {
49 temp = ql_read_other_func_reg(qdev, reg);
50
51 /* check for errors */
52 if (temp & err_bit)
53 return -1;
54 else if (temp & bit)
55 return 0;
56 mdelay(10);
57 count--;
58 }
59 return -1;
60}
61
62static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg,
63 u32 *data)
64{
65 int status;
66
67 /* wait for reg to come ready */
68 status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
69 XG_SERDES_ADDR_RDY, 0);
70 if (status)
71 goto exit;
72
73 /* set up for reg read */
74 ql_write_other_func_reg(qdev, XG_SERDES_ADDR/4, reg | PROC_ADDR_R);
75
76 /* wait for reg to come ready */
77 status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
78 XG_SERDES_ADDR_RDY, 0);
79 if (status)
80 goto exit;
81
82 /* get the data */
83 *data = ql_read_other_func_reg(qdev, (XG_SERDES_DATA / 4));
84exit:
85 return status;
86}
87
88/* Read out the SERDES registers */
89static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 * data)
90{
91 int status;
92
93 /* wait for reg to come ready */
94 status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
95 if (status)
96 goto exit;
97
98 /* set up for reg read */
99 ql_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R);
100
101 /* wait for reg to come ready */
102 status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
103 if (status)
104 goto exit;
105
106 /* get the data */
107 *data = ql_read32(qdev, XG_SERDES_DATA);
108exit:
109 return status;
110}
111
112static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr,
113 u32 *direct_ptr, u32 *indirect_ptr,
114 unsigned int direct_valid, unsigned int indirect_valid)
115{
116 unsigned int status;
117
118 status = 1;
119 if (direct_valid)
120 status = ql_read_serdes_reg(qdev, addr, direct_ptr);
121 /* Dead fill any failures or invalids. */
122 if (status)
123 *direct_ptr = 0xDEADBEEF;
124
125 status = 1;
126 if (indirect_valid)
127 status = ql_read_other_func_serdes_reg(
128 qdev, addr, indirect_ptr);
129 /* Dead fill any failures or invalids. */
130 if (status)
131 *indirect_ptr = 0xDEADBEEF;
132}
133
134static int ql_get_serdes_regs(struct ql_adapter *qdev,
135 struct ql_mpi_coredump *mpi_coredump)
136{
137 int status;
138 unsigned int xfi_direct_valid, xfi_indirect_valid, xaui_direct_valid;
139 unsigned int xaui_indirect_valid, i;
140 u32 *direct_ptr, temp;
141 u32 *indirect_ptr;
142
143 xfi_direct_valid = xfi_indirect_valid = 0;
144 xaui_direct_valid = xaui_indirect_valid = 1;
145
146 /* The XAUI needs to be read out per port */
147 if (qdev->func & 1) {
148 /* We are NIC 2 */
149 status = ql_read_other_func_serdes_reg(qdev,
150 XG_SERDES_XAUI_HSS_PCS_START, &temp);
151 if (status)
152 temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
153 if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
154 XG_SERDES_ADDR_XAUI_PWR_DOWN)
155 xaui_indirect_valid = 0;
156
157 status = ql_read_serdes_reg(qdev,
158 XG_SERDES_XAUI_HSS_PCS_START, &temp);
159 if (status)
160 temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
161
162 if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
163 XG_SERDES_ADDR_XAUI_PWR_DOWN)
164 xaui_direct_valid = 0;
165 } else {
166 /* We are NIC 1 */
167 status = ql_read_other_func_serdes_reg(qdev,
168 XG_SERDES_XAUI_HSS_PCS_START, &temp);
169 if (status)
170 temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
171 if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
172 XG_SERDES_ADDR_XAUI_PWR_DOWN)
173 xaui_indirect_valid = 0;
174
175 status = ql_read_serdes_reg(qdev,
176 XG_SERDES_XAUI_HSS_PCS_START, &temp);
177 if (status)
178 temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
179 if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
180 XG_SERDES_ADDR_XAUI_PWR_DOWN)
181 xaui_direct_valid = 0;
182 }
183
184 /*
185 * The XFI register is shared, so we only need to read one
186 * function and then check the bits.
187 */
188 status = ql_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp);
189 if (status)
190 temp = 0;
191
192 if ((temp & XG_SERDES_ADDR_XFI1_PWR_UP) ==
193 XG_SERDES_ADDR_XFI1_PWR_UP) {
194 /* now see if i'm NIC 1 or NIC 2 */
195 if (qdev->func & 1)
196 /* I'm NIC 2, so the indirect (NIC1) xfi is up. */
197 xfi_indirect_valid = 1;
198 else
199 xfi_direct_valid = 1;
200 }
201 if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) ==
202 XG_SERDES_ADDR_XFI2_PWR_UP) {
203 /* now see if i'm NIC 1 or NIC 2 */
204 if (qdev->func & 1)
205 /* I'm NIC 2, so my own (direct) xfi is up. */
206 xfi_direct_valid = 1;
207 else
208 xfi_indirect_valid = 1;
209 }
210
211 /* Get XAUI_AN register block. */
212 if (qdev->func & 1) {
213 /* Function 2 is direct */
214 direct_ptr = mpi_coredump->serdes2_xaui_an;
215 indirect_ptr = mpi_coredump->serdes_xaui_an;
216 } else {
217 /* Function 1 is direct */
218 direct_ptr = mpi_coredump->serdes_xaui_an;
219 indirect_ptr = mpi_coredump->serdes2_xaui_an;
220 }
221
222 for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++)
223 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
224 xaui_direct_valid, xaui_indirect_valid);
225
226 /* Get XAUI_HSS_PCS register block. */
227 if (qdev->func & 1) {
228 direct_ptr =
229 mpi_coredump->serdes2_xaui_hss_pcs;
230 indirect_ptr =
231 mpi_coredump->serdes_xaui_hss_pcs;
232 } else {
233 direct_ptr =
234 mpi_coredump->serdes_xaui_hss_pcs;
235 indirect_ptr =
236 mpi_coredump->serdes2_xaui_hss_pcs;
237 }
238
239 for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++)
240 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
241 xaui_direct_valid, xaui_indirect_valid);
242
243 /* Get XAUI_XFI_AN register block. */
244 if (qdev->func & 1) {
245 direct_ptr = mpi_coredump->serdes2_xfi_an;
246 indirect_ptr = mpi_coredump->serdes_xfi_an;
247 } else {
248 direct_ptr = mpi_coredump->serdes_xfi_an;
249 indirect_ptr = mpi_coredump->serdes2_xfi_an;
250 }
251
252 for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++)
253 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
254 xfi_direct_valid, xfi_indirect_valid);
255
256 /* Get XAUI_XFI_TRAIN register block. */
257 if (qdev->func & 1) {
258 direct_ptr = mpi_coredump->serdes2_xfi_train;
259 indirect_ptr =
260 mpi_coredump->serdes_xfi_train;
261 } else {
262 direct_ptr = mpi_coredump->serdes_xfi_train;
263 indirect_ptr =
264 mpi_coredump->serdes2_xfi_train;
265 }
266
267 for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++)
268 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
269 xfi_direct_valid, xfi_indirect_valid);
270
271 /* Get XAUI_XFI_HSS_PCS register block. */
272 if (qdev->func & 1) {
273 direct_ptr =
274 mpi_coredump->serdes2_xfi_hss_pcs;
275 indirect_ptr =
276 mpi_coredump->serdes_xfi_hss_pcs;
277 } else {
278 direct_ptr =
279 mpi_coredump->serdes_xfi_hss_pcs;
280 indirect_ptr =
281 mpi_coredump->serdes2_xfi_hss_pcs;
282 }
283
284 for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++)
285 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
286 xfi_direct_valid, xfi_indirect_valid);
287
288 /* Get XAUI_XFI_HSS_TX register block. */
289 if (qdev->func & 1) {
290 direct_ptr =
291 mpi_coredump->serdes2_xfi_hss_tx;
292 indirect_ptr =
293 mpi_coredump->serdes_xfi_hss_tx;
294 } else {
295 direct_ptr = mpi_coredump->serdes_xfi_hss_tx;
296 indirect_ptr =
297 mpi_coredump->serdes2_xfi_hss_tx;
298 }
299 for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++)
300 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
301 xfi_direct_valid, xfi_indirect_valid);
302
303 /* Get XAUI_XFI_HSS_RX register block. */
304 if (qdev->func & 1) {
305 direct_ptr =
306 mpi_coredump->serdes2_xfi_hss_rx;
307 indirect_ptr =
308 mpi_coredump->serdes_xfi_hss_rx;
309 } else {
310 direct_ptr = mpi_coredump->serdes_xfi_hss_rx;
311 indirect_ptr =
312 mpi_coredump->serdes2_xfi_hss_rx;
313 }
314
315 for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++)
316 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
317 xfi_direct_valid, xfi_indirect_valid);
318
319
320 /* Get XAUI_XFI_HSS_PLL register block. */
321 if (qdev->func & 1) {
322 direct_ptr =
323 mpi_coredump->serdes2_xfi_hss_pll;
324 indirect_ptr =
325 mpi_coredump->serdes_xfi_hss_pll;
326 } else {
327 direct_ptr =
328 mpi_coredump->serdes_xfi_hss_pll;
329 indirect_ptr =
330 mpi_coredump->serdes2_xfi_hss_pll;
331 }
332 for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++)
333 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
334 xfi_direct_valid, xfi_indirect_valid);
335 return 0;
336}
337
338static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg,
339 u32 *data)
340{
341 int status = 0;
342
343 /* wait for reg to come ready */
344 status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
345 XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
346 if (status)
347 goto exit;
348
349 /* set up for reg read */
350 ql_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R);
351
352 /* wait for reg to come ready */
353 status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
354 XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
355 if (status)
356 goto exit;
357
358 /* get the data */
359 *data = ql_read_other_func_reg(qdev, XGMAC_DATA / 4);
360exit:
361 return status;
362}
363
364/* Read the 400 xgmac control/statistics registers
365 * skipping unused locations.
366 */
367static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 * buf,
368 unsigned int other_function)
369{
370 int status = 0;
371 int i;
372
373 for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) {
374 /* We're reading 400 xgmac registers, but we filter out
375 * several locations that are non-responsive to reads.
376 */
377 if ((i == 0x00000114) ||
378 (i == 0x00000118) ||
379 (i == 0x0000013c) ||
380 (i == 0x00000140) ||
381 (i > 0x00000150 && i < 0x000001fc) ||
382 (i > 0x00000278 && i < 0x000002a0) ||
383 (i > 0x000002c0 && i < 0x000002cf) ||
384 (i > 0x000002dc && i < 0x000002f0) ||
385 (i > 0x000003c8 && i < 0x00000400) ||
386 (i > 0x00000400 && i < 0x00000410) ||
387 (i > 0x00000410 && i < 0x00000420) ||
388 (i > 0x00000420 && i < 0x00000430) ||
389 (i > 0x00000430 && i < 0x00000440) ||
390 (i > 0x00000440 && i < 0x00000450) ||
391 (i > 0x00000450 && i < 0x00000500) ||
392 (i > 0x0000054c && i < 0x00000568) ||
393 (i > 0x000005c8 && i < 0x00000600)) {
394 if (other_function)
395 status =
396 ql_read_other_func_xgmac_reg(qdev, i, buf);
397 else
398 status = ql_read_xgmac_reg(qdev, i, buf);
399
400 if (status)
401 *buf = 0xdeadbeef;
402 break;
403 }
404 }
405 return status;
406}
407
408static int ql_get_ets_regs(struct ql_adapter *qdev, u32 * buf)
409{
410 int status = 0;
411 int i;
412
413 for (i = 0; i < 8; i++, buf++) {
414 ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000);
415 *buf = ql_read32(qdev, NIC_ETS);
416 }
417
418 for (i = 0; i < 2; i++, buf++) {
419 ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000);
420 *buf = ql_read32(qdev, CNA_ETS);
421 }
422
423 return status;
424}
425
426static void ql_get_intr_states(struct ql_adapter *qdev, u32 * buf)
427{
428 int i;
429
430 for (i = 0; i < qdev->rx_ring_count; i++, buf++) {
431 ql_write32(qdev, INTR_EN,
432 qdev->intr_context[i].intr_read_mask);
433 *buf = ql_read32(qdev, INTR_EN);
434 }
435}
436
437static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf)
438{
439 int i, status;
440 u32 value[3];
441
442 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
443 if (status)
444 return status;
445
446 for (i = 0; i < 16; i++) {
447 status = ql_get_mac_addr_reg(qdev,
448 MAC_ADDR_TYPE_CAM_MAC, i, value);
449 if (status) {
450 netif_err(qdev, drv, qdev->ndev,
451 "Failed read of mac index register\n");
452 goto err;
453 }
454 *buf++ = value[0]; /* lower MAC address */
455 *buf++ = value[1]; /* upper MAC address */
456 *buf++ = value[2]; /* output */
457 }
458 for (i = 0; i < 32; i++) {
459 status = ql_get_mac_addr_reg(qdev,
460 MAC_ADDR_TYPE_MULTI_MAC, i, value);
461 if (status) {
462 netif_err(qdev, drv, qdev->ndev,
463 "Failed read of mac index register\n");
464 goto err;
465 }
466 *buf++ = value[0]; /* lower Mcast address */
467 *buf++ = value[1]; /* upper Mcast address */
468 }
469err:
470 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
471 return status;
472}
473
474static int ql_get_routing_entries(struct ql_adapter *qdev, u32 * buf)
475{
476 int status;
477 u32 value, i;
478
479 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
480 if (status)
481 return status;
482
483 for (i = 0; i < 16; i++) {
484 status = ql_get_routing_reg(qdev, i, &value);
485 if (status) {
486 netif_err(qdev, drv, qdev->ndev,
487 "Failed read of routing index register\n");
488 goto err;
489 } else {
490 *buf++ = value;
491 }
492 }
493err:
494 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
495 return status;
496}
497
498/* Read the MPI Processor shadow registers */
499static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 * buf)
500{
501 u32 i;
502 int status;
503
504 for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) {
505 status = ql_write_mpi_reg(qdev, RISC_124,
506 (SHADOW_OFFSET | i << SHADOW_REG_SHIFT));
507 if (status)
508 goto end;
509 status = ql_read_mpi_reg(qdev, RISC_127, buf);
510 if (status)
511 goto end;
512 }
513end:
514 return status;
515}
516
517/* Read the MPI Processor core registers */
518static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 * buf,
519 u32 offset, u32 count)
520{
521 int i, status = 0;
522 for (i = 0; i < count; i++, buf++) {
523 status = ql_read_mpi_reg(qdev, offset + i, buf);
524 if (status)
525 return status;
526 }
527 return status;
528}
529
530/* Read the ASIC probe dump */
531static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock,
532 u32 valid, u32 *buf)
533{
534 u32 module, mux_sel, probe, lo_val, hi_val;
535
536 for (module = 0; module < PRB_MX_ADDR_MAX_MODS; module++) {
537 if (!((valid >> module) & 1))
538 continue;
539 for (mux_sel = 0; mux_sel < PRB_MX_ADDR_MAX_MUX; mux_sel++) {
540 probe = clock
541 | PRB_MX_ADDR_ARE
542 | mux_sel
543 | (module << PRB_MX_ADDR_MOD_SEL_SHIFT);
544 ql_write32(qdev, PRB_MX_ADDR, probe);
545 lo_val = ql_read32(qdev, PRB_MX_DATA);
546 if (mux_sel == 0) {
547 *buf = probe;
548 buf++;
549 }
550 probe |= PRB_MX_ADDR_UP;
551 ql_write32(qdev, PRB_MX_ADDR, probe);
552 hi_val = ql_read32(qdev, PRB_MX_DATA);
553 *buf = lo_val;
554 buf++;
555 *buf = hi_val;
556 buf++;
557 }
558 }
559 return buf;
560}
561
562static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf)
563{
564 /* First we have to enable the probe mux */
565 ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN);
566 buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK,
567 PRB_MX_ADDR_VALID_SYS_MOD, buf);
568 buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK,
569 PRB_MX_ADDR_VALID_PCI_MOD, buf);
570 buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK,
571 PRB_MX_ADDR_VALID_XGM_MOD, buf);
572 buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK,
573 PRB_MX_ADDR_VALID_FC_MOD, buf);
574 return 0;
575
576}
577
578/* Read out the routing index registers */
579static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf)
580{
581 int status;
582 u32 type, index, index_max;
583 u32 result_index;
584 u32 result_data;
585 u32 val;
586
587 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
588 if (status)
589 return status;
590
591 for (type = 0; type < 4; type++) {
592 if (type < 2)
593 index_max = 8;
594 else
595 index_max = 16;
596 for (index = 0; index < index_max; index++) {
597 val = RT_IDX_RS
598 | (type << RT_IDX_TYPE_SHIFT)
599 | (index << RT_IDX_IDX_SHIFT);
600 ql_write32(qdev, RT_IDX, val);
601 result_index = 0;
602 while ((result_index & RT_IDX_MR) == 0)
603 result_index = ql_read32(qdev, RT_IDX);
604 result_data = ql_read32(qdev, RT_DATA);
605 *buf = type;
606 buf++;
607 *buf = index;
608 buf++;
609 *buf = result_index;
610 buf++;
611 *buf = result_data;
612 buf++;
613 }
614 }
615 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
616 return status;
617}
618
619/* Read out the MAC protocol registers */
620static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)
621{
622 u32 result_index, result_data;
623 u32 type;
624 u32 index;
625 u32 offset;
626 u32 val;
627 u32 initial_val = MAC_ADDR_RS;
628 u32 max_index;
629 u32 max_offset;
630
631 for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) {
632 switch (type) {
633
634 case 0: /* CAM */
635 initial_val |= MAC_ADDR_ADR;
636 max_index = MAC_ADDR_MAX_CAM_ENTRIES;
637 max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
638 break;
639 case 1: /* Multicast MAC Address */
640 max_index = MAC_ADDR_MAX_CAM_WCOUNT;
641 max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
642 break;
643 case 2: /* VLAN filter mask */
644 case 3: /* MC filter mask */
645 max_index = MAC_ADDR_MAX_CAM_WCOUNT;
646 max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
647 break;
648 case 4: /* FC MAC addresses */
649 max_index = MAC_ADDR_MAX_FC_MAC_ENTRIES;
650 max_offset = MAC_ADDR_MAX_FC_MAC_WCOUNT;
651 break;
652 case 5: /* Mgmt MAC addresses */
653 max_index = MAC_ADDR_MAX_MGMT_MAC_ENTRIES;
654 max_offset = MAC_ADDR_MAX_MGMT_MAC_WCOUNT;
655 break;
656 case 6: /* Mgmt VLAN addresses */
657 max_index = MAC_ADDR_MAX_MGMT_VLAN_ENTRIES;
658 max_offset = MAC_ADDR_MAX_MGMT_VLAN_WCOUNT;
659 break;
660 case 7: /* Mgmt IPv4 address */
661 max_index = MAC_ADDR_MAX_MGMT_V4_ENTRIES;
662 max_offset = MAC_ADDR_MAX_MGMT_V4_WCOUNT;
663 break;
664 case 8: /* Mgmt IPv6 address */
665 max_index = MAC_ADDR_MAX_MGMT_V6_ENTRIES;
666 max_offset = MAC_ADDR_MAX_MGMT_V6_WCOUNT;
667 break;
668 case 9: /* Mgmt TCP/UDP Dest port */
669 max_index = MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES;
670 max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT;
671 break;
672 default:
673 pr_err("Bad type!!! 0x%08x\n", type);
674 max_index = 0;
675 max_offset = 0;
676 break;
677 }
678 for (index = 0; index < max_index; index++) {
679 for (offset = 0; offset < max_offset; offset++) {
680 val = initial_val
681 | (type << MAC_ADDR_TYPE_SHIFT)
682 | (index << MAC_ADDR_IDX_SHIFT)
683 | (offset);
684 ql_write32(qdev, MAC_ADDR_IDX, val);
685 result_index = 0;
686 while ((result_index & MAC_ADDR_MR) == 0) {
687 result_index = ql_read32(qdev,
688 MAC_ADDR_IDX);
689 }
690 result_data = ql_read32(qdev, MAC_ADDR_DATA);
691 *buf = result_index;
692 buf++;
693 *buf = result_data;
694 buf++;
695 }
696 }
697 }
698}
699
700static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf)
701{
702 u32 func_num, reg, reg_val;
703 int status;
704
705 for (func_num = 0; func_num < MAX_SEMAPHORE_FUNCTIONS ; func_num++) {
706 reg = MPI_NIC_REG_BLOCK
707 | (func_num << MPI_NIC_FUNCTION_SHIFT)
708 | (SEM / 4);
709 status = ql_read_mpi_reg(qdev, reg, &reg_val);
710 *buf = reg_val;
711 /* if the read failed then dead fill the element. */
712 if (status)
713 *buf = 0xdeadbeef;
714 buf++;
715 }
716}
717
718/* Create a coredump segment header */
719static void ql_build_coredump_seg_header(
720 struct mpi_coredump_segment_header *seg_hdr,
721 u32 seg_number, u32 seg_size, u8 *desc)
722{
723 memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header));
724 seg_hdr->cookie = MPI_COREDUMP_COOKIE;
725 seg_hdr->segNum = seg_number;
726 seg_hdr->segSize = seg_size;
727 memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
728}
729
730/*
731 * This function should be called when a coredump / probedump
732 * is to be extracted from the HBA. It is assumed there is a
733 * qdev structure that contains the base address of the register
734 * space for this function as well as a coredump structure that
735 * will contain the dump.
736 */
737int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
738{
739 int status;
740 int i;
741
742 if (!mpi_coredump) {
743 netif_err(qdev, drv, qdev->ndev, "No memory available\n");
744 return -ENOMEM;
745 }
746
747 /* Try to get the spinlock, but don't worry if
748 * it isn't available. If the firmware died it
749 * might be holding the sem.
750 */
751 ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
752
753 status = ql_pause_mpi_risc(qdev);
754 if (status) {
755 netif_err(qdev, drv, qdev->ndev,
756 "Failed RISC pause. Status = 0x%.08x\n", status);
757 goto err;
758 }
759
760 /* Insert the global header */
761 memset(&(mpi_coredump->mpi_global_header), 0,
762 sizeof(struct mpi_coredump_global_header));
763 mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
764 mpi_coredump->mpi_global_header.headerSize =
765 sizeof(struct mpi_coredump_global_header);
766 mpi_coredump->mpi_global_header.imageSize =
767 sizeof(struct ql_mpi_coredump);
768 memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
769 sizeof(mpi_coredump->mpi_global_header.idString));
770
771 /* Get generic NIC reg dump */
772 ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
773 NIC1_CONTROL_SEG_NUM,
774 sizeof(struct mpi_coredump_segment_header) +
775 sizeof(mpi_coredump->nic_regs), "NIC1 Registers");
776
777 ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr,
778 NIC2_CONTROL_SEG_NUM,
779 sizeof(struct mpi_coredump_segment_header) +
780 sizeof(mpi_coredump->nic2_regs), "NIC2 Registers");
781
782 /* Get XGMac registers. (Segment 18, Rev C. step 21) */
783 ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr,
784 NIC1_XGMAC_SEG_NUM,
785 sizeof(struct mpi_coredump_segment_header) +
786 sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers");
787
788 ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr,
789 NIC2_XGMAC_SEG_NUM,
790 sizeof(struct mpi_coredump_segment_header) +
791 sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers");
792
793 if (qdev->func & 1) {
794 /* Odd means our function is NIC 2 */
795 for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
796 mpi_coredump->nic2_regs[i] =
797 ql_read32(qdev, i * sizeof(u32));
798
799 for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
800 mpi_coredump->nic_regs[i] =
801 ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
802
803 ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0);
804 ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1);
805 } else {
806 /* Even means our function is NIC 1 */
807 for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
808 mpi_coredump->nic_regs[i] =
809 ql_read32(qdev, i * sizeof(u32));
810 for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
811 mpi_coredump->nic2_regs[i] =
812 ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
813
814 ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0);
815 ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1);
816 }
817
818 /* Rev C. Step 20a */
819 ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr,
820 XAUI_AN_SEG_NUM,
821 sizeof(struct mpi_coredump_segment_header) +
822 sizeof(mpi_coredump->serdes_xaui_an),
823 "XAUI AN Registers");
824
825 /* Rev C. Step 20b */
826 ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr,
827 XAUI_HSS_PCS_SEG_NUM,
828 sizeof(struct mpi_coredump_segment_header) +
829 sizeof(mpi_coredump->serdes_xaui_hss_pcs),
830 "XAUI HSS PCS Registers");
831
832 ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM,
833 sizeof(struct mpi_coredump_segment_header) +
834 sizeof(mpi_coredump->serdes_xfi_an),
835 "XFI AN Registers");
836
837 ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr,
838 XFI_TRAIN_SEG_NUM,
839 sizeof(struct mpi_coredump_segment_header) +
840 sizeof(mpi_coredump->serdes_xfi_train),
841 "XFI TRAIN Registers");
842
843 ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr,
844 XFI_HSS_PCS_SEG_NUM,
845 sizeof(struct mpi_coredump_segment_header) +
846 sizeof(mpi_coredump->serdes_xfi_hss_pcs),
847 "XFI HSS PCS Registers");
848
849 ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr,
850 XFI_HSS_TX_SEG_NUM,
851 sizeof(struct mpi_coredump_segment_header) +
852 sizeof(mpi_coredump->serdes_xfi_hss_tx),
853 "XFI HSS TX Registers");
854
855 ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr,
856 XFI_HSS_RX_SEG_NUM,
857 sizeof(struct mpi_coredump_segment_header) +
858 sizeof(mpi_coredump->serdes_xfi_hss_rx),
859 "XFI HSS RX Registers");
860
861 ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr,
862 XFI_HSS_PLL_SEG_NUM,
863 sizeof(struct mpi_coredump_segment_header) +
864 sizeof(mpi_coredump->serdes_xfi_hss_pll),
865 "XFI HSS PLL Registers");
866
867 ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr,
868 XAUI2_AN_SEG_NUM,
869 sizeof(struct mpi_coredump_segment_header) +
870 sizeof(mpi_coredump->serdes2_xaui_an),
871 "XAUI2 AN Registers");
872
873 ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr,
874 XAUI2_HSS_PCS_SEG_NUM,
875 sizeof(struct mpi_coredump_segment_header) +
876 sizeof(mpi_coredump->serdes2_xaui_hss_pcs),
877 "XAUI2 HSS PCS Registers");
878
879 ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr,
880 XFI2_AN_SEG_NUM,
881 sizeof(struct mpi_coredump_segment_header) +
882 sizeof(mpi_coredump->serdes2_xfi_an),
883 "XFI2 AN Registers");
884
885 ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr,
886 XFI2_TRAIN_SEG_NUM,
887 sizeof(struct mpi_coredump_segment_header) +
888 sizeof(mpi_coredump->serdes2_xfi_train),
889 "XFI2 TRAIN Registers");
890
891 ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr,
892 XFI2_HSS_PCS_SEG_NUM,
893 sizeof(struct mpi_coredump_segment_header) +
894 sizeof(mpi_coredump->serdes2_xfi_hss_pcs),
895 "XFI2 HSS PCS Registers");
896
897 ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr,
898 XFI2_HSS_TX_SEG_NUM,
899 sizeof(struct mpi_coredump_segment_header) +
900 sizeof(mpi_coredump->serdes2_xfi_hss_tx),
901 "XFI2 HSS TX Registers");
902
903 ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr,
904 XFI2_HSS_RX_SEG_NUM,
905 sizeof(struct mpi_coredump_segment_header) +
906 sizeof(mpi_coredump->serdes2_xfi_hss_rx),
907 "XFI2 HSS RX Registers");
908
909 ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr,
910 XFI2_HSS_PLL_SEG_NUM,
911 sizeof(struct mpi_coredump_segment_header) +
912 sizeof(mpi_coredump->serdes2_xfi_hss_pll),
913 "XFI2 HSS PLL Registers");
914
915 status = ql_get_serdes_regs(qdev, mpi_coredump);
916 if (status) {
917 netif_err(qdev, drv, qdev->ndev,
918 "Failed Dump of Serdes Registers. Status = 0x%.08x\n",
919 status);
920 goto err;
921 }
922
923 ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr,
924 CORE_SEG_NUM,
925 sizeof(mpi_coredump->core_regs_seg_hdr) +
926 sizeof(mpi_coredump->mpi_core_regs) +
927 sizeof(mpi_coredump->mpi_core_sh_regs),
928 "Core Registers");
929
930 /* Get the MPI Core Registers */
931 status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0],
932 MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT);
933 if (status)
934 goto err;
935 /* Get the 16 MPI shadow registers */
936 status = ql_get_mpi_shadow_regs(qdev,
937 &mpi_coredump->mpi_core_sh_regs[0]);
938 if (status)
939 goto err;
940
941 /* Get the Test Logic Registers */
942 ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr,
943 TEST_LOGIC_SEG_NUM,
944 sizeof(struct mpi_coredump_segment_header)
945 + sizeof(mpi_coredump->test_logic_regs),
946 "Test Logic Regs");
947 status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0],
948 TEST_REGS_ADDR, TEST_REGS_CNT);
949 if (status)
950 goto err;
951
952 /* Get the RMII Registers */
953 ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr,
954 RMII_SEG_NUM,
955 sizeof(struct mpi_coredump_segment_header)
956 + sizeof(mpi_coredump->rmii_regs),
957 "RMII Registers");
958 status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0],
959 RMII_REGS_ADDR, RMII_REGS_CNT);
960 if (status)
961 goto err;
962
963 /* Get the FCMAC1 Registers */
964 ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr,
965 FCMAC1_SEG_NUM,
966 sizeof(struct mpi_coredump_segment_header)
967 + sizeof(mpi_coredump->fcmac1_regs),
968 "FCMAC1 Registers");
969 status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0],
970 FCMAC1_REGS_ADDR, FCMAC_REGS_CNT);
971 if (status)
972 goto err;
973
974 /* Get the FCMAC2 Registers */
975
976 ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr,
977 FCMAC2_SEG_NUM,
978 sizeof(struct mpi_coredump_segment_header)
979 + sizeof(mpi_coredump->fcmac2_regs),
980 "FCMAC2 Registers");
981
982 status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0],
983 FCMAC2_REGS_ADDR, FCMAC_REGS_CNT);
984 if (status)
985 goto err;
986
987 /* Get the FC1 MBX Registers */
988 ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr,
989 FC1_MBOX_SEG_NUM,
990 sizeof(struct mpi_coredump_segment_header)
991 + sizeof(mpi_coredump->fc1_mbx_regs),
992 "FC1 MBox Regs");
993 status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0],
994 FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
995 if (status)
996 goto err;
997
998 /* Get the IDE Registers */
999 ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr,
1000 IDE_SEG_NUM,
1001 sizeof(struct mpi_coredump_segment_header)
1002 + sizeof(mpi_coredump->ide_regs),
1003 "IDE Registers");
1004 status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0],
1005 IDE_REGS_ADDR, IDE_REGS_CNT);
1006 if (status)
1007 goto err;
1008
1009 /* Get the NIC1 MBX Registers */
1010 ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr,
1011 NIC1_MBOX_SEG_NUM,
1012 sizeof(struct mpi_coredump_segment_header)
1013 + sizeof(mpi_coredump->nic1_mbx_regs),
1014 "NIC1 MBox Regs");
1015 status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0],
1016 NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
1017 if (status)
1018 goto err;
1019
1020 /* Get the SMBus Registers */
1021 ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr,
1022 SMBUS_SEG_NUM,
1023 sizeof(struct mpi_coredump_segment_header)
1024 + sizeof(mpi_coredump->smbus_regs),
1025 "SMBus Registers");
1026 status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0],
1027 SMBUS_REGS_ADDR, SMBUS_REGS_CNT);
1028 if (status)
1029 goto err;
1030
1031 /* Get the FC2 MBX Registers */
1032 ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr,
1033 FC2_MBOX_SEG_NUM,
1034 sizeof(struct mpi_coredump_segment_header)
1035 + sizeof(mpi_coredump->fc2_mbx_regs),
1036 "FC2 MBox Regs");
1037 status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0],
1038 FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
1039 if (status)
1040 goto err;
1041
1042 /* Get the NIC2 MBX Registers */
1043 ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr,
1044 NIC2_MBOX_SEG_NUM,
1045 sizeof(struct mpi_coredump_segment_header)
1046 + sizeof(mpi_coredump->nic2_mbx_regs),
1047 "NIC2 MBox Regs");
1048 status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0],
1049 NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
1050 if (status)
1051 goto err;
1052
1053 /* Get the I2C Registers */
1054 ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr,
1055 I2C_SEG_NUM,
1056 sizeof(struct mpi_coredump_segment_header)
1057 + sizeof(mpi_coredump->i2c_regs),
1058 "I2C Registers");
1059 status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0],
1060 I2C_REGS_ADDR, I2C_REGS_CNT);
1061 if (status)
1062 goto err;
1063
1064 /* Get the MEMC Registers */
1065 ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr,
1066 MEMC_SEG_NUM,
1067 sizeof(struct mpi_coredump_segment_header)
1068 + sizeof(mpi_coredump->memc_regs),
1069 "MEMC Registers");
1070 status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0],
1071 MEMC_REGS_ADDR, MEMC_REGS_CNT);
1072 if (status)
1073 goto err;
1074
1075 /* Get the PBus Registers */
1076 ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr,
1077 PBUS_SEG_NUM,
1078 sizeof(struct mpi_coredump_segment_header)
1079 + sizeof(mpi_coredump->pbus_regs),
1080 "PBUS Registers");
1081 status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0],
1082 PBUS_REGS_ADDR, PBUS_REGS_CNT);
1083 if (status)
1084 goto err;
1085
1086 /* Get the MDE Registers */
1087 ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr,
1088 MDE_SEG_NUM,
1089 sizeof(struct mpi_coredump_segment_header)
1090 + sizeof(mpi_coredump->mde_regs),
1091 "MDE Registers");
1092 status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0],
1093 MDE_REGS_ADDR, MDE_REGS_CNT);
1094 if (status)
1095 goto err;
1096
1097 ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
1098 MISC_NIC_INFO_SEG_NUM,
1099 sizeof(struct mpi_coredump_segment_header)
1100 + sizeof(mpi_coredump->misc_nic_info),
1101 "MISC NIC INFO");
1102 mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
1103 mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
1104 mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
1105 mpi_coredump->misc_nic_info.function = qdev->func;
1106
1107 /* Segment 31 */
1108 /* Get indexed register values. */
1109 ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
1110 INTR_STATES_SEG_NUM,
1111 sizeof(struct mpi_coredump_segment_header)
1112 + sizeof(mpi_coredump->intr_states),
1113 "INTR States");
1114 ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
1115
1116 ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
1117 CAM_ENTRIES_SEG_NUM,
1118 sizeof(struct mpi_coredump_segment_header)
1119 + sizeof(mpi_coredump->cam_entries),
1120 "CAM Entries");
1121 status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
1122 if (status)
1123 goto err;
1124
1125 ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
1126 ROUTING_WORDS_SEG_NUM,
1127 sizeof(struct mpi_coredump_segment_header)
1128 + sizeof(mpi_coredump->nic_routing_words),
1129 "Routing Words");
1130 status = ql_get_routing_entries(qdev,
1131 &mpi_coredump->nic_routing_words[0]);
1132 if (status)
1133 goto err;
1134
1135 /* Segment 34 (Rev C. step 23) */
1136 ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
1137 ETS_SEG_NUM,
1138 sizeof(struct mpi_coredump_segment_header)
1139 + sizeof(mpi_coredump->ets),
1140 "ETS Registers");
1141 status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
1142 if (status)
1143 goto err;
1144
1145 ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr,
1146 PROBE_DUMP_SEG_NUM,
1147 sizeof(struct mpi_coredump_segment_header)
1148 + sizeof(mpi_coredump->probe_dump),
1149 "Probe Dump");
1150 ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]);
1151
1152 ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr,
1153 ROUTING_INDEX_SEG_NUM,
1154 sizeof(struct mpi_coredump_segment_header)
1155 + sizeof(mpi_coredump->routing_regs),
1156 "Routing Regs");
1157 status = ql_get_routing_index_registers(qdev,
1158 &mpi_coredump->routing_regs[0]);
1159 if (status)
1160 goto err;
1161
1162 ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr,
1163 MAC_PROTOCOL_SEG_NUM,
1164 sizeof(struct mpi_coredump_segment_header)
1165 + sizeof(mpi_coredump->mac_prot_regs),
1166 "MAC Prot Regs");
1167 ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]);
1168
1169 /* Get the semaphore registers for all 5 functions */
1170 ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr,
1171 SEM_REGS_SEG_NUM,
1172 sizeof(struct mpi_coredump_segment_header) +
1173 sizeof(mpi_coredump->sem_regs), "Sem Registers");
1174
1175 ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]);
1176
1177 /* Prevent the MPI from restarting while we dump the memory. */
1178 ql_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC);
1179
1180 /* clear the pause */
1181 status = ql_unpause_mpi_risc(qdev);
1182 if (status) {
1183 netif_err(qdev, drv, qdev->ndev,
1184 "Failed RISC unpause. Status = 0x%.08x\n", status);
1185 goto err;
1186 }
1187
1188 /* Reset the RISC so we can dump RAM */
1189 status = ql_hard_reset_mpi_risc(qdev);
1190 if (status) {
1191 netif_err(qdev, drv, qdev->ndev,
1192 "Failed RISC reset. Status = 0x%.08x\n", status);
1193 goto err;
1194 }
1195
1196 ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr,
1197 WCS_RAM_SEG_NUM,
1198 sizeof(struct mpi_coredump_segment_header)
1199 + sizeof(mpi_coredump->code_ram),
1200 "WCS RAM");
1201 status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0],
1202 CODE_RAM_ADDR, CODE_RAM_CNT);
1203 if (status) {
1204 netif_err(qdev, drv, qdev->ndev,
1205 "Failed Dump of CODE RAM. Status = 0x%.08x\n",
1206 status);
1207 goto err;
1208 }
1209
1210 /* Insert the segment header */
1211 ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr,
1212 MEMC_RAM_SEG_NUM,
1213 sizeof(struct mpi_coredump_segment_header)
1214 + sizeof(mpi_coredump->memc_ram),
1215 "MEMC RAM");
1216 status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0],
1217 MEMC_RAM_ADDR, MEMC_RAM_CNT);
1218 if (status) {
1219 netif_err(qdev, drv, qdev->ndev,
1220 "Failed Dump of MEMC RAM. Status = 0x%.08x\n",
1221 status);
1222 goto err;
1223 }
1224err:
1225 ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
1226 return status;
1227
1228}
1229
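A hedged sketch of how a caller might drive ql_core_dump(); the driver itself keeps its dump buffer at qdev->mpi_coredump, so the vmalloc()/vfree() pairing below (which would also need linux/vmalloc.h) is illustrative only and not part of this patch:

	/* Illustrative: capture a full MPI coredump into a temporary buffer. */
	static int example_capture_coredump(struct ql_adapter *qdev)
	{
		struct ql_mpi_coredump *dump = vmalloc(sizeof(*dump));
		int status;

		if (!dump)
			return -ENOMEM;
		status = ql_core_dump(qdev, dump);
		if (status)
			netif_err(qdev, drv, qdev->ndev,
				  "coredump failed, status = %d\n", status);
		vfree(dump);
		return status;
	}
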
1230static void ql_get_core_dump(struct ql_adapter *qdev)
1231{
1232 if (!ql_own_firmware(qdev)) {
1233 netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
1234 return;
1235 }
1236
1237 if (!netif_running(qdev->ndev)) {
1238 netif_err(qdev, ifup, qdev->ndev,
1239 "Force Coredump can only be done from an interface that is up\n");
1240 return;
1241 }
1242 ql_queue_fw_error(qdev);
1243}
1244
1245void ql_gen_reg_dump(struct ql_adapter *qdev,
1246 struct ql_reg_dump *mpi_coredump)
1247{
1248 int i, status;
1249
1250
1251 memset(&(mpi_coredump->mpi_global_header), 0,
1252 sizeof(struct mpi_coredump_global_header));
1253 mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
1254 mpi_coredump->mpi_global_header.headerSize =
1255 sizeof(struct mpi_coredump_global_header);
1256 mpi_coredump->mpi_global_header.imageSize =
1257 sizeof(struct ql_reg_dump);
1258 memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
1259 sizeof(mpi_coredump->mpi_global_header.idString));
1260
1261
1262 /* segment 16 */
1263 ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
1264 MISC_NIC_INFO_SEG_NUM,
1265 sizeof(struct mpi_coredump_segment_header)
1266 + sizeof(mpi_coredump->misc_nic_info),
1267 "MISC NIC INFO");
1268 mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
1269 mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
1270 mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
1271 mpi_coredump->misc_nic_info.function = qdev->func;
1272
1273 /* Segment 16, Rev C. Step 18 */
1274 ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
1275 NIC1_CONTROL_SEG_NUM,
1276 sizeof(struct mpi_coredump_segment_header)
1277 + sizeof(mpi_coredump->nic_regs),
1278 "NIC Registers");
1279 /* Get generic reg dump */
1280 for (i = 0; i < 64; i++)
1281 mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32));
1282
1283 /* Segment 31 */
1284 /* Get indexed register values. */
1285 ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
1286 INTR_STATES_SEG_NUM,
1287 sizeof(struct mpi_coredump_segment_header)
1288 + sizeof(mpi_coredump->intr_states),
1289 "INTR States");
1290 ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
1291
1292 ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
1293 CAM_ENTRIES_SEG_NUM,
1294 sizeof(struct mpi_coredump_segment_header)
1295 + sizeof(mpi_coredump->cam_entries),
1296 "CAM Entries");
1297 status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
1298 if (status)
1299 return;
1300
1301 ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
1302 ROUTING_WORDS_SEG_NUM,
1303 sizeof(struct mpi_coredump_segment_header)
1304 + sizeof(mpi_coredump->nic_routing_words),
1305 "Routing Words");
1306 status = ql_get_routing_entries(qdev,
1307 &mpi_coredump->nic_routing_words[0]);
1308 if (status)
1309 return;
1310
1311 /* Segment 34 (Rev C. step 23) */
1312 ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
1313 ETS_SEG_NUM,
1314 sizeof(struct mpi_coredump_segment_header)
1315 + sizeof(mpi_coredump->ets),
1316 "ETS Registers");
1317 status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
1318 if (status)
1319 return;
1320}
1321
1322void ql_get_dump(struct ql_adapter *qdev, void *buff)
1323{
1324 /*
1325 * If the dump has already been taken and is stored
1326 * in our internal buffer, and if force dump is set,
1327 * then just start the spool to dump it to the log file
1328 * and also take a snapshot of the general regs into
1329 * the user's buffer. Otherwise (force not set), take
1330 * a complete dump into the user's buffer.
1331 */
1332
1333 if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) {
1334 if (!ql_core_dump(qdev, buff))
1335 ql_soft_reset_mpi_risc(qdev);
1336 else
1337 netif_err(qdev, drv, qdev->ndev, "coredump failed!\n");
1338 } else {
1339 ql_gen_reg_dump(qdev, buff);
1340 ql_get_core_dump(qdev);
1341 }
1342}
1343
1344/* Coredump to messages log file using separate worker thread */
1345void ql_mpi_core_to_log(struct work_struct *work)
1346{
1347 struct ql_adapter *qdev =
1348 container_of(work, struct ql_adapter, mpi_core_to_log.work);
1349 u32 *tmp, count;
1350 int i;
1351
1352 count = sizeof(struct ql_mpi_coredump) / sizeof(u32);
1353 tmp = (u32 *)qdev->mpi_coredump;
1354 netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
1355 "Core is dumping to log file!\n");
1356
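 /* Each line printed below covers eight consecutive 32-bit words of the
 * coredump; the leading value is the starting word index into the dump.
 */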
1357 for (i = 0; i < count; i += 8) {
1358 pr_err("%.08x: %.08x %.08x %.08x %.08x %.08x "
1359 "%.08x %.08x %.08x\n", i,
1360 tmp[i + 0],
1361 tmp[i + 1],
1362 tmp[i + 2],
1363 tmp[i + 3],
1364 tmp[i + 4],
1365 tmp[i + 5],
1366 tmp[i + 6],
1367 tmp[i + 7]);
1368 msleep(5);
1369 }
1370}
1371
1372#ifdef QL_REG_DUMP
1373static void ql_dump_intr_states(struct ql_adapter *qdev)
1374{
1375 int i;
1376 u32 value;
1377 for (i = 0; i < qdev->intr_count; i++) {
1378 ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask);
1379 value = ql_read32(qdev, INTR_EN);
1380 pr_err("%s: Interrupt %d is %s\n",
1381 qdev->ndev->name, i,
1382 (value & INTR_EN_EN ? "enabled" : "disabled"));
1383 }
1384}
1385
1386#define DUMP_XGMAC(qdev, reg) \
1387do { \
1388 u32 data; \
1389 ql_read_xgmac_reg(qdev, reg, &data); \
1390 pr_err("%s: %s = 0x%.08x\n", qdev->ndev->name, #reg, data); \
1391} while (0)
1392
1393void ql_dump_xgmac_control_regs(struct ql_adapter *qdev)
1394{
1395 if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
1396 pr_err("%s: Couldn't get xgmac sem\n", __func__);
1397 return;
1398 }
1399 DUMP_XGMAC(qdev, PAUSE_SRC_LO);
1400 DUMP_XGMAC(qdev, PAUSE_SRC_HI);
1401 DUMP_XGMAC(qdev, GLOBAL_CFG);
1402 DUMP_XGMAC(qdev, TX_CFG);
1403 DUMP_XGMAC(qdev, RX_CFG);
1404 DUMP_XGMAC(qdev, FLOW_CTL);
1405 DUMP_XGMAC(qdev, PAUSE_OPCODE);
1406 DUMP_XGMAC(qdev, PAUSE_TIMER);
1407 DUMP_XGMAC(qdev, PAUSE_FRM_DEST_LO);
1408 DUMP_XGMAC(qdev, PAUSE_FRM_DEST_HI);
1409 DUMP_XGMAC(qdev, MAC_TX_PARAMS);
1410 DUMP_XGMAC(qdev, MAC_RX_PARAMS);
1411 DUMP_XGMAC(qdev, MAC_SYS_INT);
1412 DUMP_XGMAC(qdev, MAC_SYS_INT_MASK);
1413 DUMP_XGMAC(qdev, MAC_MGMT_INT);
1414 DUMP_XGMAC(qdev, MAC_MGMT_IN_MASK);
1415 DUMP_XGMAC(qdev, EXT_ARB_MODE);
1416 ql_sem_unlock(qdev, qdev->xg_sem_mask);
1417}
1418
1419static void ql_dump_ets_regs(struct ql_adapter *qdev)
1420{
1421}
1422
1423static void ql_dump_cam_entries(struct ql_adapter *qdev)
1424{
1425 int i;
1426 u32 value[3];
1427
1428 i = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
1429 if (i)
1430 return;
1431 for (i = 0; i < 4; i++) {
1432 if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) {
1433 pr_err("%s: Failed read of mac index register\n",
1434 __func__);
1435 break; /* don't return with the semaphore held */
1436 } else {
1437 if (value[0])
1438 pr_err("%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x\n",
1439 qdev->ndev->name, i, value[1], value[0],
1440 value[2]);
1441 }
1442 }
1443 for (i = 0; i < 32; i++) {
1444 if (ql_get_mac_addr_reg
1445 (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) {
1446 pr_err("%s: Failed read of mac index register\n",
1447 __func__);
1448 break; /* don't return with the semaphore held */
1449 } else {
1450 if (value[0])
1451 pr_err("%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x\n",
1452 qdev->ndev->name, i, value[1], value[0]);
1453 }
1454 }
1455 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
1456}
1457
1458void ql_dump_routing_entries(struct ql_adapter *qdev)
1459{
1460 int i;
1461 u32 value;
1462 i = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
1463 if (i)
1464 return;
1465 for (i = 0; i < 16; i++) {
1466 value = 0;
1467 if (ql_get_routing_reg(qdev, i, &value)) {
1468 pr_err("%s: Failed read of routing index register\n",
1469 __func__);
1470 break; /* don't return with the semaphore held */
1471 } else {
1472 if (value)
1473 pr_err("%s: Routing Mask %d = 0x%.08x\n",
1474 qdev->ndev->name, i, value);
1475 }
1476 }
1477 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
1478}
1479
1480#define DUMP_REG(qdev, reg) \
1481 pr_err("%-32s= 0x%x\n", #reg, ql_read32(qdev, reg))
1482
1483void ql_dump_regs(struct ql_adapter *qdev)
1484{
1485 pr_err("reg dump for function #%d\n", qdev->func);
1486 DUMP_REG(qdev, SYS);
1487 DUMP_REG(qdev, RST_FO);
1488 DUMP_REG(qdev, FSC);
1489 DUMP_REG(qdev, CSR);
1490 DUMP_REG(qdev, ICB_RID);
1491 DUMP_REG(qdev, ICB_L);
1492 DUMP_REG(qdev, ICB_H);
1493 DUMP_REG(qdev, CFG);
1494 DUMP_REG(qdev, BIOS_ADDR);
1495 DUMP_REG(qdev, STS);
1496 DUMP_REG(qdev, INTR_EN);
1497 DUMP_REG(qdev, INTR_MASK);
1498 DUMP_REG(qdev, ISR1);
1499 DUMP_REG(qdev, ISR2);
1500 DUMP_REG(qdev, ISR3);
1501 DUMP_REG(qdev, ISR4);
1502 DUMP_REG(qdev, REV_ID);
1503 DUMP_REG(qdev, FRC_ECC_ERR);
1504 DUMP_REG(qdev, ERR_STS);
1505 DUMP_REG(qdev, RAM_DBG_ADDR);
1506 DUMP_REG(qdev, RAM_DBG_DATA);
1507 DUMP_REG(qdev, ECC_ERR_CNT);
1508 DUMP_REG(qdev, SEM);
1509 DUMP_REG(qdev, GPIO_1);
1510 DUMP_REG(qdev, GPIO_2);
1511 DUMP_REG(qdev, GPIO_3);
1512 DUMP_REG(qdev, XGMAC_ADDR);
1513 DUMP_REG(qdev, XGMAC_DATA);
1514 DUMP_REG(qdev, NIC_ETS);
1515 DUMP_REG(qdev, CNA_ETS);
1516 DUMP_REG(qdev, FLASH_ADDR);
1517 DUMP_REG(qdev, FLASH_DATA);
1518 DUMP_REG(qdev, CQ_STOP);
1519 DUMP_REG(qdev, PAGE_TBL_RID);
1520 DUMP_REG(qdev, WQ_PAGE_TBL_LO);
1521 DUMP_REG(qdev, WQ_PAGE_TBL_HI);
1522 DUMP_REG(qdev, CQ_PAGE_TBL_LO);
1523 DUMP_REG(qdev, CQ_PAGE_TBL_HI);
1524 DUMP_REG(qdev, COS_DFLT_CQ1);
1525 DUMP_REG(qdev, COS_DFLT_CQ2);
1526 DUMP_REG(qdev, SPLT_HDR);
1527 DUMP_REG(qdev, FC_PAUSE_THRES);
1528 DUMP_REG(qdev, NIC_PAUSE_THRES);
1529 DUMP_REG(qdev, FC_ETHERTYPE);
1530 DUMP_REG(qdev, FC_RCV_CFG);
1531 DUMP_REG(qdev, NIC_RCV_CFG);
1532 DUMP_REG(qdev, FC_COS_TAGS);
1533 DUMP_REG(qdev, NIC_COS_TAGS);
1534 DUMP_REG(qdev, MGMT_RCV_CFG);
1535 DUMP_REG(qdev, XG_SERDES_ADDR);
1536 DUMP_REG(qdev, XG_SERDES_DATA);
1537 DUMP_REG(qdev, PRB_MX_ADDR);
1538 DUMP_REG(qdev, PRB_MX_DATA);
1539 ql_dump_intr_states(qdev);
1540 ql_dump_xgmac_control_regs(qdev);
1541 ql_dump_ets_regs(qdev);
1542 ql_dump_cam_entries(qdev);
1543 ql_dump_routing_entries(qdev);
1544}
1545#endif
1546
1547#ifdef QL_STAT_DUMP
1548
1549#define DUMP_STAT(qdev, stat) \
1550 pr_err("%s = %ld\n", #stat, (unsigned long)qdev->nic_stats.stat)
1551
1552void ql_dump_stat(struct ql_adapter *qdev)
1553{
1554 pr_err("%s: Enter\n", __func__);
1555 DUMP_STAT(qdev, tx_pkts);
1556 DUMP_STAT(qdev, tx_bytes);
1557 DUMP_STAT(qdev, tx_mcast_pkts);
1558 DUMP_STAT(qdev, tx_bcast_pkts);
1559 DUMP_STAT(qdev, tx_ucast_pkts);
1560 DUMP_STAT(qdev, tx_ctl_pkts);
1561 DUMP_STAT(qdev, tx_pause_pkts);
1562 DUMP_STAT(qdev, tx_64_pkt);
1563 DUMP_STAT(qdev, tx_65_to_127_pkt);
1564 DUMP_STAT(qdev, tx_128_to_255_pkt);
1565 DUMP_STAT(qdev, tx_256_511_pkt);
1566 DUMP_STAT(qdev, tx_512_to_1023_pkt);
1567 DUMP_STAT(qdev, tx_1024_to_1518_pkt);
1568 DUMP_STAT(qdev, tx_1519_to_max_pkt);
1569 DUMP_STAT(qdev, tx_undersize_pkt);
1570 DUMP_STAT(qdev, tx_oversize_pkt);
1571 DUMP_STAT(qdev, rx_bytes);
1572 DUMP_STAT(qdev, rx_bytes_ok);
1573 DUMP_STAT(qdev, rx_pkts);
1574 DUMP_STAT(qdev, rx_pkts_ok);
1575 DUMP_STAT(qdev, rx_bcast_pkts);
1576 DUMP_STAT(qdev, rx_mcast_pkts);
1577 DUMP_STAT(qdev, rx_ucast_pkts);
1578 DUMP_STAT(qdev, rx_undersize_pkts);
1579 DUMP_STAT(qdev, rx_oversize_pkts);
1580 DUMP_STAT(qdev, rx_jabber_pkts);
1581 DUMP_STAT(qdev, rx_undersize_fcerr_pkts);
1582 DUMP_STAT(qdev, rx_drop_events);
1583 DUMP_STAT(qdev, rx_fcerr_pkts);
1584 DUMP_STAT(qdev, rx_align_err);
1585 DUMP_STAT(qdev, rx_symbol_err);
1586 DUMP_STAT(qdev, rx_mac_err);
1587 DUMP_STAT(qdev, rx_ctl_pkts);
1588 DUMP_STAT(qdev, rx_pause_pkts);
1589 DUMP_STAT(qdev, rx_64_pkts);
1590 DUMP_STAT(qdev, rx_65_to_127_pkts);
1591 DUMP_STAT(qdev, rx_128_255_pkts);
1592 DUMP_STAT(qdev, rx_256_511_pkts);
1593 DUMP_STAT(qdev, rx_512_to_1023_pkts);
1594 DUMP_STAT(qdev, rx_1024_to_1518_pkts);
1595 DUMP_STAT(qdev, rx_1519_to_max_pkts);
1596 DUMP_STAT(qdev, rx_len_err_pkts);
1597}
1598#endif
1599
1600#ifdef QL_DEV_DUMP
1601
1602#define DUMP_QDEV_FIELD(qdev, type, field) \
1603 pr_err("qdev->%-24s = " type "\n", #field, qdev->field)
1604#define DUMP_QDEV_DMA_FIELD(qdev, field) \
1605 pr_err("qdev->%-24s = %llx\n", #field, (unsigned long long)qdev->field)
1606#define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \
1607 pr_err("%s[%d].%s = " type "\n", \
1608 #array, index, #field, qdev->array[index].field);
1609void ql_dump_qdev(struct ql_adapter *qdev)
1610{
1611 int i;
1612 DUMP_QDEV_FIELD(qdev, "%lx", flags);
1613 DUMP_QDEV_FIELD(qdev, "%p", vlgrp);
1614 DUMP_QDEV_FIELD(qdev, "%p", pdev);
1615 DUMP_QDEV_FIELD(qdev, "%p", ndev);
1616 DUMP_QDEV_FIELD(qdev, "%d", chip_rev_id);
1617 DUMP_QDEV_FIELD(qdev, "%p", reg_base);
1618 DUMP_QDEV_FIELD(qdev, "%p", doorbell_area);
1619 DUMP_QDEV_FIELD(qdev, "%d", doorbell_area_size);
1620 DUMP_QDEV_FIELD(qdev, "%x", msg_enable);
1621 DUMP_QDEV_FIELD(qdev, "%p", rx_ring_shadow_reg_area);
1622 DUMP_QDEV_DMA_FIELD(qdev, rx_ring_shadow_reg_dma);
1623 DUMP_QDEV_FIELD(qdev, "%p", tx_ring_shadow_reg_area);
1624 DUMP_QDEV_DMA_FIELD(qdev, tx_ring_shadow_reg_dma);
1625 DUMP_QDEV_FIELD(qdev, "%d", intr_count);
1626 if (qdev->msi_x_entry)
1627 for (i = 0; i < qdev->intr_count; i++) {
1628 DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, vector);
1629 DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, entry);
1630 }
1631 for (i = 0; i < qdev->intr_count; i++) {
1632 DUMP_QDEV_ARRAY(qdev, "%p", intr_context, i, qdev);
1633 DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, intr);
1634 DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, hooked);
1635 DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_en_mask);
1636 DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_dis_mask);
1637 DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_read_mask);
1638 }
1639 DUMP_QDEV_FIELD(qdev, "%d", tx_ring_count);
1640 DUMP_QDEV_FIELD(qdev, "%d", rx_ring_count);
1641 DUMP_QDEV_FIELD(qdev, "%d", ring_mem_size);
1642 DUMP_QDEV_FIELD(qdev, "%p", ring_mem);
1643 DUMP_QDEV_FIELD(qdev, "%d", intr_count);
1644 DUMP_QDEV_FIELD(qdev, "%p", tx_ring);
1645 DUMP_QDEV_FIELD(qdev, "%d", rss_ring_count);
1646 DUMP_QDEV_FIELD(qdev, "%p", rx_ring);
1647 DUMP_QDEV_FIELD(qdev, "%d", default_rx_queue);
1648 DUMP_QDEV_FIELD(qdev, "0x%08x", xg_sem_mask);
1649 DUMP_QDEV_FIELD(qdev, "0x%08x", port_link_up);
1650 DUMP_QDEV_FIELD(qdev, "0x%08x", port_init);
1651}
1652#endif
1653
1654#ifdef QL_CB_DUMP
1655void ql_dump_wqicb(struct wqicb *wqicb)
1656{
1657 pr_err("Dumping wqicb stuff...\n");
1658 pr_err("wqicb->len = 0x%x\n", le16_to_cpu(wqicb->len));
1659 pr_err("wqicb->flags = %x\n", le16_to_cpu(wqicb->flags));
1660 pr_err("wqicb->cq_id_rss = %d\n",
1661 le16_to_cpu(wqicb->cq_id_rss));
1662 pr_err("wqicb->rid = 0x%x\n", le16_to_cpu(wqicb->rid));
1663 pr_err("wqicb->wq_addr = 0x%llx\n",
1664 (unsigned long long) le64_to_cpu(wqicb->addr));
1665 pr_err("wqicb->wq_cnsmr_idx_addr = 0x%llx\n",
1666 (unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr));
1667}
1668
1669void ql_dump_tx_ring(struct tx_ring *tx_ring)
1670{
1671 if (tx_ring == NULL)
1672 return;
1673 pr_err("===================== Dumping tx_ring %d ===============\n",
1674 tx_ring->wq_id);
1675 pr_err("tx_ring->base = %p\n", tx_ring->wq_base);
1676 pr_err("tx_ring->base_dma = 0x%llx\n",
1677 (unsigned long long) tx_ring->wq_base_dma);
1678 pr_err("tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d\n",
1679 tx_ring->cnsmr_idx_sh_reg,
1680 tx_ring->cnsmr_idx_sh_reg
1681 ? ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0);
1682 pr_err("tx_ring->size = %d\n", tx_ring->wq_size);
1683 pr_err("tx_ring->len = %d\n", tx_ring->wq_len);
1684 pr_err("tx_ring->prod_idx_db_reg = %p\n", tx_ring->prod_idx_db_reg);
1685 pr_err("tx_ring->valid_db_reg = %p\n", tx_ring->valid_db_reg);
1686 pr_err("tx_ring->prod_idx = %d\n", tx_ring->prod_idx);
1687 pr_err("tx_ring->cq_id = %d\n", tx_ring->cq_id);
1688 pr_err("tx_ring->wq_id = %d\n", tx_ring->wq_id);
1689 pr_err("tx_ring->q = %p\n", tx_ring->q);
1690 pr_err("tx_ring->tx_count = %d\n", atomic_read(&tx_ring->tx_count));
1691}
1692
1693void ql_dump_ricb(struct ricb *ricb)
1694{
1695 int i;
1696 pr_err("===================== Dumping ricb ===============\n");
1697 pr_err("Dumping ricb stuff...\n");
1698
1699 pr_err("ricb->base_cq = %d\n", ricb->base_cq & 0x1f);
1700 pr_err("ricb->flags = %s%s%s%s%s%s%s%s%s\n",
1701 ricb->base_cq & RSS_L4K ? "RSS_L4K " : "",
1702 ricb->flags & RSS_L6K ? "RSS_L6K " : "",
1703 ricb->flags & RSS_LI ? "RSS_LI " : "",
1704 ricb->flags & RSS_LB ? "RSS_LB " : "",
1705 ricb->flags & RSS_LM ? "RSS_LM " : "",
1706 ricb->flags & RSS_RI4 ? "RSS_RI4 " : "",
1707 ricb->flags & RSS_RT4 ? "RSS_RT4 " : "",
1708 ricb->flags & RSS_RI6 ? "RSS_RI6 " : "",
1709 ricb->flags & RSS_RT6 ? "RSS_RT6 " : "");
1710 pr_err("ricb->mask = 0x%.04x\n", le16_to_cpu(ricb->mask));
1711 for (i = 0; i < 16; i++)
1712 pr_err("ricb->hash_cq_id[%d] = 0x%.08x\n", i,
1713 le32_to_cpu(ricb->hash_cq_id[i]));
1714 for (i = 0; i < 10; i++)
1715 pr_err("ricb->ipv6_hash_key[%d] = 0x%.08x\n", i,
1716 le32_to_cpu(ricb->ipv6_hash_key[i]));
1717 for (i = 0; i < 4; i++)
1718 pr_err("ricb->ipv4_hash_key[%d] = 0x%.08x\n", i,
1719 le32_to_cpu(ricb->ipv4_hash_key[i]));
1720}
1721
1722void ql_dump_cqicb(struct cqicb *cqicb)
1723{
1724 pr_err("Dumping cqicb stuff...\n");
1725
1726 pr_err("cqicb->msix_vect = %d\n", cqicb->msix_vect);
1727 pr_err("cqicb->flags = %x\n", cqicb->flags);
1728 pr_err("cqicb->len = %d\n", le16_to_cpu(cqicb->len));
1729 pr_err("cqicb->addr = 0x%llx\n",
1730 (unsigned long long) le64_to_cpu(cqicb->addr));
1731 pr_err("cqicb->prod_idx_addr = 0x%llx\n",
1732 (unsigned long long) le64_to_cpu(cqicb->prod_idx_addr));
1733 pr_err("cqicb->pkt_delay = 0x%.04x\n",
1734 le16_to_cpu(cqicb->pkt_delay));
1735 pr_err("cqicb->irq_delay = 0x%.04x\n",
1736 le16_to_cpu(cqicb->irq_delay));
1737 pr_err("cqicb->lbq_addr = 0x%llx\n",
1738 (unsigned long long) le64_to_cpu(cqicb->lbq_addr));
1739 pr_err("cqicb->lbq_buf_size = 0x%.04x\n",
1740 le16_to_cpu(cqicb->lbq_buf_size));
1741 pr_err("cqicb->lbq_len = 0x%.04x\n",
1742 le16_to_cpu(cqicb->lbq_len));
1743 pr_err("cqicb->sbq_addr = 0x%llx\n",
1744 (unsigned long long) le64_to_cpu(cqicb->sbq_addr));
1745 pr_err("cqicb->sbq_buf_size = 0x%.04x\n",
1746 le16_to_cpu(cqicb->sbq_buf_size));
1747 pr_err("cqicb->sbq_len = 0x%.04x\n",
1748 le16_to_cpu(cqicb->sbq_len));
1749}
1750
1751void ql_dump_rx_ring(struct rx_ring *rx_ring)
1752{
1753 if (rx_ring == NULL)
1754 return;
1755 pr_err("===================== Dumping rx_ring %d ===============\n",
1756 rx_ring->cq_id);
1757 pr_err("Dumping rx_ring %d, type = %s%s%s\n",
1758 rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "",
1759 rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "",
1760 rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : "");
1761 pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb);
1762 pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base);
1763 pr_err("rx_ring->cq_base_dma = %llx\n",
1764 (unsigned long long) rx_ring->cq_base_dma);
1765 pr_err("rx_ring->cq_size = %d\n", rx_ring->cq_size);
1766 pr_err("rx_ring->cq_len = %d\n", rx_ring->cq_len);
1767 pr_err("rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d\n",
1768 rx_ring->prod_idx_sh_reg,
1769 rx_ring->prod_idx_sh_reg
1770 ? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0);
1771 pr_err("rx_ring->prod_idx_sh_reg_dma = %llx\n",
1772 (unsigned long long) rx_ring->prod_idx_sh_reg_dma);
1773 pr_err("rx_ring->cnsmr_idx_db_reg = %p\n",
1774 rx_ring->cnsmr_idx_db_reg);
1775 pr_err("rx_ring->cnsmr_idx = %d\n", rx_ring->cnsmr_idx);
1776 pr_err("rx_ring->curr_entry = %p\n", rx_ring->curr_entry);
1777 pr_err("rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg);
1778
1779 pr_err("rx_ring->lbq_base = %p\n", rx_ring->lbq_base);
1780 pr_err("rx_ring->lbq_base_dma = %llx\n",
1781 (unsigned long long) rx_ring->lbq_base_dma);
1782 pr_err("rx_ring->lbq_base_indirect = %p\n",
1783 rx_ring->lbq_base_indirect);
1784 pr_err("rx_ring->lbq_base_indirect_dma = %llx\n",
1785 (unsigned long long) rx_ring->lbq_base_indirect_dma);
1786 pr_err("rx_ring->lbq = %p\n", rx_ring->lbq);
1787 pr_err("rx_ring->lbq_len = %d\n", rx_ring->lbq_len);
1788 pr_err("rx_ring->lbq_size = %d\n", rx_ring->lbq_size);
1789 pr_err("rx_ring->lbq_prod_idx_db_reg = %p\n",
1790 rx_ring->lbq_prod_idx_db_reg);
1791 pr_err("rx_ring->lbq_prod_idx = %d\n", rx_ring->lbq_prod_idx);
1792 pr_err("rx_ring->lbq_curr_idx = %d\n", rx_ring->lbq_curr_idx);
1793 pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx);
1794 pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt);
1795 pr_err("rx_ring->lbq_buf_size = %d\n", rx_ring->lbq_buf_size);
1796
1797 pr_err("rx_ring->sbq_base = %p\n", rx_ring->sbq_base);
1798 pr_err("rx_ring->sbq_base_dma = %llx\n",
1799 (unsigned long long) rx_ring->sbq_base_dma);
1800 pr_err("rx_ring->sbq_base_indirect = %p\n",
1801 rx_ring->sbq_base_indirect);
1802 pr_err("rx_ring->sbq_base_indirect_dma = %llx\n",
1803 (unsigned long long) rx_ring->sbq_base_indirect_dma);
1804 pr_err("rx_ring->sbq = %p\n", rx_ring->sbq);
1805 pr_err("rx_ring->sbq_len = %d\n", rx_ring->sbq_len);
1806 pr_err("rx_ring->sbq_size = %d\n", rx_ring->sbq_size);
1807 pr_err("rx_ring->sbq_prod_idx_db_reg addr = %p\n",
1808 rx_ring->sbq_prod_idx_db_reg);
1809 pr_err("rx_ring->sbq_prod_idx = %d\n", rx_ring->sbq_prod_idx);
1810 pr_err("rx_ring->sbq_curr_idx = %d\n", rx_ring->sbq_curr_idx);
1811 pr_err("rx_ring->sbq_clean_idx = %d\n", rx_ring->sbq_clean_idx);
1812 pr_err("rx_ring->sbq_free_cnt = %d\n", rx_ring->sbq_free_cnt);
1813 pr_err("rx_ring->sbq_buf_size = %d\n", rx_ring->sbq_buf_size);
1814 pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id);
1815 pr_err("rx_ring->irq = %d\n", rx_ring->irq);
1816 pr_err("rx_ring->cpu = %d\n", rx_ring->cpu);
1817 pr_err("rx_ring->qdev = %p\n", rx_ring->qdev);
1818}
1819
1820void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
1821{
1822 void *ptr;
1823
1824 pr_err("%s: Enter\n", __func__);
1825
1826 ptr = kmalloc(size, GFP_ATOMIC);
1827 if (ptr == NULL) {
1828 pr_err("%s: Couldn't allocate a buffer\n", __func__);
1829 return;
1830 }
1831
1832 if (ql_write_cfg(qdev, ptr, size, bit, q_id)) {
1833 pr_err("%s: Failed to upload control block!\n", __func__);
1834 goto fail_it;
1835 }
1836 switch (bit) {
1837 case CFG_DRQ:
1838 ql_dump_wqicb((struct wqicb *)ptr);
1839 break;
1840 case CFG_DCQ:
1841 ql_dump_cqicb((struct cqicb *)ptr);
1842 break;
1843 case CFG_DR:
1844 ql_dump_ricb((struct ricb *)ptr);
1845 break;
1846 default:
1847 pr_err("%s: Invalid bit value = %x\n", __func__, bit);
1848 break;
1849 }
1850fail_it:
1851 kfree(ptr);
1852}
1853#endif
1854
1855#ifdef QL_OB_DUMP
1856void ql_dump_tx_desc(struct tx_buf_desc *tbd)
1857{
1858 pr_err("tbd->addr = 0x%llx\n",
1859 le64_to_cpu((u64) tbd->addr));
1860 pr_err("tbd->len = %d\n",
1861 le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
1862 pr_err("tbd->flags = %s %s\n",
1863 tbd->len & TX_DESC_C ? "C" : ".",
1864 tbd->len & TX_DESC_E ? "E" : ".");
1865 tbd++;
1866 pr_err("tbd->addr = 0x%llx\n",
1867 le64_to_cpu((u64) tbd->addr));
1868 pr_err("tbd->len = %d\n",
1869 le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
1870 pr_err("tbd->flags = %s %s\n",
1871 tbd->len & TX_DESC_C ? "C" : ".",
1872 tbd->len & TX_DESC_E ? "E" : ".");
1873 tbd++;
1874 pr_err("tbd->addr = 0x%llx\n",
1875 le64_to_cpu((u64) tbd->addr));
1876 pr_err("tbd->len = %d\n",
1877 le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
1878 pr_err("tbd->flags = %s %s\n",
1879 tbd->len & TX_DESC_C ? "C" : ".",
1880 tbd->len & TX_DESC_E ? "E" : ".");
1881
1882}
1883
1884void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
1885{
1886 struct ob_mac_tso_iocb_req *ob_mac_tso_iocb =
1887 (struct ob_mac_tso_iocb_req *)ob_mac_iocb;
1888 struct tx_buf_desc *tbd;
1889 u16 frame_len;
1890
1891 pr_err("%s\n", __func__);
1892 pr_err("opcode = %s\n",
1893 (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO");
1894 pr_err("flags1 = %s %s %s %s %s\n",
1895 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "",
1896 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "",
1897 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "",
1898 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "",
1899 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : "");
1900 pr_err("flags2 = %s %s %s\n",
1901 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
1902 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
1903 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
1904 pr_err("flags3 = %s %s %s\n",
1905 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
1906 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
1907 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
1908 pr_err("tid = %x\n", ob_mac_iocb->tid);
1909 pr_err("txq_idx = %d\n", ob_mac_iocb->txq_idx);
1910 pr_err("vlan_tci = %x\n", ob_mac_tso_iocb->vlan_tci);
1911 if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) {
1912 pr_err("frame_len = %d\n",
1913 le32_to_cpu(ob_mac_tso_iocb->frame_len));
1914 pr_err("mss = %d\n",
1915 le16_to_cpu(ob_mac_tso_iocb->mss));
1916 pr_err("prot_hdr_len = %d\n",
1917 le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len));
1918 pr_err("hdr_offset = 0x%.04x\n",
1919 le16_to_cpu(ob_mac_tso_iocb->net_trans_offset));
1920 frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len);
1921 } else {
1922 pr_err("frame_len = %d\n",
1923 le16_to_cpu(ob_mac_iocb->frame_len));
1924 frame_len = le16_to_cpu(ob_mac_iocb->frame_len);
1925 }
1926 tbd = &ob_mac_iocb->tbd[0];
1927 ql_dump_tx_desc(tbd);
1928}
1929
1930void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
1931{
1932 pr_err("%s\n", __func__);
1933 pr_err("opcode = %d\n", ob_mac_rsp->opcode);
1934 pr_err("flags = %s %s %s %s %s %s %s\n",
1935 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".",
1936 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".",
1937 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".",
1938 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".",
1939 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".",
1940 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".",
1941 ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : ".");
1942 pr_err("tid = %x\n", ob_mac_rsp->tid);
1943}
1944#endif
1945
1946#ifdef QL_IB_DUMP
1947void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
1948{
1949 pr_err("%s\n", __func__);
1950 pr_err("opcode = 0x%x\n", ib_mac_rsp->opcode);
1951 pr_err("flags1 = %s%s%s%s%s%s\n",
1952 ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "",
1953 ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "",
1954 ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "",
1955 ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "",
1956 ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "",
1957 ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : "");
1958
1959 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK)
1960 pr_err("%s%s%s Multicast\n",
1961 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1962 IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
1963 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1964 IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
1965 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1966 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1967
1968 pr_err("flags2 = %s%s%s%s%s\n",
1969 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "",
1970 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "",
1971 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "",
1972 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "",
1973 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : "");
1974
1975 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK)
1976 pr_err("%s%s%s%s%s error\n",
1977 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
1978 IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "",
1979 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
1980 IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "",
1981 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
1982 IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "",
1983 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
1984 IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "",
1985 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
1986 IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : "");
1987
1988 pr_err("flags3 = %s%s\n",
1989 ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "",
1990 ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : "");
1991
1992 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
1993 pr_err("RSS flags = %s%s%s%s\n",
1994 ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
1995 IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "",
1996 ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
1997 IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "",
1998 ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
1999 IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "",
2000 ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
2001 IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : "");
2002
2003 pr_err("data_len = %d\n",
2004 le32_to_cpu(ib_mac_rsp->data_len));
2005 pr_err("data_addr = 0x%llx\n",
2006 (unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr));
2007 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
2008 pr_err("rss = %x\n",
2009 le32_to_cpu(ib_mac_rsp->rss));
2010 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)
2011 pr_err("vlan_id = %x\n",
2012 le16_to_cpu(ib_mac_rsp->vlan_id));
2013
2014 pr_err("flags4 = %s%s%s\n",
2015 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "",
2016 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "",
2017 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? "HL " : "");
2018
2019 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2020 pr_err("hdr length = %d\n",
2021 le32_to_cpu(ib_mac_rsp->hdr_len));
2022 pr_err("hdr addr = 0x%llx\n",
2023 (unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr));
2024 }
2025}
2026#endif
2027
2028#ifdef QL_ALL_DUMP
2029void ql_dump_all(struct ql_adapter *qdev)
2030{
2031 int i;
2032
2033 QL_DUMP_REGS(qdev);
2034 QL_DUMP_QDEV(qdev);
2035 for (i = 0; i < qdev->tx_ring_count; i++) {
2036 QL_DUMP_TX_RING(&qdev->tx_ring[i]);
2037 QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]);
2038 }
2039 for (i = 0; i < qdev->rx_ring_count; i++) {
2040 QL_DUMP_RX_RING(&qdev->rx_ring[i]);
2041 QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]);
2042 }
2043}
2044#endif
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
new file mode 100644
index 00000000000..9b67bfea035
--- /dev/null
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -0,0 +1,688 @@
1#include <linux/kernel.h>
2#include <linux/init.h>
3#include <linux/types.h>
4#include <linux/module.h>
5#include <linux/list.h>
6#include <linux/pci.h>
7#include <linux/dma-mapping.h>
8#include <linux/pagemap.h>
9#include <linux/sched.h>
10#include <linux/dmapool.h>
11#include <linux/mempool.h>
12#include <linux/spinlock.h>
13#include <linux/kthread.h>
14#include <linux/interrupt.h>
15#include <linux/errno.h>
16#include <linux/ioport.h>
17#include <linux/in.h>
18#include <linux/ip.h>
19#include <linux/ipv6.h>
20#include <net/ipv6.h>
21#include <linux/tcp.h>
22#include <linux/udp.h>
23#include <linux/if_arp.h>
24#include <linux/if_ether.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/ethtool.h>
28#include <linux/skbuff.h>
29#include <linux/rtnetlink.h>
30#include <linux/if_vlan.h>
31#include <linux/delay.h>
32#include <linux/mm.h>
33#include <linux/vmalloc.h>
34
35
36#include "qlge.h"
37
38static const char ql_gstrings_test[][ETH_GSTRING_LEN] = {
39 "Loopback test (offline)"
40};
41#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN)
42
43static int ql_update_ring_coalescing(struct ql_adapter *qdev)
44{
45 int i, status = 0;
46 struct rx_ring *rx_ring;
47 struct cqicb *cqicb;
48
49 if (!netif_running(qdev->ndev))
50 return status;
51
52 /* Skip the default queue, and update the outbound handler
53 * queues if they changed.
54 */
55 cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_count];
56 if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs ||
57 le16_to_cpu(cqicb->pkt_delay) !=
58 qdev->tx_max_coalesced_frames) {
59 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
60 rx_ring = &qdev->rx_ring[i];
61 cqicb = (struct cqicb *)rx_ring;
62 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
63 cqicb->pkt_delay =
64 cpu_to_le16(qdev->tx_max_coalesced_frames);
65 cqicb->flags = FLAGS_LI;
66 status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
67 CFG_LCQ, rx_ring->cq_id);
68 if (status) {
69 netif_err(qdev, ifup, qdev->ndev,
70 "Failed to load CQICB.\n");
71 goto exit;
72 }
73 }
74 }
75
76 /* Update the inbound (RSS) handler queues if they changed. */
77 cqicb = (struct cqicb *)&qdev->rx_ring[0];
78 if (le16_to_cpu(cqicb->irq_delay) != qdev->rx_coalesce_usecs ||
79 le16_to_cpu(cqicb->pkt_delay) !=
80 qdev->rx_max_coalesced_frames) {
81 for (i = 0; i < qdev->rss_ring_count; i++) {
82 rx_ring = &qdev->rx_ring[i];
83 cqicb = (struct cqicb *)rx_ring;
84 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
85 cqicb->pkt_delay =
86 cpu_to_le16(qdev->rx_max_coalesced_frames);
87 cqicb->flags = FLAGS_LI;
88 status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
89 CFG_LCQ, rx_ring->cq_id);
90 if (status) {
91 netif_err(qdev, ifup, qdev->ndev,
92 "Failed to load CQICB.\n");
93 goto exit;
94 }
95 }
96 }
97exit:
98 return status;
99}
100
101static void ql_update_stats(struct ql_adapter *qdev)
102{
103 u32 i;
104 u64 data;
105 u64 *iter = &qdev->nic_stats.tx_pkts;
106
107 spin_lock(&qdev->stats_lock);
108 if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
109 netif_err(qdev, drv, qdev->ndev,
110 "Couldn't get xgmac sem.\n");
111 goto quit;
112 }
113 /*
114 * Get TX statistics.
115 */
116 for (i = 0x200; i < 0x280; i += 8) {
117 if (ql_read_xgmac_reg64(qdev, i, &data)) {
118 netif_err(qdev, drv, qdev->ndev,
119 "Error reading status register 0x%.04x.\n",
120 i);
121 goto end;
122 } else
123 *iter = data;
124 iter++;
125 }
126
127 /*
128 * Get RX statistics.
129 */
130 for (i = 0x300; i < 0x3d0; i += 8) {
131 if (ql_read_xgmac_reg64(qdev, i, &data)) {
132 netif_err(qdev, drv, qdev->ndev,
133 "Error reading status register 0x%.04x.\n",
134 i);
135 goto end;
136 } else
137 *iter = data;
138 iter++;
139 }
140
141 /*
142 * Get Per-priority TX pause frame counter statistics.
143 */
144 for (i = 0x500; i < 0x540; i += 8) {
145 if (ql_read_xgmac_reg64(qdev, i, &data)) {
146 netif_err(qdev, drv, qdev->ndev,
147 "Error reading status register 0x%.04x.\n",
148 i);
149 goto end;
150 } else
151 *iter = data;
152 iter++;
153 }
154
155 /*
156 * Get Per-priority RX pause frame counter statistics.
157 */
158 for (i = 0x568; i < 0x5a8; i += 8) {
159 if (ql_read_xgmac_reg64(qdev, i, &data)) {
160 netif_err(qdev, drv, qdev->ndev,
161 "Error reading status register 0x%.04x.\n",
162 i);
163 goto end;
164 } else
165 *iter = data;
166 iter++;
167 }
168
169 /*
170 * Get RX NIC FIFO DROP statistics.
171 */
172 if (ql_read_xgmac_reg64(qdev, 0x5b8, &data)) {
173 netif_err(qdev, drv, qdev->ndev,
174 "Error reading status register 0x5b8.\n");
175 goto end;
176 } else
177 *iter = data;
178end:
179 ql_sem_unlock(qdev, qdev->xg_sem_mask);
180quit:
181 spin_unlock(&qdev->stats_lock);
182
183 QL_DUMP_STAT(qdev);
184}
185
186static char ql_stats_str_arr[][ETH_GSTRING_LEN] = {
187 {"tx_pkts"},
188 {"tx_bytes"},
189 {"tx_mcast_pkts"},
190 {"tx_bcast_pkts"},
191 {"tx_ucast_pkts"},
192 {"tx_ctl_pkts"},
193 {"tx_pause_pkts"},
194 {"tx_64_pkts"},
195 {"tx_65_to_127_pkts"},
196 {"tx_128_to_255_pkts"},
197 {"tx_256_511_pkts"},
198 {"tx_512_to_1023_pkts"},
199 {"tx_1024_to_1518_pkts"},
200 {"tx_1519_to_max_pkts"},
201 {"tx_undersize_pkts"},
202 {"tx_oversize_pkts"},
203 {"rx_bytes"},
204 {"rx_bytes_ok"},
205 {"rx_pkts"},
206 {"rx_pkts_ok"},
207 {"rx_bcast_pkts"},
208 {"rx_mcast_pkts"},
209 {"rx_ucast_pkts"},
210 {"rx_undersize_pkts"},
211 {"rx_oversize_pkts"},
212 {"rx_jabber_pkts"},
213 {"rx_undersize_fcerr_pkts"},
214 {"rx_drop_events"},
215 {"rx_fcerr_pkts"},
216 {"rx_align_err"},
217 {"rx_symbol_err"},
218 {"rx_mac_err"},
219 {"rx_ctl_pkts"},
220 {"rx_pause_pkts"},
221 {"rx_64_pkts"},
222 {"rx_65_to_127_pkts"},
223 {"rx_128_255_pkts"},
224 {"rx_256_511_pkts"},
225 {"rx_512_to_1023_pkts"},
226 {"rx_1024_to_1518_pkts"},
227 {"rx_1519_to_max_pkts"},
228 {"rx_len_err_pkts"},
229 {"tx_cbfc_pause_frames0"},
230 {"tx_cbfc_pause_frames1"},
231 {"tx_cbfc_pause_frames2"},
232 {"tx_cbfc_pause_frames3"},
233 {"tx_cbfc_pause_frames4"},
234 {"tx_cbfc_pause_frames5"},
235 {"tx_cbfc_pause_frames6"},
236 {"tx_cbfc_pause_frames7"},
237 {"rx_cbfc_pause_frames0"},
238 {"rx_cbfc_pause_frames1"},
239 {"rx_cbfc_pause_frames2"},
240 {"rx_cbfc_pause_frames3"},
241 {"rx_cbfc_pause_frames4"},
242 {"rx_cbfc_pause_frames5"},
243 {"rx_cbfc_pause_frames6"},
244 {"rx_cbfc_pause_frames7"},
245 {"rx_nic_fifo_drop"},
246};
247
248static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
249{
250 switch (stringset) {
251 case ETH_SS_STATS:
252 memcpy(buf, ql_stats_str_arr, sizeof(ql_stats_str_arr));
253 break;
254 }
255}
256
257static int ql_get_sset_count(struct net_device *dev, int sset)
258{
259 switch (sset) {
260 case ETH_SS_TEST:
261 return QLGE_TEST_LEN;
262 case ETH_SS_STATS:
263 return ARRAY_SIZE(ql_stats_str_arr);
264 default:
265 return -EOPNOTSUPP;
266 }
267}
268
269static void
270ql_get_ethtool_stats(struct net_device *ndev,
271 struct ethtool_stats *stats, u64 *data)
272{
273 struct ql_adapter *qdev = netdev_priv(ndev);
274 struct nic_stats *s = &qdev->nic_stats;
275
276 ql_update_stats(qdev);
277
278 *data++ = s->tx_pkts;
279 *data++ = s->tx_bytes;
280 *data++ = s->tx_mcast_pkts;
281 *data++ = s->tx_bcast_pkts;
282 *data++ = s->tx_ucast_pkts;
283 *data++ = s->tx_ctl_pkts;
284 *data++ = s->tx_pause_pkts;
285 *data++ = s->tx_64_pkt;
286 *data++ = s->tx_65_to_127_pkt;
287 *data++ = s->tx_128_to_255_pkt;
288 *data++ = s->tx_256_511_pkt;
289 *data++ = s->tx_512_to_1023_pkt;
290 *data++ = s->tx_1024_to_1518_pkt;
291 *data++ = s->tx_1519_to_max_pkt;
292 *data++ = s->tx_undersize_pkt;
293 *data++ = s->tx_oversize_pkt;
294 *data++ = s->rx_bytes;
295 *data++ = s->rx_bytes_ok;
296 *data++ = s->rx_pkts;
297 *data++ = s->rx_pkts_ok;
298 *data++ = s->rx_bcast_pkts;
299 *data++ = s->rx_mcast_pkts;
300 *data++ = s->rx_ucast_pkts;
301 *data++ = s->rx_undersize_pkts;
302 *data++ = s->rx_oversize_pkts;
303 *data++ = s->rx_jabber_pkts;
304 *data++ = s->rx_undersize_fcerr_pkts;
305 *data++ = s->rx_drop_events;
306 *data++ = s->rx_fcerr_pkts;
307 *data++ = s->rx_align_err;
308 *data++ = s->rx_symbol_err;
309 *data++ = s->rx_mac_err;
310 *data++ = s->rx_ctl_pkts;
311 *data++ = s->rx_pause_pkts;
312 *data++ = s->rx_64_pkts;
313 *data++ = s->rx_65_to_127_pkts;
314 *data++ = s->rx_128_255_pkts;
315 *data++ = s->rx_256_511_pkts;
316 *data++ = s->rx_512_to_1023_pkts;
317 *data++ = s->rx_1024_to_1518_pkts;
318 *data++ = s->rx_1519_to_max_pkts;
319 *data++ = s->rx_len_err_pkts;
320 *data++ = s->tx_cbfc_pause_frames0;
321 *data++ = s->tx_cbfc_pause_frames1;
322 *data++ = s->tx_cbfc_pause_frames2;
323 *data++ = s->tx_cbfc_pause_frames3;
324 *data++ = s->tx_cbfc_pause_frames4;
325 *data++ = s->tx_cbfc_pause_frames5;
326 *data++ = s->tx_cbfc_pause_frames6;
327 *data++ = s->tx_cbfc_pause_frames7;
328 *data++ = s->rx_cbfc_pause_frames0;
329 *data++ = s->rx_cbfc_pause_frames1;
330 *data++ = s->rx_cbfc_pause_frames2;
331 *data++ = s->rx_cbfc_pause_frames3;
332 *data++ = s->rx_cbfc_pause_frames4;
333 *data++ = s->rx_cbfc_pause_frames5;
334 *data++ = s->rx_cbfc_pause_frames6;
335 *data++ = s->rx_cbfc_pause_frames7;
336 *data++ = s->rx_nic_fifo_drop;
337}
338
339static int ql_get_settings(struct net_device *ndev,
340 struct ethtool_cmd *ecmd)
341{
342 struct ql_adapter *qdev = netdev_priv(ndev);
343
344 ecmd->supported = SUPPORTED_10000baseT_Full;
345 ecmd->advertising = ADVERTISED_10000baseT_Full;
346 ecmd->autoneg = AUTONEG_ENABLE;
347 ecmd->transceiver = XCVR_EXTERNAL;
348 if ((qdev->link_status & STS_LINK_TYPE_MASK) ==
349 STS_LINK_TYPE_10GBASET) {
350 ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
351 ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg);
352 ecmd->port = PORT_TP;
353 } else {
354 ecmd->supported |= SUPPORTED_FIBRE;
355 ecmd->advertising |= ADVERTISED_FIBRE;
356 ecmd->port = PORT_FIBRE;
357 }
358
359 ethtool_cmd_speed_set(ecmd, SPEED_10000);
360 ecmd->duplex = DUPLEX_FULL;
361
362 return 0;
363}
364
365static void ql_get_drvinfo(struct net_device *ndev,
366 struct ethtool_drvinfo *drvinfo)
367{
368 struct ql_adapter *qdev = netdev_priv(ndev);
369 strncpy(drvinfo->driver, qlge_driver_name, 32);
370 strncpy(drvinfo->version, qlge_driver_version, 32);
371 snprintf(drvinfo->fw_version, 32, "v%d.%d.%d",
372 (qdev->fw_rev_id & 0x00ff0000) >> 16,
373 (qdev->fw_rev_id & 0x0000ff00) >> 8,
374 (qdev->fw_rev_id & 0x000000ff));
375 strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
376 drvinfo->n_stats = 0;
377 drvinfo->testinfo_len = 0;
378 if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
379 drvinfo->regdump_len = sizeof(struct ql_mpi_coredump);
380 else
381 drvinfo->regdump_len = sizeof(struct ql_reg_dump);
382 drvinfo->eedump_len = 0;
383}
384
385static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
386{
387 struct ql_adapter *qdev = netdev_priv(ndev);
388 /* What we support. */
389 wol->supported = WAKE_MAGIC;
390 /* What we've currently got set. */
391 wol->wolopts = qdev->wol;
392}
393
394static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
395{
396 struct ql_adapter *qdev = netdev_priv(ndev);
397 int status;
398
399 if (wol->wolopts & ~WAKE_MAGIC)
400 return -EINVAL;
401 qdev->wol = wol->wolopts;
402
403 netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol);
404 if (!qdev->wol) {
405 u32 wol_mode = 0;
406 status = ql_mb_wol_mode(qdev, wol_mode);
407 netif_err(qdev, drv, qdev->ndev, "WOL %s (wol code 0x%x)\n",
408 status == 0 ? "cleared successfully" : "clear failed",
409 wol_mode);
410 }
411
412 return 0;
413}
414
415static int ql_set_phys_id(struct net_device *ndev,
416 enum ethtool_phys_id_state state)
417
418{
419 struct ql_adapter *qdev = netdev_priv(ndev);
420
421 switch (state) {
422 case ETHTOOL_ID_ACTIVE:
423 /* Save the current LED settings */
424 if (ql_mb_get_led_cfg(qdev))
425 return -EIO;
426
427 /* Start blinking */
428 ql_mb_set_led_cfg(qdev, QL_LED_BLINK);
429 return 0;
430
431 case ETHTOOL_ID_INACTIVE:
432 /* Restore LED settings */
433 if (ql_mb_set_led_cfg(qdev, qdev->led_config))
434 return -EIO;
435 return 0;
436
437 default:
438 return -EINVAL;
439 }
440}
441
442static int ql_start_loopback(struct ql_adapter *qdev)
443{
444 if (netif_carrier_ok(qdev->ndev)) {
445 set_bit(QL_LB_LINK_UP, &qdev->flags);
446 netif_carrier_off(qdev->ndev);
447 } else
448 clear_bit(QL_LB_LINK_UP, &qdev->flags);
449 qdev->link_config |= CFG_LOOPBACK_PCS;
450 return ql_mb_set_port_cfg(qdev);
451}
452
453static void ql_stop_loopback(struct ql_adapter *qdev)
454{
455 qdev->link_config &= ~CFG_LOOPBACK_PCS;
456 ql_mb_set_port_cfg(qdev);
457 if (test_bit(QL_LB_LINK_UP, &qdev->flags)) {
458 netif_carrier_on(qdev->ndev);
459 clear_bit(QL_LB_LINK_UP, &qdev->flags);
460 }
461}
462
463static void ql_create_lb_frame(struct sk_buff *skb,
464 unsigned int frame_size)
465{
466 memset(skb->data, 0xFF, frame_size);
467 frame_size &= ~1;
468 memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
469 memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
470 memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
471}
472
473void ql_check_lb_frame(struct ql_adapter *qdev,
474 struct sk_buff *skb)
475{
476 unsigned int frame_size = skb->len;
477
478 if ((*(skb->data + 3) == 0xFF) &&
479 (*(skb->data + frame_size / 2 + 10) == 0xBE) &&
480 (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
481 atomic_dec(&qdev->lb_count);
482 return;
483 }
484}
485
486static int ql_run_loopback_test(struct ql_adapter *qdev)
487{
488 int i;
489 netdev_tx_t rc;
490 struct sk_buff *skb;
491 unsigned int size = SMALL_BUF_MAP_SIZE;
492
493 for (i = 0; i < 64; i++) {
494 skb = netdev_alloc_skb(qdev->ndev, size);
495 if (!skb)
496 return -ENOMEM;
497
498 skb->queue_mapping = 0;
499 skb_put(skb, size);
500 ql_create_lb_frame(skb, size);
501 rc = ql_lb_send(skb, qdev->ndev);
502 if (rc != NETDEV_TX_OK) {
 kfree_skb(skb); /* skb was not consumed by the failed send */
503 return -EPIPE;
 }
504 atomic_inc(&qdev->lb_count);
505 }
506 /* Give queue time to settle before testing results. */
507 msleep(2);
508 ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128);
509 return atomic_read(&qdev->lb_count) ? -EIO : 0;
510}
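/* How the loopback test above fits together (summary of the helpers in this
 * file): ql_create_lb_frame() fills each frame with a known pattern plus the
 * 0xBE/0xAF marker bytes, ql_run_loopback_test() sends 64 such frames and
 * bumps lb_count for each one, and ql_check_lb_frame() decrements lb_count
 * for every frame that comes back with the markers intact. A non-zero
 * lb_count after cleaning the RX ring therefore means frames were lost or
 * corrupted, and the test returns -EIO.
 */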
511
512static int ql_loopback_test(struct ql_adapter *qdev, u64 *data)
513{
514 *data = ql_start_loopback(qdev);
515 if (*data)
516 goto out;
517 *data = ql_run_loopback_test(qdev);
518out:
519 ql_stop_loopback(qdev);
520 return *data;
521}
522
523static void ql_self_test(struct net_device *ndev,
524 struct ethtool_test *eth_test, u64 *data)
525{
526 struct ql_adapter *qdev = netdev_priv(ndev);
527
528 if (netif_running(ndev)) {
529 set_bit(QL_SELFTEST, &qdev->flags);
530 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
531 /* Offline tests */
532 if (ql_loopback_test(qdev, &data[0]))
533 eth_test->flags |= ETH_TEST_FL_FAILED;
534
535 } else {
536 /* Online tests */
537 data[0] = 0;
538 }
539 clear_bit(QL_SELFTEST, &qdev->flags);
540 /* Give link time to come up after
541 * port configuration changes.
542 */
543 msleep_interruptible(4 * 1000);
544 } else {
545 netif_err(qdev, drv, qdev->ndev,
546 "is down, loopback test will fail.\n");
547 eth_test->flags |= ETH_TEST_FL_FAILED;
548 }
549}
550
551static int ql_get_regs_len(struct net_device *ndev)
552{
553 struct ql_adapter *qdev = netdev_priv(ndev);
554
555 if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
556 return sizeof(struct ql_mpi_coredump);
557 else
558 return sizeof(struct ql_reg_dump);
559}
560
561static void ql_get_regs(struct net_device *ndev,
562 struct ethtool_regs *regs, void *p)
563{
564 struct ql_adapter *qdev = netdev_priv(ndev);
565
566 ql_get_dump(qdev, p);
567 qdev->core_is_dumped = 0;
568 if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
569 regs->len = sizeof(struct ql_mpi_coredump);
570 else
571 regs->len = sizeof(struct ql_reg_dump);
572}
573
574static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
575{
576 struct ql_adapter *qdev = netdev_priv(dev);
577
578 c->rx_coalesce_usecs = qdev->rx_coalesce_usecs;
579 c->tx_coalesce_usecs = qdev->tx_coalesce_usecs;
580
581 /* This chip coalesces as follows:
582 * When a packet arrives, hold off interrupts until
583 * cqicb->irq_delay expires, but if no other packets arrive don't
584 * wait longer than cqicb->pkt_delay. Ethtool, however, doesn't use a
585 * timer to coalesce on a frame basis, so we have to take ethtool's
586 * max_coalesced_frames value and convert it to a delay in microseconds.
587 * We do this by assuming a basic throughput of 1,000,000 frames per
588 * second at 1024 bytes per frame, i.e. one frame per usec, so it's a
589 * simple one-to-one ratio.
590 */
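 /* Example (using the one-frame-per-usec assumption above): an ethtool
 * request of rx_max_coalesced_frames = 5 is treated as roughly a 5 usec
 * inter-frame wait, which is what ends up programmed into cqicb->pkt_delay.
 */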
591 c->rx_max_coalesced_frames = qdev->rx_max_coalesced_frames;
592 c->tx_max_coalesced_frames = qdev->tx_max_coalesced_frames;
593
594 return 0;
595}
596
597static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c)
598{
599 struct ql_adapter *qdev = netdev_priv(ndev);
600
601 /* Validate user parameters. */
602 if (c->rx_coalesce_usecs > qdev->rx_ring_size / 2)
603 return -EINVAL;
604 /* Don't wait more than 10 usec. */
605 if (c->rx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
606 return -EINVAL;
607 if (c->tx_coalesce_usecs > qdev->tx_ring_size / 2)
608 return -EINVAL;
609 if (c->tx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
610 return -EINVAL;
611
612 /* Verify a change took place before updating the hardware. */
613 if (qdev->rx_coalesce_usecs == c->rx_coalesce_usecs &&
614 qdev->tx_coalesce_usecs == c->tx_coalesce_usecs &&
615 qdev->rx_max_coalesced_frames == c->rx_max_coalesced_frames &&
616 qdev->tx_max_coalesced_frames == c->tx_max_coalesced_frames)
617 return 0;
618
619 qdev->rx_coalesce_usecs = c->rx_coalesce_usecs;
620 qdev->tx_coalesce_usecs = c->tx_coalesce_usecs;
621 qdev->rx_max_coalesced_frames = c->rx_max_coalesced_frames;
622 qdev->tx_max_coalesced_frames = c->tx_max_coalesced_frames;
623
624 return ql_update_ring_coalescing(qdev);
625}
626
627static void ql_get_pauseparam(struct net_device *netdev,
628 struct ethtool_pauseparam *pause)
629{
630 struct ql_adapter *qdev = netdev_priv(netdev);
631
632 ql_mb_get_port_cfg(qdev);
633 if (qdev->link_config & CFG_PAUSE_STD) {
634 pause->rx_pause = 1;
635 pause->tx_pause = 1;
636 }
637}
638
639static int ql_set_pauseparam(struct net_device *netdev,
640 struct ethtool_pauseparam *pause)
641{
642 struct ql_adapter *qdev = netdev_priv(netdev);
643 int status = 0;
644
645 if ((pause->rx_pause) && (pause->tx_pause))
646 qdev->link_config |= CFG_PAUSE_STD;
647 else if (!pause->rx_pause && !pause->tx_pause)
648 qdev->link_config &= ~CFG_PAUSE_STD;
649 else
650 return -EINVAL;
651
652 status = ql_mb_set_port_cfg(qdev);
653 return status;
654}
655
656static u32 ql_get_msglevel(struct net_device *ndev)
657{
658 struct ql_adapter *qdev = netdev_priv(ndev);
659 return qdev->msg_enable;
660}
661
662static void ql_set_msglevel(struct net_device *ndev, u32 value)
663{
664 struct ql_adapter *qdev = netdev_priv(ndev);
665 qdev->msg_enable = value;
666}
667
668const struct ethtool_ops qlge_ethtool_ops = {
669 .get_settings = ql_get_settings,
670 .get_drvinfo = ql_get_drvinfo,
671 .get_wol = ql_get_wol,
672 .set_wol = ql_set_wol,
673 .get_regs_len = ql_get_regs_len,
674 .get_regs = ql_get_regs,
675 .get_msglevel = ql_get_msglevel,
676 .set_msglevel = ql_set_msglevel,
677 .get_link = ethtool_op_get_link,
678 .set_phys_id = ql_set_phys_id,
679 .self_test = ql_self_test,
680 .get_pauseparam = ql_get_pauseparam,
681 .set_pauseparam = ql_set_pauseparam,
682 .get_coalesce = ql_get_coalesce,
683 .set_coalesce = ql_set_coalesce,
684 .get_sset_count = ql_get_sset_count,
685 .get_strings = ql_get_strings,
686 .get_ethtool_stats = ql_get_ethtool_stats,
687};
688
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
new file mode 100644
index 00000000000..f07e96ec884
--- /dev/null
+++ b/drivers/net/qlge/qlge_main.c
@@ -0,0 +1,4987 @@
1/*
2 * QLogic qlge NIC HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 * See LICENSE.qlge for copyright and licensing details.
5 * Author: Linux qlge network device driver by
6 * Ron Mercer <ron.mercer@qlogic.com>
7 */
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <linux/bitops.h>
11#include <linux/types.h>
12#include <linux/module.h>
13#include <linux/list.h>
14#include <linux/pci.h>
15#include <linux/dma-mapping.h>
16#include <linux/pagemap.h>
17#include <linux/sched.h>
18#include <linux/slab.h>
19#include <linux/dmapool.h>
20#include <linux/mempool.h>
21#include <linux/spinlock.h>
22#include <linux/kthread.h>
23#include <linux/interrupt.h>
24#include <linux/errno.h>
25#include <linux/ioport.h>
26#include <linux/in.h>
27#include <linux/ip.h>
28#include <linux/ipv6.h>
29#include <net/ipv6.h>
30#include <linux/tcp.h>
31#include <linux/udp.h>
32#include <linux/if_arp.h>
33#include <linux/if_ether.h>
34#include <linux/netdevice.h>
35#include <linux/etherdevice.h>
36#include <linux/ethtool.h>
37#include <linux/if_vlan.h>
38#include <linux/skbuff.h>
39#include <linux/delay.h>
40#include <linux/mm.h>
41#include <linux/vmalloc.h>
42#include <linux/prefetch.h>
43#include <net/ip6_checksum.h>
44
45#include "qlge.h"
46
47char qlge_driver_name[] = DRV_NAME;
48const char qlge_driver_version[] = DRV_VERSION;
49
50MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
51MODULE_DESCRIPTION(DRV_STRING " ");
52MODULE_LICENSE("GPL");
53MODULE_VERSION(DRV_VERSION);
54
55static const u32 default_msg =
56 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
57/* NETIF_MSG_TIMER | */
58 NETIF_MSG_IFDOWN |
59 NETIF_MSG_IFUP |
60 NETIF_MSG_RX_ERR |
61 NETIF_MSG_TX_ERR |
62/* NETIF_MSG_TX_QUEUED | */
63/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
64/* NETIF_MSG_PKTDATA | */
65 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
66
67static int debug = -1; /* defaults above */
68module_param(debug, int, 0664);
69MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
70
71#define MSIX_IRQ 0
72#define MSI_IRQ 1
73#define LEG_IRQ 2
74static int qlge_irq_type = MSIX_IRQ;
75module_param(qlge_irq_type, int, 0664);
76MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
77
78static int qlge_mpi_coredump;
79module_param(qlge_mpi_coredump, int, 0);
80MODULE_PARM_DESC(qlge_mpi_coredump,
81 "Option to enable MPI firmware dump. "
82 "Default is OFF - do not allocate memory.");
83
84static int qlge_force_coredump;
85module_param(qlge_force_coredump, int, 0);
86MODULE_PARM_DESC(qlge_force_coredump,
87 "Option to allow force of firmware core dump. "
88 "Default is OFF - Do not allow.");
89
90static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
91 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
92 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
93 /* required last entry */
94 {0,}
95};
96
97MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
98
99static int ql_wol(struct ql_adapter *qdev);
100static void qlge_set_multicast_list(struct net_device *ndev);
101
102/* This hardware semaphore causes exclusive access to
103 * resources shared between the NIC driver, MPI firmware,
104 * FCOE firmware and the FC driver.
105 */
106static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
107{
108 u32 sem_bits = 0;
109
110 switch (sem_mask) {
111 case SEM_XGMAC0_MASK:
112 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
113 break;
114 case SEM_XGMAC1_MASK:
115 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
116 break;
117 case SEM_ICB_MASK:
118 sem_bits = SEM_SET << SEM_ICB_SHIFT;
119 break;
120 case SEM_MAC_ADDR_MASK:
121 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
122 break;
123 case SEM_FLASH_MASK:
124 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
125 break;
126 case SEM_PROBE_MASK:
127 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
128 break;
129 case SEM_RT_IDX_MASK:
130 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
131 break;
132 case SEM_PROC_REG_MASK:
133 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
134 break;
135 default:
136 netif_alert(qdev, probe, qdev->ndev, "Bad semaphore mask!\n");
137 return -EINVAL;
138 }
139
140 ql_write32(qdev, SEM, sem_bits | sem_mask);
141 return !(ql_read32(qdev, SEM) & sem_bits);
142}
143
144int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
145{
146 unsigned int wait_count = 30;
147 do {
148 if (!ql_sem_trylock(qdev, sem_mask))
149 return 0;
150 udelay(100);
151 } while (--wait_count);
152 return -ETIMEDOUT;
153}
154
155void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
156{
157 ql_write32(qdev, SEM, sem_mask);
158 ql_read32(qdev, SEM); /* flush */
159}
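/* Typical usage (a sketch mirroring callers later in this file, e.g.
 * ql_set_mac_addr()): take the semaphore, touch the shared resource,
 * then release it:
 *
 *	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
 *	if (status)
 *		return status;
 *	... program or read the CAM ...
 *	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 */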
160
161/* This function waits for a specific bit to come ready
162 * in a given register. It is used mostly by the initialize
163 * process, but is also used in kernel thread API such as
164 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
165 */
166int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
167{
168 u32 temp;
169 int count = UDELAY_COUNT;
170
171 while (count) {
172 temp = ql_read32(qdev, reg);
173
174 /* check for errors */
175 if (temp & err_bit) {
176 netif_alert(qdev, probe, qdev->ndev,
177 "register 0x%.08x access error, value = 0x%.08x!\n",
178 reg, temp);
179 return -EIO;
180 } else if (temp & bit)
181 return 0;
182 udelay(UDELAY_DELAY);
183 count--;
184 }
185 netif_alert(qdev, probe, qdev->ndev,
186 "Timed out waiting for reg %x to come ready.\n", reg);
187 return -ETIMEDOUT;
188}
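/* Example of the wait-then-access pattern (as used by the MAC/CAM helpers
 * below, e.g. ql_get_mac_addr_reg()): wait for MAC_ADDR_IDX to report that
 * it is ready for a write before programming it:
 *
 *	status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 *	if (status)
 *		goto exit;
 *	ql_write32(qdev, MAC_ADDR_IDX, ...);
 */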
189
190/* The CFG register is used to download TX and RX control blocks
191 * to the chip. This function waits for an operation to complete.
192 */
193static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
194{
195 int count = UDELAY_COUNT;
196 u32 temp;
197
198 while (count) {
199 temp = ql_read32(qdev, CFG);
200 if (temp & CFG_LE)
201 return -EIO;
202 if (!(temp & bit))
203 return 0;
204 udelay(UDELAY_DELAY);
205 count--;
206 }
207 return -ETIMEDOUT;
208}
209
210
211/* Used to issue init control blocks to hw. Maps control block,
212 * sets address, triggers download, waits for completion.
213 */
214int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
215 u16 q_id)
216{
217 u64 map;
218 int status = 0;
219 int direction;
220 u32 mask;
221 u32 value;
222
223 direction =
224 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
225 PCI_DMA_FROMDEVICE;
226
227 map = pci_map_single(qdev->pdev, ptr, size, direction);
228 if (pci_dma_mapping_error(qdev->pdev, map)) {
229 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
230 return -ENOMEM;
231 }
232
233 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
234 if (status) {
235 /* don't leak the DMA mapping taken above */
 pci_unmap_single(qdev->pdev, map, size, direction);
 return status;
 }
236
237 status = ql_wait_cfg(qdev, bit);
238 if (status) {
239 netif_err(qdev, ifup, qdev->ndev,
240 "Timed out waiting for CFG to come ready.\n");
241 goto exit;
242 }
243
244 ql_write32(qdev, ICB_L, (u32) map);
245 ql_write32(qdev, ICB_H, (u32) (map >> 32));
246
247 mask = CFG_Q_MASK | (bit << 16);
248 value = bit | (q_id << CFG_Q_SHIFT);
249 ql_write32(qdev, CFG, (mask | value));
250
251 /*
252 * Wait for the bit to clear after signaling hw.
253 */
254 status = ql_wait_cfg(qdev, bit);
255exit:
256 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
257 pci_unmap_single(qdev->pdev, map, size, direction);
258 return status;
259}
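/* Example call (taken from the ethtool coalescing path in this driver),
 * loading a completion queue control block for a given queue id:
 *
 *	status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
 *			      CFG_LCQ, rx_ring->cq_id);
 */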
260
261/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
262int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
263 u32 *value)
264{
265 u32 offset = 0;
266 int status;
267
268 switch (type) {
269 case MAC_ADDR_TYPE_MULTI_MAC:
270 case MAC_ADDR_TYPE_CAM_MAC:
271 {
272 status =
273 ql_wait_reg_rdy(qdev,
274 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
275 if (status)
276 goto exit;
277 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
278 (index << MAC_ADDR_IDX_SHIFT) | /* index */
279 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
280 status =
281 ql_wait_reg_rdy(qdev,
282 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
283 if (status)
284 goto exit;
285 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
286 status =
287 ql_wait_reg_rdy(qdev,
288 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
289 if (status)
290 goto exit;
291 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
292 (index << MAC_ADDR_IDX_SHIFT) | /* index */
293 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
294 status =
295 ql_wait_reg_rdy(qdev,
296 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
297 if (status)
298 goto exit;
299 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
300 if (type == MAC_ADDR_TYPE_CAM_MAC) {
301 status =
302 ql_wait_reg_rdy(qdev,
303 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
304 if (status)
305 goto exit;
306 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
307 (index << MAC_ADDR_IDX_SHIFT) | /* index */
308 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
309 status =
310 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
311 MAC_ADDR_MR, 0);
312 if (status)
313 goto exit;
314 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
315 }
316 break;
317 }
318 case MAC_ADDR_TYPE_VLAN:
319 case MAC_ADDR_TYPE_MULTI_FLTR:
320 default:
321 netif_crit(qdev, ifup, qdev->ndev,
322 "Address type %d not yet supported.\n", type);
323 status = -EPERM;
324 }
325exit:
326 return status;
327}
328
329/* Set up a MAC, multicast or VLAN address for the
330 * inbound frame matching.
331 */
332static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
333 u16 index)
334{
335 u32 offset = 0;
336 int status = 0;
337
338 switch (type) {
339 case MAC_ADDR_TYPE_MULTI_MAC:
340 {
341 u32 upper = (addr[0] << 8) | addr[1];
342 u32 lower = (addr[2] << 24) | (addr[3] << 16) |
343 (addr[4] << 8) | (addr[5]);
344
345 status =
346 ql_wait_reg_rdy(qdev,
347 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
348 if (status)
349 goto exit;
350 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
351 (index << MAC_ADDR_IDX_SHIFT) |
352 type | MAC_ADDR_E);
353 ql_write32(qdev, MAC_ADDR_DATA, lower);
354 status =
355 ql_wait_reg_rdy(qdev,
356 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
357 if (status)
358 goto exit;
359 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
360 (index << MAC_ADDR_IDX_SHIFT) |
361 type | MAC_ADDR_E);
362
363 ql_write32(qdev, MAC_ADDR_DATA, upper);
364 status =
365 ql_wait_reg_rdy(qdev,
366 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
367 if (status)
368 goto exit;
369 break;
370 }
371 case MAC_ADDR_TYPE_CAM_MAC:
372 {
373 u32 cam_output;
374 u32 upper = (addr[0] << 8) | addr[1];
375 u32 lower =
376 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
377 (addr[5]);
378
379 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
380 "Adding %s address %pM at index %d in the CAM.\n",
381 type == MAC_ADDR_TYPE_MULTI_MAC ?
382 "MULTICAST" : "UNICAST",
383 addr, index);
384
385 status =
386 ql_wait_reg_rdy(qdev,
387 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
388 if (status)
389 goto exit;
390 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
391 (index << MAC_ADDR_IDX_SHIFT) | /* index */
392 type); /* type */
393 ql_write32(qdev, MAC_ADDR_DATA, lower);
394 status =
395 ql_wait_reg_rdy(qdev,
396 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
397 if (status)
398 goto exit;
399 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
400 (index << MAC_ADDR_IDX_SHIFT) | /* index */
401 type); /* type */
402 ql_write32(qdev, MAC_ADDR_DATA, upper);
403 status =
404 ql_wait_reg_rdy(qdev,
405 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
406 if (status)
407 goto exit;
408 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
409 (index << MAC_ADDR_IDX_SHIFT) | /* index */
410 type); /* type */
411			/* This field should also include the queue id
412			 * and possibly the function id. Right now we
413			 * hardcode the route field to NIC core.
414			 */
415			cam_output = (CAM_OUT_ROUTE_NIC |
416				      (qdev->func <<
417				       CAM_OUT_FUNC_SHIFT) |
418				      (0 << CAM_OUT_CQ_ID_SHIFT));
419 if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
420 cam_output |= CAM_OUT_RV;
421 /* route to NIC core */
422 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
423 break;
424 }
425 case MAC_ADDR_TYPE_VLAN:
426 {
427 u32 enable_bit = *((u32 *) &addr[0]);
428 /* For VLAN, the addr actually holds a bit that
429 * either enables or disables the vlan id we are
430 * addressing. It's either MAC_ADDR_E on or off.
431 * That's bit-27 we're talking about.
432 */
433 netif_info(qdev, ifup, qdev->ndev,
434 "%s VLAN ID %d %s the CAM.\n",
435 enable_bit ? "Adding" : "Removing",
436 index,
437 enable_bit ? "to" : "from");
438
439 status =
440 ql_wait_reg_rdy(qdev,
441 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
442 if (status)
443 goto exit;
444 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
445 (index << MAC_ADDR_IDX_SHIFT) | /* index */
446 type | /* type */
447 enable_bit); /* enable/disable */
448 break;
449 }
450 case MAC_ADDR_TYPE_MULTI_FLTR:
451 default:
452 netif_crit(qdev, ifup, qdev->ndev,
453 "Address type %d not yet supported.\n", type);
454 status = -EPERM;
455 }
456exit:
457 return status;
458}
459
460/* Set or clear MAC address in hardware. We sometimes
461 * have to clear it to prevent wrong frame routing
462 * especially in a bonding environment.
463 */
464static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
465{
466 int status;
467 char zero_mac_addr[ETH_ALEN];
468 char *addr;
469
470 if (set) {
471 addr = &qdev->current_mac_addr[0];
472 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
473 "Set Mac addr %pM\n", addr);
474 } else {
475 memset(zero_mac_addr, 0, ETH_ALEN);
476 addr = &zero_mac_addr[0];
477 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
478 "Clearing MAC address\n");
479 }
480 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
481 if (status)
482 return status;
483 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
484 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
485 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
486 if (status)
487 netif_err(qdev, ifup, qdev->ndev,
488 "Failed to init mac address.\n");
489 return status;
490}
491
492void ql_link_on(struct ql_adapter *qdev)
493{
494 netif_err(qdev, link, qdev->ndev, "Link is up.\n");
495 netif_carrier_on(qdev->ndev);
496 ql_set_mac_addr(qdev, 1);
497}
498
499void ql_link_off(struct ql_adapter *qdev)
500{
501 netif_err(qdev, link, qdev->ndev, "Link is down.\n");
502 netif_carrier_off(qdev->ndev);
503 ql_set_mac_addr(qdev, 0);
504}
505
506/* Get a specific frame routing value from the CAM.
507 * Used for debug and reg dump.
508 */
509int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
510{
511 int status = 0;
512
513 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
514 if (status)
515 goto exit;
516
517 ql_write32(qdev, RT_IDX,
518 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
519 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
520 if (status)
521 goto exit;
522 *value = ql_read32(qdev, RT_DATA);
523exit:
524 return status;
525}
526
527/* The NIC function for this chip has 16 routing indexes. Each one can be used
528 * to route different frame types to various inbound queues. We send broadcast/
529 * multicast/error frames to the default queue for slow handling,
530 * and CAM hit/RSS frames to the fast handling queues.
531 */
532static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
533 int enable)
534{
535 int status = -EINVAL; /* Return error if no mask match. */
536 u32 value = 0;
537
538 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
539 "%s %s mask %s the routing reg.\n",
540 enable ? "Adding" : "Removing",
541 index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
542 index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
543 index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
544 index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
545 index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
546 index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
547 index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
548 index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
549 index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
550 index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
551 index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
552 index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
553 index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
554 index == RT_IDX_UNUSED013 ? "UNUSED13" :
555 index == RT_IDX_UNUSED014 ? "UNUSED14" :
556 index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
557 "(Bad index != RT_IDX)",
558 enable ? "to" : "from");
559
560 switch (mask) {
561 case RT_IDX_CAM_HIT:
562 {
563 value = RT_IDX_DST_CAM_Q | /* dest */
564 RT_IDX_TYPE_NICQ | /* type */
565 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
566 break;
567 }
568 case RT_IDX_VALID: /* Promiscuous Mode frames. */
569 {
570 value = RT_IDX_DST_DFLT_Q | /* dest */
571 RT_IDX_TYPE_NICQ | /* type */
572 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
573 break;
574 }
575 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
576 {
577 value = RT_IDX_DST_DFLT_Q | /* dest */
578 RT_IDX_TYPE_NICQ | /* type */
579 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
580 break;
581 }
582 case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
583 {
584 value = RT_IDX_DST_DFLT_Q | /* dest */
585 RT_IDX_TYPE_NICQ | /* type */
586 (RT_IDX_IP_CSUM_ERR_SLOT <<
587 RT_IDX_IDX_SHIFT); /* index */
588 break;
589 }
590 case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
591 {
592 value = RT_IDX_DST_DFLT_Q | /* dest */
593 RT_IDX_TYPE_NICQ | /* type */
594 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
595 RT_IDX_IDX_SHIFT); /* index */
596 break;
597 }
598 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
599 {
600 value = RT_IDX_DST_DFLT_Q | /* dest */
601 RT_IDX_TYPE_NICQ | /* type */
602 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
603 break;
604 }
605 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
606 {
607 value = RT_IDX_DST_DFLT_Q | /* dest */
608 RT_IDX_TYPE_NICQ | /* type */
609 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
610 break;
611 }
612 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
613 {
614 value = RT_IDX_DST_DFLT_Q | /* dest */
615 RT_IDX_TYPE_NICQ | /* type */
616 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
617 break;
618 }
619 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
620 {
621 value = RT_IDX_DST_RSS | /* dest */
622 RT_IDX_TYPE_NICQ | /* type */
623 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
624 break;
625 }
626 case 0: /* Clear the E-bit on an entry. */
627 {
628 value = RT_IDX_DST_DFLT_Q | /* dest */
629 RT_IDX_TYPE_NICQ | /* type */
630 (index << RT_IDX_IDX_SHIFT);/* index */
631 break;
632 }
633 default:
634 netif_err(qdev, ifup, qdev->ndev,
635 "Mask type %d not yet supported.\n", mask);
636 status = -EPERM;
637 goto exit;
638 }
639
640 if (value) {
641 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
642 if (status)
643 goto exit;
644 value |= (enable ? RT_IDX_E : 0);
645 ql_write32(qdev, RT_IDX, value);
646 ql_write32(qdev, RT_DATA, enable ? mask : 0);
647 }
648exit:
649 return status;
650}
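
/* A minimal sketch of how the routing slots described above might be
 * programmed at ifup time: broadcast traffic to the default (slow path)
 * queue and CAM hits to the fast path. Illustrative only; the driver's
 * actual routing setup lives elsewhere in this file, and this sketch
 * omits any locking it may do around these calls.
 */
#if 0
static int example_route_init(struct ql_adapter *qdev)
{
	int status;

	/* Broadcast frames go to the default (slow path) queue. */
	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
	if (status)
		return status;
	/* CAM hits (our unicast MAC) go to the fast path queues. */
	return ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT, RT_IDX_CAM_HIT, 1);
}
#endif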
651
652static void ql_enable_interrupts(struct ql_adapter *qdev)
653{
654 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
655}
656
657static void ql_disable_interrupts(struct ql_adapter *qdev)
658{
659 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
660}
661
662/* If we're running with multiple MSI-X vectors then we enable on the fly.
663 * Otherwise, we may have multiple outstanding workers and don't want to
664 * enable until the last one finishes. In this case, the irq_cnt gets
665 * incremented every time we queue a worker and decremented every time
666 * a worker finishes. Once it hits zero we enable the interrupt.
667 */
668u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
669{
670 u32 var = 0;
671 unsigned long hw_flags = 0;
672 struct intr_context *ctx = qdev->intr_context + intr;
673
674 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
675 /* Always enable if we're MSIX multi interrupts and
676 * it's not the default (zeroeth) interrupt.
677 */
678 ql_write32(qdev, INTR_EN,
679 ctx->intr_en_mask);
680 var = ql_read32(qdev, STS);
681 return var;
682 }
683
684 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
685 if (atomic_dec_and_test(&ctx->irq_cnt)) {
686 ql_write32(qdev, INTR_EN,
687 ctx->intr_en_mask);
688 var = ql_read32(qdev, STS);
689 }
690 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
691 return var;
692}
693
694static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
695{
696 u32 var = 0;
697 struct intr_context *ctx;
698
699 /* HW disables for us if we're MSIX multi interrupts and
700 * it's not the default (zeroeth) interrupt.
701 */
702 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
703 return 0;
704
705 ctx = qdev->intr_context + intr;
706 spin_lock(&qdev->hw_lock);
707 if (!atomic_read(&ctx->irq_cnt)) {
708 ql_write32(qdev, INTR_EN,
709 ctx->intr_dis_mask);
710 var = ql_read32(qdev, STS);
711 }
712 atomic_inc(&ctx->irq_cnt);
713 spin_unlock(&qdev->hw_lock);
714 return var;
715}
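
/* A minimal sketch of the pairing the comment above describes for the
 * default (zeroth) vector: the disable call bumps irq_cnt so the
 * interrupt stays masked while work is outstanding, and the matching
 * enable call only rewrites INTR_EN once irq_cnt drops back to zero.
 * Illustrative only; the real pairing is done by the driver's ISR and
 * by the NAPI poll path (see ql_enable_completion_interrupt() use in
 * ql_napi_poll_msix() later in this file).
 */
#if 0
static void example_legacy_irq_pairing(struct ql_adapter *qdev)
{
	/* Bump irq_cnt so the completion interrupt stays masked. */
	ql_disable_completion_interrupt(qdev, 0);

	/* ... schedule NAPI / workers for the default vector ... */

	/* Drops irq_cnt; INTR_EN is only written once it reaches zero. */
	ql_enable_completion_interrupt(qdev, 0);
}
#endif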
716
717static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
718{
719 int i;
720 for (i = 0; i < qdev->intr_count; i++) {
721		/* The enable call does an atomic_dec_and_test
722 * and enables only if the result is zero.
723 * So we precharge it here.
724 */
725 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
726 i == 0))
727 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
728 ql_enable_completion_interrupt(qdev, i);
729 }
730
731}
732
733static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
734{
735 int status, i;
736 u16 csum = 0;
737 __le16 *flash = (__le16 *)&qdev->flash;
738
739 status = strncmp((char *)&qdev->flash, str, 4);
740 if (status) {
741 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
742 return status;
743 }
744
745 for (i = 0; i < size; i++)
746 csum += le16_to_cpu(*flash++);
747
748 if (csum)
749 netif_err(qdev, ifup, qdev->ndev,
750 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
751
752 return csum;
753}
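
/* The check above treats the flash block as valid only when the 16-bit
 * word sum is zero, which implies the image carries a checksum word
 * chosen to cancel out the rest. A minimal sketch of how such a word
 * could be computed when building an image; the helper name is
 * illustrative and not part of the driver.
 */
#if 0
static u16 example_flash_csum(const __le16 *words, int nwords)
{
	u16 sum = 0;
	int i;

	/* Sum every word with the checksum slot still zeroed... */
	for (i = 0; i < nwords; i++)
		sum += le16_to_cpu(words[i]);
	/* ...then store the two's complement so the block sums to 0. */
	return (u16)-sum;
}
#endif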
754
755static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
756{
757 int status = 0;
758 /* wait for reg to come ready */
759 status = ql_wait_reg_rdy(qdev,
760 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
761 if (status)
762 goto exit;
763 /* set up for reg read */
764 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
765 /* wait for reg to come ready */
766 status = ql_wait_reg_rdy(qdev,
767 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
768 if (status)
769 goto exit;
770	/* This data is stored on flash as an array of __le32.
771	 * ql_read32() returns the word in cpu endianness, so convert
772	 * it back so the in-memory copy stays little-endian like the flash.
773	 */
774 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
775exit:
776 return status;
777}
778
779static int ql_get_8000_flash_params(struct ql_adapter *qdev)
780{
781 u32 i, size;
782 int status;
783 __le32 *p = (__le32 *)&qdev->flash;
784 u32 offset;
785 u8 mac_addr[6];
786
787 /* Get flash offset for function and adjust
788 * for dword access.
789 */
790 if (!qdev->port)
791 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
792 else
793 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
794
795 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
796 return -ETIMEDOUT;
797
798 size = sizeof(struct flash_params_8000) / sizeof(u32);
799 for (i = 0; i < size; i++, p++) {
800 status = ql_read_flash_word(qdev, i+offset, p);
801 if (status) {
802 netif_err(qdev, ifup, qdev->ndev,
803 "Error reading flash.\n");
804 goto exit;
805 }
806 }
807
808 status = ql_validate_flash(qdev,
809 sizeof(struct flash_params_8000) / sizeof(u16),
810 "8000");
811 if (status) {
812 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
813 status = -EINVAL;
814 goto exit;
815 }
816
817 /* Extract either manufacturer or BOFM modified
818 * MAC address.
819 */
820 if (qdev->flash.flash_params_8000.data_type1 == 2)
821 memcpy(mac_addr,
822 qdev->flash.flash_params_8000.mac_addr1,
823 qdev->ndev->addr_len);
824 else
825 memcpy(mac_addr,
826 qdev->flash.flash_params_8000.mac_addr,
827 qdev->ndev->addr_len);
828
829 if (!is_valid_ether_addr(mac_addr)) {
830 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
831 status = -EINVAL;
832 goto exit;
833 }
834
835 memcpy(qdev->ndev->dev_addr,
836 mac_addr,
837 qdev->ndev->addr_len);
838
839exit:
840 ql_sem_unlock(qdev, SEM_FLASH_MASK);
841 return status;
842}
843
844static int ql_get_8012_flash_params(struct ql_adapter *qdev)
845{
846 int i;
847 int status;
848 __le32 *p = (__le32 *)&qdev->flash;
849 u32 offset = 0;
850 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
851
852 /* Second function's parameters follow the first
853 * function's.
854 */
855 if (qdev->port)
856 offset = size;
857
858 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
859 return -ETIMEDOUT;
860
861 for (i = 0; i < size; i++, p++) {
862 status = ql_read_flash_word(qdev, i+offset, p);
863 if (status) {
864 netif_err(qdev, ifup, qdev->ndev,
865 "Error reading flash.\n");
866 goto exit;
867 }
868
869 }
870
871 status = ql_validate_flash(qdev,
872 sizeof(struct flash_params_8012) / sizeof(u16),
873 "8012");
874 if (status) {
875 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
876 status = -EINVAL;
877 goto exit;
878 }
879
880 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
881 status = -EINVAL;
882 goto exit;
883 }
884
885 memcpy(qdev->ndev->dev_addr,
886 qdev->flash.flash_params_8012.mac_addr,
887 qdev->ndev->addr_len);
888
889exit:
890 ql_sem_unlock(qdev, SEM_FLASH_MASK);
891 return status;
892}
893
894/* xgmac registers are located behind the xgmac_addr and xgmac_data
895 * register pair. Each read/write requires us to wait for the ready
896 * bit before reading/writing the data.
897 */
898static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
899{
900 int status;
901 /* wait for reg to come ready */
902 status = ql_wait_reg_rdy(qdev,
903 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
904 if (status)
905 return status;
906 /* write the data to the data reg */
907 ql_write32(qdev, XGMAC_DATA, data);
908 /* trigger the write */
909 ql_write32(qdev, XGMAC_ADDR, reg);
910 return status;
911}
912
913/* xgmac registers are located behind the xgmac_addr and xgmac_data
914 * register pair. Each read/write requires us to wait for the ready
915 * bit before reading/writing the data.
916 */
917int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
918{
919 int status = 0;
920 /* wait for reg to come ready */
921 status = ql_wait_reg_rdy(qdev,
922 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
923 if (status)
924 goto exit;
925 /* set up for reg read */
926 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
927 /* wait for reg to come ready */
928 status = ql_wait_reg_rdy(qdev,
929 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
930 if (status)
931 goto exit;
932 /* get the data */
933 *data = ql_read32(qdev, XGMAC_DATA);
934exit:
935 return status;
936}
937
938/* This is used for reading the 64-bit statistics regs. */
939int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
940{
941 int status = 0;
942 u32 hi = 0;
943 u32 lo = 0;
944
945 status = ql_read_xgmac_reg(qdev, reg, &lo);
946 if (status)
947 goto exit;
948
949 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
950 if (status)
951 goto exit;
952
953 *data = (u64) lo | ((u64) hi << 32);
954
955exit:
956 return status;
957}
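
/* A minimal usage sketch: each 64-bit XGMAC statistic occupies two
 * consecutive 32-bit registers (low word at 'reg', high word at
 * reg + 4), so a statistics dump simply walks the offsets with
 * ql_read_xgmac_reg64(). EXAMPLE_XG_TX_BYTES_REG below is a
 * hypothetical offset used for illustration, not a register name
 * defined by this driver.
 */
#if 0
	u64 tx_bytes;

	if (ql_read_xgmac_reg64(qdev, EXAMPLE_XG_TX_BYTES_REG, &tx_bytes) == 0)
		pr_debug("xgmac tx bytes: %llu\n", tx_bytes);
#endif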
958
959static int ql_8000_port_initialize(struct ql_adapter *qdev)
960{
961 int status;
962 /*
963 * Get MPI firmware version for driver banner
964	 * and ethtool info.
965 */
966 status = ql_mb_about_fw(qdev);
967 if (status)
968 goto exit;
969 status = ql_mb_get_fw_state(qdev);
970 if (status)
971 goto exit;
972 /* Wake up a worker to get/set the TX/RX frame sizes. */
973 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
974exit:
975 return status;
976}
977
978/* Take the MAC Core out of reset.
979 * Enable statistics counting.
980 * Take the transmitter/receiver out of reset.
981 * This functionality may be done in the MPI firmware at a
982 * later date.
983 */
984static int ql_8012_port_initialize(struct ql_adapter *qdev)
985{
986 int status = 0;
987 u32 data;
988
989 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
990 /* Another function has the semaphore, so
991 * wait for the port init bit to come ready.
992 */
993 netif_info(qdev, link, qdev->ndev,
994 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
995 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
996 if (status) {
997 netif_crit(qdev, link, qdev->ndev,
998 "Port initialize timed out.\n");
999 }
1000 return status;
1001 }
1002
1003	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
1004 /* Set the core reset. */
1005 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
1006 if (status)
1007 goto end;
1008 data |= GLOBAL_CFG_RESET;
1009 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1010 if (status)
1011 goto end;
1012
1013 /* Clear the core reset and turn on jumbo for receiver. */
1014 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
1015 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
1016 data |= GLOBAL_CFG_TX_STAT_EN;
1017 data |= GLOBAL_CFG_RX_STAT_EN;
1018 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1019 if (status)
1020 goto end;
1021
1022	/* Enable the transmitter and clear its reset. */
1023 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
1024 if (status)
1025 goto end;
1026 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
1027 data |= TX_CFG_EN; /* Enable the transmitter. */
1028 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
1029 if (status)
1030 goto end;
1031
1032	/* Enable the receiver and clear its reset. */
1033 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1034 if (status)
1035 goto end;
1036 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
1037 data |= RX_CFG_EN; /* Enable the receiver. */
1038 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1039 if (status)
1040 goto end;
1041
1042 /* Turn on jumbo. */
1043	status = ql_write_xgmac_reg(qdev, MAC_TX_PARAMS,
1044				    MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1045 if (status)
1046 goto end;
1047 status =
1048 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1049 if (status)
1050 goto end;
1051
1052 /* Signal to the world that the port is enabled. */
1053 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1054end:
1055 ql_sem_unlock(qdev, qdev->xg_sem_mask);
1056 return status;
1057}
1058
1059static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1060{
1061 return PAGE_SIZE << qdev->lbq_buf_order;
1062}
1063
1064/* Get the next large buffer. */
1065static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1066{
1067 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1068 rx_ring->lbq_curr_idx++;
1069 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1070 rx_ring->lbq_curr_idx = 0;
1071 rx_ring->lbq_free_cnt++;
1072 return lbq_desc;
1073}
1074
1075static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1076 struct rx_ring *rx_ring)
1077{
1078 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1079
1080 pci_dma_sync_single_for_cpu(qdev->pdev,
1081 dma_unmap_addr(lbq_desc, mapaddr),
1082 rx_ring->lbq_buf_size,
1083 PCI_DMA_FROMDEVICE);
1084
1085 /* If it's the last chunk of our master page then
1086 * we unmap it.
1087 */
1088 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1089 == ql_lbq_block_size(qdev))
1090 pci_unmap_page(qdev->pdev,
1091 lbq_desc->p.pg_chunk.map,
1092 ql_lbq_block_size(qdev),
1093 PCI_DMA_FROMDEVICE);
1094 return lbq_desc;
1095}
1096
1097/* Get the next small buffer. */
1098static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1099{
1100 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1101 rx_ring->sbq_curr_idx++;
1102 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1103 rx_ring->sbq_curr_idx = 0;
1104 rx_ring->sbq_free_cnt++;
1105 return sbq_desc;
1106}
1107
1108/* Update an rx ring index. */
1109static void ql_update_cq(struct rx_ring *rx_ring)
1110{
1111 rx_ring->cnsmr_idx++;
1112 rx_ring->curr_entry++;
1113 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1114 rx_ring->cnsmr_idx = 0;
1115 rx_ring->curr_entry = rx_ring->cq_base;
1116 }
1117}
1118
1119static void ql_write_cq_idx(struct rx_ring *rx_ring)
1120{
1121 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1122}
1123
1124static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1125 struct bq_desc *lbq_desc)
1126{
1127 if (!rx_ring->pg_chunk.page) {
1128 u64 map;
1129 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1130 GFP_ATOMIC,
1131 qdev->lbq_buf_order);
1132 if (unlikely(!rx_ring->pg_chunk.page)) {
1133 netif_err(qdev, drv, qdev->ndev,
1134 "page allocation failed.\n");
1135 return -ENOMEM;
1136 }
1137 rx_ring->pg_chunk.offset = 0;
1138 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1139 0, ql_lbq_block_size(qdev),
1140 PCI_DMA_FROMDEVICE);
1141 if (pci_dma_mapping_error(qdev->pdev, map)) {
1142 __free_pages(rx_ring->pg_chunk.page,
1143 qdev->lbq_buf_order);
1144 netif_err(qdev, drv, qdev->ndev,
1145 "PCI mapping failed.\n");
1146 return -ENOMEM;
1147 }
1148 rx_ring->pg_chunk.map = map;
1149 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1150 }
1151
1152 /* Copy the current master pg_chunk info
1153 * to the current descriptor.
1154 */
1155 lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1156
1157 /* Adjust the master page chunk for next
1158 * buffer get.
1159 */
1160 rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1161 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1162 rx_ring->pg_chunk.page = NULL;
1163 lbq_desc->p.pg_chunk.last_flag = 1;
1164 } else {
1165 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1166 get_page(rx_ring->pg_chunk.page);
1167 lbq_desc->p.pg_chunk.last_flag = 0;
1168 }
1169 return 0;
1170}
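
/* Worked example of the page chunking done by ql_get_next_chunk() above,
 * assuming 4 KiB pages, an lbq_buf_order of 1 and a 2 KiB lbq_buf_size
 * (the real values vary with MTU): ql_lbq_block_size() is then 8 KiB, so
 * one master page yields four chunks at offsets 0, 2K, 4K and 6K.
 * get_page() is taken for every chunk except the last, so the master
 * page is only freed once the stack has released all four chunks, and
 * the DMA mapping is torn down in ql_get_curr_lchunk() when the last
 * chunk of the page is consumed.
 */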
1171/* Process (refill) a large buffer queue. */
1172static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1173{
1174 u32 clean_idx = rx_ring->lbq_clean_idx;
1175 u32 start_idx = clean_idx;
1176 struct bq_desc *lbq_desc;
1177 u64 map;
1178 int i;
1179
1180 while (rx_ring->lbq_free_cnt > 32) {
1181 for (i = 0; i < 16; i++) {
1182 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1183 "lbq: try cleaning clean_idx = %d.\n",
1184 clean_idx);
1185 lbq_desc = &rx_ring->lbq[clean_idx];
1186 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1187 netif_err(qdev, ifup, qdev->ndev,
1188 "Could not get a page chunk.\n");
1189 return;
1190 }
1191
1192 map = lbq_desc->p.pg_chunk.map +
1193 lbq_desc->p.pg_chunk.offset;
1194 dma_unmap_addr_set(lbq_desc, mapaddr, map);
1195 dma_unmap_len_set(lbq_desc, maplen,
1196 rx_ring->lbq_buf_size);
1197 *lbq_desc->addr = cpu_to_le64(map);
1198
1199 pci_dma_sync_single_for_device(qdev->pdev, map,
1200 rx_ring->lbq_buf_size,
1201 PCI_DMA_FROMDEVICE);
1202 clean_idx++;
1203 if (clean_idx == rx_ring->lbq_len)
1204 clean_idx = 0;
1205 }
1206
1207 rx_ring->lbq_clean_idx = clean_idx;
1208 rx_ring->lbq_prod_idx += 16;
1209 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1210 rx_ring->lbq_prod_idx = 0;
1211 rx_ring->lbq_free_cnt -= 16;
1212 }
1213
1214 if (start_idx != clean_idx) {
1215 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1216 "lbq: updating prod idx = %d.\n",
1217 rx_ring->lbq_prod_idx);
1218 ql_write_db_reg(rx_ring->lbq_prod_idx,
1219 rx_ring->lbq_prod_idx_db_reg);
1220 }
1221}
1222
1223/* Process (refill) a small buffer queue. */
1224static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1225{
1226 u32 clean_idx = rx_ring->sbq_clean_idx;
1227 u32 start_idx = clean_idx;
1228 struct bq_desc *sbq_desc;
1229 u64 map;
1230 int i;
1231
1232 while (rx_ring->sbq_free_cnt > 16) {
1233 for (i = 0; i < 16; i++) {
1234 sbq_desc = &rx_ring->sbq[clean_idx];
1235 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1236 "sbq: try cleaning clean_idx = %d.\n",
1237 clean_idx);
1238 if (sbq_desc->p.skb == NULL) {
1239 netif_printk(qdev, rx_status, KERN_DEBUG,
1240 qdev->ndev,
1241 "sbq: getting new skb for index %d.\n",
1242 sbq_desc->index);
1243 sbq_desc->p.skb =
1244 netdev_alloc_skb(qdev->ndev,
1245 SMALL_BUFFER_SIZE);
1246 if (sbq_desc->p.skb == NULL) {
1247 netif_err(qdev, probe, qdev->ndev,
1248 "Couldn't get an skb.\n");
1249 rx_ring->sbq_clean_idx = clean_idx;
1250 return;
1251 }
1252 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1253 map = pci_map_single(qdev->pdev,
1254 sbq_desc->p.skb->data,
1255 rx_ring->sbq_buf_size,
1256 PCI_DMA_FROMDEVICE);
1257 if (pci_dma_mapping_error(qdev->pdev, map)) {
1258 netif_err(qdev, ifup, qdev->ndev,
1259 "PCI mapping failed.\n");
1260 rx_ring->sbq_clean_idx = clean_idx;
1261 dev_kfree_skb_any(sbq_desc->p.skb);
1262 sbq_desc->p.skb = NULL;
1263 return;
1264 }
1265 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1266 dma_unmap_len_set(sbq_desc, maplen,
1267 rx_ring->sbq_buf_size);
1268 *sbq_desc->addr = cpu_to_le64(map);
1269 }
1270
1271 clean_idx++;
1272 if (clean_idx == rx_ring->sbq_len)
1273 clean_idx = 0;
1274 }
1275 rx_ring->sbq_clean_idx = clean_idx;
1276 rx_ring->sbq_prod_idx += 16;
1277 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1278 rx_ring->sbq_prod_idx = 0;
1279 rx_ring->sbq_free_cnt -= 16;
1280 }
1281
1282 if (start_idx != clean_idx) {
1283 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1284 "sbq: updating prod idx = %d.\n",
1285 rx_ring->sbq_prod_idx);
1286 ql_write_db_reg(rx_ring->sbq_prod_idx,
1287 rx_ring->sbq_prod_idx_db_reg);
1288 }
1289}
1290
1291static void ql_update_buffer_queues(struct ql_adapter *qdev,
1292 struct rx_ring *rx_ring)
1293{
1294 ql_update_sbq(qdev, rx_ring);
1295 ql_update_lbq(qdev, rx_ring);
1296}
1297
1298/* Unmaps tx buffers. Can be called from send() if a pci mapping
1299 * fails at some stage, or from the interrupt when a tx completes.
1300 */
1301static void ql_unmap_send(struct ql_adapter *qdev,
1302 struct tx_ring_desc *tx_ring_desc, int mapped)
1303{
1304 int i;
1305 for (i = 0; i < mapped; i++) {
1306 if (i == 0 || (i == 7 && mapped > 7)) {
1307 /*
1308 * Unmap the skb->data area, or the
1309 * external sglist (AKA the Outbound
1310 * Address List (OAL)).
1311			 * If it's the zeroth element, then it's
1312			 * the skb->data area. If it's the 7th
1313			 * element and there are more than 6 frags,
1314			 * then it's an OAL.
1315 */
1316 if (i == 7) {
1317 netif_printk(qdev, tx_done, KERN_DEBUG,
1318 qdev->ndev,
1319 "unmapping OAL area.\n");
1320 }
1321 pci_unmap_single(qdev->pdev,
1322 dma_unmap_addr(&tx_ring_desc->map[i],
1323 mapaddr),
1324 dma_unmap_len(&tx_ring_desc->map[i],
1325 maplen),
1326 PCI_DMA_TODEVICE);
1327 } else {
1328 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1329 "unmapping frag %d.\n", i);
1330 pci_unmap_page(qdev->pdev,
1331 dma_unmap_addr(&tx_ring_desc->map[i],
1332 mapaddr),
1333 dma_unmap_len(&tx_ring_desc->map[i],
1334 maplen), PCI_DMA_TODEVICE);
1335 }
1336 }
1337
1338}
1339
1340/* Map the buffers for this transmit. This will return
1341 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1342 */
1343static int ql_map_send(struct ql_adapter *qdev,
1344 struct ob_mac_iocb_req *mac_iocb_ptr,
1345 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1346{
1347 int len = skb_headlen(skb);
1348 dma_addr_t map;
1349 int frag_idx, err, map_idx = 0;
1350 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1351 int frag_cnt = skb_shinfo(skb)->nr_frags;
1352
1353 if (frag_cnt) {
1354 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1355 "frag_cnt = %d.\n", frag_cnt);
1356 }
1357 /*
1358 * Map the skb buffer first.
1359 */
1360 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1361
1362 err = pci_dma_mapping_error(qdev->pdev, map);
1363 if (err) {
1364 netif_err(qdev, tx_queued, qdev->ndev,
1365 "PCI mapping failed with error: %d\n", err);
1366
1367 return NETDEV_TX_BUSY;
1368 }
1369
1370 tbd->len = cpu_to_le32(len);
1371 tbd->addr = cpu_to_le64(map);
1372 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1373 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1374 map_idx++;
1375
1376 /*
1377 * This loop fills the remainder of the 8 address descriptors
1378 * in the IOCB. If there are more than 7 fragments, then the
1379 * eighth address desc will point to an external list (OAL).
1380 * When this happens, the remainder of the frags will be stored
1381 * in this list.
1382 */
1383 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1384 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1385 tbd++;
1386 if (frag_idx == 6 && frag_cnt > 7) {
1387 /* Let's tack on an sglist.
1388 * Our control block will now
1389 * look like this:
1390 * iocb->seg[0] = skb->data
1391 * iocb->seg[1] = frag[0]
1392 * iocb->seg[2] = frag[1]
1393 * iocb->seg[3] = frag[2]
1394 * iocb->seg[4] = frag[3]
1395 * iocb->seg[5] = frag[4]
1396 * iocb->seg[6] = frag[5]
1397 * iocb->seg[7] = ptr to OAL (external sglist)
1398 * oal->seg[0] = frag[6]
1399 * oal->seg[1] = frag[7]
1400 * oal->seg[2] = frag[8]
1401 * oal->seg[3] = frag[9]
1402 * oal->seg[4] = frag[10]
1403 * etc...
1404 */
1405 /* Tack on the OAL in the eighth segment of IOCB. */
1406 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1407 sizeof(struct oal),
1408 PCI_DMA_TODEVICE);
1409 err = pci_dma_mapping_error(qdev->pdev, map);
1410 if (err) {
1411 netif_err(qdev, tx_queued, qdev->ndev,
1412 "PCI mapping outbound address list with error: %d\n",
1413 err);
1414 goto map_error;
1415 }
1416
1417 tbd->addr = cpu_to_le64(map);
1418 /*
1419			 * The length is the number of fragments
1420			 * that remain to be mapped times the size of
1421			 * one address descriptor in our sglist (OAL).
1422 */
1423 tbd->len =
1424 cpu_to_le32((sizeof(struct tx_buf_desc) *
1425 (frag_cnt - frag_idx)) | TX_DESC_C);
1426 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1427 map);
1428 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1429 sizeof(struct oal));
1430 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1431 map_idx++;
1432 }
1433
1434 map =
1435 pci_map_page(qdev->pdev, frag->page,
1436 frag->page_offset, frag->size,
1437 PCI_DMA_TODEVICE);
1438
1439 err = pci_dma_mapping_error(qdev->pdev, map);
1440 if (err) {
1441 netif_err(qdev, tx_queued, qdev->ndev,
1442 "PCI mapping frags failed with error: %d.\n",
1443 err);
1444 goto map_error;
1445 }
1446
1447 tbd->addr = cpu_to_le64(map);
1448 tbd->len = cpu_to_le32(frag->size);
1449 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1450 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1451 frag->size);
1452
1453 }
1454 /* Save the number of segments we've mapped. */
1455 tx_ring_desc->map_cnt = map_idx;
1456 /* Terminate the last segment. */
1457 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1458 return NETDEV_TX_OK;
1459
1460map_error:
1461 /*
1462	 * If a frag (or the OAL) mapping failed, map_idx is the
1463	 * number of segments that were mapped successfully,
1464	 * starting with the skb->data area, so exactly those
1465	 * mappings can be unmapped.
1466 */
1467 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1468 return NETDEV_TX_BUSY;
1469}
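
/* Worked example of the mapping bookkeeping above for an skb with a
 * linear area plus ten fragments:
 *   map[0]     = skb->data (pci_map_single)
 *   map[1..6]  = frags 0..5 (pci_map_page)
 *   map[7]     = the OAL itself (pci_map_single), whose entries point
 *                at frags 6..9, which land in map[8..11]
 * giving tx_ring_desc->map_cnt == 12. This is why ql_unmap_send()
 * treats index 0 and index 7 (when more than 7 segments were mapped)
 * as pci_unmap_single() and everything else as pci_unmap_page().
 */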
1470
1471/* Process an inbound completion from an rx ring. */
1472static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1473 struct rx_ring *rx_ring,
1474 struct ib_mac_iocb_rsp *ib_mac_rsp,
1475 u32 length,
1476 u16 vlan_id)
1477{
1478 struct sk_buff *skb;
1479 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1480 struct skb_frag_struct *rx_frag;
1481 int nr_frags;
1482 struct napi_struct *napi = &rx_ring->napi;
1483
1484 napi->dev = qdev->ndev;
1485
1486 skb = napi_get_frags(napi);
1487 if (!skb) {
1488 netif_err(qdev, drv, qdev->ndev,
1489 "Couldn't get an skb, exiting.\n");
1490 rx_ring->rx_dropped++;
1491 put_page(lbq_desc->p.pg_chunk.page);
1492 return;
1493 }
1494 prefetch(lbq_desc->p.pg_chunk.va);
1495 rx_frag = skb_shinfo(skb)->frags;
1496 nr_frags = skb_shinfo(skb)->nr_frags;
1497 rx_frag += nr_frags;
1498 rx_frag->page = lbq_desc->p.pg_chunk.page;
1499 rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
1500 rx_frag->size = length;
1501
1502 skb->len += length;
1503 skb->data_len += length;
1504 skb->truesize += length;
1505 skb_shinfo(skb)->nr_frags++;
1506
1507 rx_ring->rx_packets++;
1508 rx_ring->rx_bytes += length;
1509 skb->ip_summed = CHECKSUM_UNNECESSARY;
1510 skb_record_rx_queue(skb, rx_ring->cq_id);
1511 if (vlan_id != 0xffff)
1512 __vlan_hwaccel_put_tag(skb, vlan_id);
1513 napi_gro_frags(napi);
1514}
1515
1516/* Process an inbound completion from an rx ring. */
1517static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1518 struct rx_ring *rx_ring,
1519 struct ib_mac_iocb_rsp *ib_mac_rsp,
1520 u32 length,
1521 u16 vlan_id)
1522{
1523 struct net_device *ndev = qdev->ndev;
1524 struct sk_buff *skb = NULL;
1525 void *addr;
1526 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1527 struct napi_struct *napi = &rx_ring->napi;
1528
1529 skb = netdev_alloc_skb(ndev, length);
1530 if (!skb) {
1531 netif_err(qdev, drv, qdev->ndev,
1532			  "Couldn't get an skb, need to unwind.\n");
1533 rx_ring->rx_dropped++;
1534 put_page(lbq_desc->p.pg_chunk.page);
1535 return;
1536 }
1537
1538 addr = lbq_desc->p.pg_chunk.va;
1539 prefetch(addr);
1540
1541
1542 /* Frame error, so drop the packet. */
1543 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1544 netif_info(qdev, drv, qdev->ndev,
1545 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1546 rx_ring->rx_errors++;
1547 goto err_out;
1548 }
1549
1550 /* The max framesize filter on this chip is set higher than
1551 * MTU since FCoE uses 2k frames.
1552 */
1553	if (length > ndev->mtu + ETH_HLEN) {
1554		netif_err(qdev, drv, qdev->ndev,
1555			  "Frame too long, dropping.\n");
1556 rx_ring->rx_dropped++;
1557 goto err_out;
1558 }
1559 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1560 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1561 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1562 length);
1563 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1564 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1565 length-ETH_HLEN);
1566 skb->len += length-ETH_HLEN;
1567 skb->data_len += length-ETH_HLEN;
1568 skb->truesize += length-ETH_HLEN;
1569
1570 rx_ring->rx_packets++;
1571 rx_ring->rx_bytes += skb->len;
1572 skb->protocol = eth_type_trans(skb, ndev);
1573 skb_checksum_none_assert(skb);
1574
1575 if ((ndev->features & NETIF_F_RXCSUM) &&
1576 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1577 /* TCP frame. */
1578 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1579 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1580 "TCP checksum done!\n");
1581 skb->ip_summed = CHECKSUM_UNNECESSARY;
1582 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1583 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1584 /* Unfragmented ipv4 UDP frame. */
1585 struct iphdr *iph = (struct iphdr *) skb->data;
1586 if (!(iph->frag_off &
1587 cpu_to_be16(IP_MF|IP_OFFSET))) {
1588 skb->ip_summed = CHECKSUM_UNNECESSARY;
1589 netif_printk(qdev, rx_status, KERN_DEBUG,
1590 qdev->ndev,
1591						     "UDP checksum done!\n");
1592 }
1593 }
1594 }
1595
1596 skb_record_rx_queue(skb, rx_ring->cq_id);
1597 if (vlan_id != 0xffff)
1598 __vlan_hwaccel_put_tag(skb, vlan_id);
1599 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1600 napi_gro_receive(napi, skb);
1601 else
1602 netif_receive_skb(skb);
1603 return;
1604err_out:
1605 dev_kfree_skb_any(skb);
1606 put_page(lbq_desc->p.pg_chunk.page);
1607}
1608
1609/* Process an inbound completion from an rx ring. */
1610static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1611 struct rx_ring *rx_ring,
1612 struct ib_mac_iocb_rsp *ib_mac_rsp,
1613 u32 length,
1614 u16 vlan_id)
1615{
1616 struct net_device *ndev = qdev->ndev;
1617 struct sk_buff *skb = NULL;
1618 struct sk_buff *new_skb = NULL;
1619 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1620
1621 skb = sbq_desc->p.skb;
1622 /* Allocate new_skb and copy */
1623 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1624 if (new_skb == NULL) {
1625 netif_err(qdev, probe, qdev->ndev,
1626 "No skb available, drop the packet.\n");
1627 rx_ring->rx_dropped++;
1628 return;
1629 }
1630 skb_reserve(new_skb, NET_IP_ALIGN);
1631 memcpy(skb_put(new_skb, length), skb->data, length);
1632 skb = new_skb;
1633
1634 /* Frame error, so drop the packet. */
1635 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1636 netif_info(qdev, drv, qdev->ndev,
1637 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1638 dev_kfree_skb_any(skb);
1639 rx_ring->rx_errors++;
1640 return;
1641 }
1642
1643 /* loopback self test for ethtool */
1644 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1645 ql_check_lb_frame(qdev, skb);
1646 dev_kfree_skb_any(skb);
1647 return;
1648 }
1649
1650 /* The max framesize filter on this chip is set higher than
1651 * MTU since FCoE uses 2k frames.
1652 */
1653 if (skb->len > ndev->mtu + ETH_HLEN) {
1654 dev_kfree_skb_any(skb);
1655 rx_ring->rx_dropped++;
1656 return;
1657 }
1658
1659 prefetch(skb->data);
1660 skb->dev = ndev;
1661 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1662 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1663 "%s Multicast.\n",
1664 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1665 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1666 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1667 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1668 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1669 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1670 }
1671 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1672 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1673 "Promiscuous Packet.\n");
1674
1675 rx_ring->rx_packets++;
1676 rx_ring->rx_bytes += skb->len;
1677 skb->protocol = eth_type_trans(skb, ndev);
1678 skb_checksum_none_assert(skb);
1679
1680 /* If rx checksum is on, and there are no
1681 * csum or frame errors.
1682 */
1683 if ((ndev->features & NETIF_F_RXCSUM) &&
1684 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1685 /* TCP frame. */
1686 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1687 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1688 "TCP checksum done!\n");
1689 skb->ip_summed = CHECKSUM_UNNECESSARY;
1690 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1691 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1692 /* Unfragmented ipv4 UDP frame. */
1693 struct iphdr *iph = (struct iphdr *) skb->data;
1694 if (!(iph->frag_off &
1695 ntohs(IP_MF|IP_OFFSET))) {
1696 skb->ip_summed = CHECKSUM_UNNECESSARY;
1697 netif_printk(qdev, rx_status, KERN_DEBUG,
1698 qdev->ndev,
1699						     "UDP checksum done!\n");
1700 }
1701 }
1702 }
1703
1704 skb_record_rx_queue(skb, rx_ring->cq_id);
1705 if (vlan_id != 0xffff)
1706 __vlan_hwaccel_put_tag(skb, vlan_id);
1707 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1708 napi_gro_receive(&rx_ring->napi, skb);
1709 else
1710 netif_receive_skb(skb);
1711}
1712
1713static void ql_realign_skb(struct sk_buff *skb, int len)
1714{
1715 void *temp_addr = skb->data;
1716
1717 /* Undo the skb_reserve(skb,32) we did before
1718 * giving to hardware, and realign data on
1719 * a 2-byte boundary.
1720 */
1721 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1722 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1723 skb_copy_to_linear_data(skb, temp_addr,
1724 (unsigned int)len);
1725}
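
/* Worked example of the arithmetic above, assuming QLGE_SB_PAD is the
 * 32 bytes mentioned in the comment and NET_IP_ALIGN has its common
 * value of 2 (it is 0 on some architectures): the buffer was handed to
 * hardware with skb_reserve(skb, 32), so pulling data and tail back by
 * 32 - 2 = 30 bytes leaves 2 bytes of headroom. With a 14-byte Ethernet
 * header that places the IP header on a 4-byte boundary after the copy.
 */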
1726
1727/*
1728 * This function builds an skb for the given inbound
1729 * completion. It will be rewritten for readability in the near
1730 * future, but for now it works well.
1731 */
1732static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1733 struct rx_ring *rx_ring,
1734 struct ib_mac_iocb_rsp *ib_mac_rsp)
1735{
1736 struct bq_desc *lbq_desc;
1737 struct bq_desc *sbq_desc;
1738 struct sk_buff *skb = NULL;
1739 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1740 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1741
1742 /*
1743 * Handle the header buffer if present.
1744 */
1745 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1746 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1747 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1748 "Header of %d bytes in small buffer.\n", hdr_len);
1749 /*
1750 * Headers fit nicely into a small buffer.
1751 */
1752 sbq_desc = ql_get_curr_sbuf(rx_ring);
1753 pci_unmap_single(qdev->pdev,
1754 dma_unmap_addr(sbq_desc, mapaddr),
1755 dma_unmap_len(sbq_desc, maplen),
1756 PCI_DMA_FROMDEVICE);
1757 skb = sbq_desc->p.skb;
1758 ql_realign_skb(skb, hdr_len);
1759 skb_put(skb, hdr_len);
1760 sbq_desc->p.skb = NULL;
1761 }
1762
1763 /*
1764 * Handle the data buffer(s).
1765 */
1766 if (unlikely(!length)) { /* Is there data too? */
1767 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1768 "No Data buffer in this packet.\n");
1769 return skb;
1770 }
1771
1772 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1773 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1774 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1775 "Headers in small, data of %d bytes in small, combine them.\n",
1776 length);
1777 /*
1778 * Data is less than small buffer size so it's
1779 * stuffed in a small buffer.
1780 * For this case we append the data
1781 * from the "data" small buffer to the "header" small
1782 * buffer.
1783 */
1784 sbq_desc = ql_get_curr_sbuf(rx_ring);
1785 pci_dma_sync_single_for_cpu(qdev->pdev,
1786 dma_unmap_addr
1787 (sbq_desc, mapaddr),
1788 dma_unmap_len
1789 (sbq_desc, maplen),
1790 PCI_DMA_FROMDEVICE);
1791 memcpy(skb_put(skb, length),
1792 sbq_desc->p.skb->data, length);
1793 pci_dma_sync_single_for_device(qdev->pdev,
1794 dma_unmap_addr
1795 (sbq_desc,
1796 mapaddr),
1797 dma_unmap_len
1798 (sbq_desc,
1799 maplen),
1800 PCI_DMA_FROMDEVICE);
1801 } else {
1802 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1803 "%d bytes in a single small buffer.\n",
1804 length);
1805 sbq_desc = ql_get_curr_sbuf(rx_ring);
1806 skb = sbq_desc->p.skb;
1807 ql_realign_skb(skb, length);
1808 skb_put(skb, length);
1809 pci_unmap_single(qdev->pdev,
1810 dma_unmap_addr(sbq_desc,
1811 mapaddr),
1812 dma_unmap_len(sbq_desc,
1813 maplen),
1814 PCI_DMA_FROMDEVICE);
1815 sbq_desc->p.skb = NULL;
1816 }
1817 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1818 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1819 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1820 "Header in small, %d bytes in large. Chain large to small!\n",
1821 length);
1822 /*
1823 * The data is in a single large buffer. We
1824 * chain it to the header buffer's skb and let
1825 * it rip.
1826 */
1827 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1828 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1829 "Chaining page at offset = %d, for %d bytes to skb.\n",
1830 lbq_desc->p.pg_chunk.offset, length);
1831 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1832 lbq_desc->p.pg_chunk.offset,
1833 length);
1834 skb->len += length;
1835 skb->data_len += length;
1836 skb->truesize += length;
1837 } else {
1838 /*
1839 * The headers and data are in a single large buffer. We
1840 * copy it to a new skb and let it go. This can happen with
1841 * jumbo mtu on a non-TCP/UDP frame.
1842 */
1843 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1844 skb = netdev_alloc_skb(qdev->ndev, length);
1845 if (skb == NULL) {
1846 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1847 "No skb available, drop the packet.\n");
1848 return NULL;
1849 }
1850 pci_unmap_page(qdev->pdev,
1851 dma_unmap_addr(lbq_desc,
1852 mapaddr),
1853 dma_unmap_len(lbq_desc, maplen),
1854 PCI_DMA_FROMDEVICE);
1855 skb_reserve(skb, NET_IP_ALIGN);
1856 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1857 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1858 length);
1859 skb_fill_page_desc(skb, 0,
1860 lbq_desc->p.pg_chunk.page,
1861 lbq_desc->p.pg_chunk.offset,
1862 length);
1863 skb->len += length;
1864 skb->data_len += length;
1865 skb->truesize += length;
1866 length -= length;
1867 __pskb_pull_tail(skb,
1868 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1869 VLAN_ETH_HLEN : ETH_HLEN);
1870 }
1871 } else {
1872 /*
1873 * The data is in a chain of large buffers
1874 * pointed to by a small buffer. We loop
1875		 * through and chain them to our small header
1876 * buffer's skb.
1877 * frags: There are 18 max frags and our small
1878 * buffer will hold 32 of them. The thing is,
1879 * we'll use 3 max for our 9000 byte jumbo
1880 * frames. If the MTU goes up we could
1881 * eventually be in trouble.
1882 */
1883 int size, i = 0;
1884 sbq_desc = ql_get_curr_sbuf(rx_ring);
1885 pci_unmap_single(qdev->pdev,
1886 dma_unmap_addr(sbq_desc, mapaddr),
1887 dma_unmap_len(sbq_desc, maplen),
1888 PCI_DMA_FROMDEVICE);
1889 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1890 /*
1891			 * This is a non-TCP/UDP IP frame, so
1892 * the headers aren't split into a small
1893 * buffer. We have to use the small buffer
1894 * that contains our sg list as our skb to
1895 * send upstairs. Copy the sg list here to
1896 * a local buffer and use it to find the
1897 * pages to chain.
1898 */
1899 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1900 "%d bytes of headers & data in chain of large.\n",
1901 length);
1902 skb = sbq_desc->p.skb;
1903 sbq_desc->p.skb = NULL;
1904 skb_reserve(skb, NET_IP_ALIGN);
1905 }
1906 while (length > 0) {
1907 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1908 size = (length < rx_ring->lbq_buf_size) ? length :
1909 rx_ring->lbq_buf_size;
1910
1911 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1912 "Adding page %d to skb for %d bytes.\n",
1913 i, size);
1914 skb_fill_page_desc(skb, i,
1915 lbq_desc->p.pg_chunk.page,
1916 lbq_desc->p.pg_chunk.offset,
1917 size);
1918 skb->len += size;
1919 skb->data_len += size;
1920 skb->truesize += size;
1921 length -= size;
1922 i++;
1923 }
1924 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1925 VLAN_ETH_HLEN : ETH_HLEN);
1926 }
1927 return skb;
1928}
1929
1930/* Process an inbound completion from an rx ring. */
1931static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1932 struct rx_ring *rx_ring,
1933 struct ib_mac_iocb_rsp *ib_mac_rsp,
1934 u16 vlan_id)
1935{
1936 struct net_device *ndev = qdev->ndev;
1937 struct sk_buff *skb = NULL;
1938
1939 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1940
1941 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1942 if (unlikely(!skb)) {
1943 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1944 "No skb available, drop packet.\n");
1945 rx_ring->rx_dropped++;
1946 return;
1947 }
1948
1949 /* Frame error, so drop the packet. */
1950 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1951 netif_info(qdev, drv, qdev->ndev,
1952 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1953 dev_kfree_skb_any(skb);
1954 rx_ring->rx_errors++;
1955 return;
1956 }
1957
1958 /* The max framesize filter on this chip is set higher than
1959 * MTU since FCoE uses 2k frames.
1960 */
1961 if (skb->len > ndev->mtu + ETH_HLEN) {
1962 dev_kfree_skb_any(skb);
1963 rx_ring->rx_dropped++;
1964 return;
1965 }
1966
1967 /* loopback self test for ethtool */
1968 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1969 ql_check_lb_frame(qdev, skb);
1970 dev_kfree_skb_any(skb);
1971 return;
1972 }
1973
1974 prefetch(skb->data);
1975 skb->dev = ndev;
1976 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1977 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1978 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1979 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1980 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1981 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1982 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1983 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1984 rx_ring->rx_multicast++;
1985 }
1986 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1987 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1988 "Promiscuous Packet.\n");
1989 }
1990
1991 skb->protocol = eth_type_trans(skb, ndev);
1992 skb_checksum_none_assert(skb);
1993
1994 /* If rx checksum is on, and there are no
1995 * csum or frame errors.
1996 */
1997 if ((ndev->features & NETIF_F_RXCSUM) &&
1998 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1999 /* TCP frame. */
2000 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
2001 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2002 "TCP checksum done!\n");
2003 skb->ip_summed = CHECKSUM_UNNECESSARY;
2004 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
2005 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2006 /* Unfragmented ipv4 UDP frame. */
2007 struct iphdr *iph = (struct iphdr *) skb->data;
2008 if (!(iph->frag_off &
2009 ntohs(IP_MF|IP_OFFSET))) {
2010 skb->ip_summed = CHECKSUM_UNNECESSARY;
2011 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2012						     "UDP checksum done!\n");
2013 }
2014 }
2015 }
2016
2017 rx_ring->rx_packets++;
2018 rx_ring->rx_bytes += skb->len;
2019 skb_record_rx_queue(skb, rx_ring->cq_id);
2020 if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
2021 __vlan_hwaccel_put_tag(skb, vlan_id);
2022 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2023 napi_gro_receive(&rx_ring->napi, skb);
2024 else
2025 netif_receive_skb(skb);
2026}
2027
2028/* Process an inbound completion from an rx ring. */
2029static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2030 struct rx_ring *rx_ring,
2031 struct ib_mac_iocb_rsp *ib_mac_rsp)
2032{
2033 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2034 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2035 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2036 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2037
2038 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2039
2040 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2041 /* The data and headers are split into
2042 * separate buffers.
2043 */
2044 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2045 vlan_id);
2046 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2047 /* The data fit in a single small buffer.
2048 * Allocate a new skb, copy the data and
2049 * return the buffer to the free pool.
2050 */
2051 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2052 length, vlan_id);
2053 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2054 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2055 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2056 /* TCP packet in a page chunk that's been checksummed.
2057 * Tack it on to our GRO skb and let it go.
2058 */
2059 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2060 length, vlan_id);
2061 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2062 /* Non-TCP packet in a page chunk. Allocate an
2063 * skb, tack it on frags, and send it up.
2064 */
2065 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2066 length, vlan_id);
2067 } else {
2068 /* Non-TCP/UDP large frames that span multiple buffers
2069		 * can be processed correctly by the split frame logic.
2070 */
2071 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2072 vlan_id);
2073 }
2074
2075 return (unsigned long)length;
2076}
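
/* Summary of the dispatch above:
 *   header/data split (RSP_HV)                  -> ql_process_mac_split_rx_intr()
 *   whole frame in one small buffer (RSP_DS)    -> ql_process_mac_rx_skb()
 *   clean TCP in a page chunk (RSP_DL + RSP_T)  -> ql_process_mac_rx_gro_page()
 *   other frames in a page chunk (RSP_DL)       -> ql_process_mac_rx_page()
 *   anything else                               -> split-frame path as a fallback
 */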
2077
2078/* Process an outbound completion from an rx ring. */
2079static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2080 struct ob_mac_iocb_rsp *mac_rsp)
2081{
2082 struct tx_ring *tx_ring;
2083 struct tx_ring_desc *tx_ring_desc;
2084
2085 QL_DUMP_OB_MAC_RSP(mac_rsp);
2086 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2087 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2088 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2089 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2090 tx_ring->tx_packets++;
2091 dev_kfree_skb(tx_ring_desc->skb);
2092 tx_ring_desc->skb = NULL;
2093
2094 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2095 OB_MAC_IOCB_RSP_S |
2096 OB_MAC_IOCB_RSP_L |
2097 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2098 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2099 netif_warn(qdev, tx_done, qdev->ndev,
2100 "Total descriptor length did not match transfer length.\n");
2101 }
2102 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2103 netif_warn(qdev, tx_done, qdev->ndev,
2104 "Frame too short to be valid, not sent.\n");
2105 }
2106 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2107 netif_warn(qdev, tx_done, qdev->ndev,
2108 "Frame too long, but sent anyway.\n");
2109 }
2110 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2111 netif_warn(qdev, tx_done, qdev->ndev,
2112 "PCI backplane error. Frame not sent.\n");
2113 }
2114 }
2115 atomic_inc(&tx_ring->tx_count);
2116}
2117
2118/* Fire up a handler to reset the MPI processor. */
2119void ql_queue_fw_error(struct ql_adapter *qdev)
2120{
2121 ql_link_off(qdev);
2122 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2123}
2124
2125void ql_queue_asic_error(struct ql_adapter *qdev)
2126{
2127 ql_link_off(qdev);
2128 ql_disable_interrupts(qdev);
2129 /* Clear adapter up bit to signal the recovery
2130 * process that it shouldn't kill the reset worker
2131 * thread
2132 */
2133 clear_bit(QL_ADAPTER_UP, &qdev->flags);
2134 /* Set asic recovery bit to indicate reset process that we are
2135 * in fatal error recovery process rather than normal close
2136 */
2137 set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2138 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2139}
2140
2141static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2142 struct ib_ae_iocb_rsp *ib_ae_rsp)
2143{
2144 switch (ib_ae_rsp->event) {
2145 case MGMT_ERR_EVENT:
2146 netif_err(qdev, rx_err, qdev->ndev,
2147 "Management Processor Fatal Error.\n");
2148 ql_queue_fw_error(qdev);
2149 return;
2150
2151 case CAM_LOOKUP_ERR_EVENT:
2152 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2153 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2154 ql_queue_asic_error(qdev);
2155 return;
2156
2157 case SOFT_ECC_ERROR_EVENT:
2158 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2159 ql_queue_asic_error(qdev);
2160 break;
2161
2162 case PCI_ERR_ANON_BUF_RD:
2163 netdev_err(qdev->ndev, "PCI error occurred when reading "
2164 "anonymous buffers from rx_ring %d.\n",
2165 ib_ae_rsp->q_id);
2166 ql_queue_asic_error(qdev);
2167 break;
2168
2169 default:
2170 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2171 ib_ae_rsp->event);
2172 ql_queue_asic_error(qdev);
2173 break;
2174 }
2175}
2176
2177static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2178{
2179 struct ql_adapter *qdev = rx_ring->qdev;
2180 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2181 struct ob_mac_iocb_rsp *net_rsp = NULL;
2182 int count = 0;
2183
2184 struct tx_ring *tx_ring;
2185 /* While there are entries in the completion queue. */
2186 while (prod != rx_ring->cnsmr_idx) {
2187
2188 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2189			   "cq_id = %d, prod = %d, cnsmr = %d.\n",
2190 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2191
2192 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2193 rmb();
2194 switch (net_rsp->opcode) {
2195
2196 case OPCODE_OB_MAC_TSO_IOCB:
2197 case OPCODE_OB_MAC_IOCB:
2198 ql_process_mac_tx_intr(qdev, net_rsp);
2199 break;
2200 default:
2201 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2202 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2203 net_rsp->opcode);
2204 }
2205 count++;
2206 ql_update_cq(rx_ring);
2207 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2208 }
2209 if (!net_rsp)
2210 return 0;
2211 ql_write_cq_idx(rx_ring);
2212 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2213 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2214 if (atomic_read(&tx_ring->queue_stopped) &&
2215 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2216 /*
2217 * The queue got stopped because the tx_ring was full.
2218 * Wake it up, because it's now at least 25% empty.
2219 */
2220 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2221 }
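	/* Illustrative arithmetic (assuming the default 256-entry tx
	 * ring): the subqueue is woken again once more than 64
	 * descriptors (wq_len / 4) have been freed by completions.
	 */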
2222
2223 return count;
2224}
2225
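/* Service up to 'budget' entries from an inbound completion queue:
 * received frames and asynchronous chip events. The buffer queues are
 * then replenished and the new consumer index is written back to the
 * chip. Returns the number of completions handled.
 */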
2226static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2227{
2228 struct ql_adapter *qdev = rx_ring->qdev;
2229 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2230 struct ql_net_rsp_iocb *net_rsp;
2231 int count = 0;
2232
2233 /* While there are entries in the completion queue. */
2234 while (prod != rx_ring->cnsmr_idx) {
2235
2236 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2237				"cq_id = %d, prod = %d, cnsmr = %d.\n",
2238 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2239
2240 net_rsp = rx_ring->curr_entry;
2241 rmb();
2242 switch (net_rsp->opcode) {
2243 case OPCODE_IB_MAC_IOCB:
2244 ql_process_mac_rx_intr(qdev, rx_ring,
2245 (struct ib_mac_iocb_rsp *)
2246 net_rsp);
2247 break;
2248
2249 case OPCODE_IB_AE_IOCB:
2250 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2251 net_rsp);
2252 break;
2253 default:
2254 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2255 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2256 net_rsp->opcode);
2257 break;
2258 }
2259 count++;
2260 ql_update_cq(rx_ring);
2261 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2262 if (count == budget)
2263 break;
2264 }
2265 ql_update_buffer_queues(qdev, rx_ring);
2266 ql_write_cq_idx(rx_ring);
2267 return count;
2268}
2269
2270static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2271{
2272 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2273 struct ql_adapter *qdev = rx_ring->qdev;
2274 struct rx_ring *trx_ring;
2275 int i, work_done = 0;
2276 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2277
2278 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2279 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2280
2281 /* Service the TX rings first. They start
2282 * right after the RSS rings. */
2283 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2284 trx_ring = &qdev->rx_ring[i];
2285 /* If this TX completion ring belongs to this vector and
2286 * it's not empty then service it.
2287 */
2288 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2289 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2290 trx_ring->cnsmr_idx)) {
2291 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2292 "%s: Servicing TX completion ring %d.\n",
2293 __func__, trx_ring->cq_id);
2294 ql_clean_outbound_rx_ring(trx_ring);
2295 }
2296 }
2297
2298 /*
2299 * Now service the RSS ring if it's active.
2300 */
2301 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2302 rx_ring->cnsmr_idx) {
2303 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2304 "%s: Servicing RX completion ring %d.\n",
2305 __func__, rx_ring->cq_id);
2306 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2307 }
2308
2309 if (work_done < budget) {
2310 napi_complete(napi);
2311 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2312 }
2313 return work_done;
2314}
2315
2316static void qlge_vlan_mode(struct net_device *ndev, u32 features)
2317{
2318 struct ql_adapter *qdev = netdev_priv(ndev);
2319
2320 if (features & NETIF_F_HW_VLAN_RX) {
2321 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
2322 "Turning on VLAN in NIC_RCV_CFG.\n");
2323 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2324 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2325 } else {
2326 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
2327 "Turning off VLAN in NIC_RCV_CFG.\n");
2328 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2329 }
2330}
2331
2332static u32 qlge_fix_features(struct net_device *ndev, u32 features)
2333{
2334 /*
2335 * Since there is no support for separate rx/tx vlan accel
2336	 * enable/disable, make sure the tx flag is always in the same state as rx.
2337 */
2338 if (features & NETIF_F_HW_VLAN_RX)
2339 features |= NETIF_F_HW_VLAN_TX;
2340 else
2341 features &= ~NETIF_F_HW_VLAN_TX;
2342
2343 return features;
2344}
2345
2346static int qlge_set_features(struct net_device *ndev, u32 features)
2347{
2348 u32 changed = ndev->features ^ features;
2349
2350 if (changed & NETIF_F_HW_VLAN_RX)
2351 qlge_vlan_mode(ndev, features);
2352
2353 return 0;
2354}
2355
2356static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2357{
2358 u32 enable_bit = MAC_ADDR_E;
2359
2360 if (ql_set_mac_addr_reg
2361 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2362 netif_err(qdev, ifup, qdev->ndev,
2363 "Failed to init vlan address.\n");
2364 }
2365}
2366
2367static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2368{
2369 struct ql_adapter *qdev = netdev_priv(ndev);
2370 int status;
2371
2372 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2373 if (status)
2374 return;
2375
2376 __qlge_vlan_rx_add_vid(qdev, vid);
2377 set_bit(vid, qdev->active_vlans);
2378
2379 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2380}
2381
2382static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2383{
2384 u32 enable_bit = 0;
2385
2386 if (ql_set_mac_addr_reg
2387 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2388 netif_err(qdev, ifup, qdev->ndev,
2389 "Failed to clear vlan address.\n");
2390 }
2391}
2392
2393static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2394{
2395 struct ql_adapter *qdev = netdev_priv(ndev);
2396 int status;
2397
2398 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2399 if (status)
2400 return;
2401
2402 __qlge_vlan_rx_kill_vid(qdev, vid);
2403 clear_bit(vid, qdev->active_vlans);
2404
2405 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2406}
2407
2408static void qlge_restore_vlan(struct ql_adapter *qdev)
2409{
2410 int status;
2411 u16 vid;
2412
2413 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2414 if (status)
2415 return;
2416
2417 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2418 __qlge_vlan_rx_add_vid(qdev, vid);
2419
2420 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2421}
2422
2423/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2424static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2425{
2426 struct rx_ring *rx_ring = dev_id;
2427 napi_schedule(&rx_ring->napi);
2428 return IRQ_HANDLED;
2429}
2430
2431/* This handles a fatal error, MPI activity, and the default
2432 * rx_ring in an MSI-X multiple vector environment.
2433 * In an MSI/Legacy environment it also processes the rest of
2434 * the rx_rings.
2435 */
2436static irqreturn_t qlge_isr(int irq, void *dev_id)
2437{
2438 struct rx_ring *rx_ring = dev_id;
2439 struct ql_adapter *qdev = rx_ring->qdev;
2440 struct intr_context *intr_context = &qdev->intr_context[0];
2441 u32 var;
2442 int work_done = 0;
2443
2444 spin_lock(&qdev->hw_lock);
2445 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2446 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2447 "Shared Interrupt, Not ours!\n");
2448 spin_unlock(&qdev->hw_lock);
2449 return IRQ_NONE;
2450 }
2451 spin_unlock(&qdev->hw_lock);
2452
2453 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2454
2455 /*
2456 * Check for fatal error.
2457 */
2458 if (var & STS_FE) {
2459 ql_queue_asic_error(qdev);
2460 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2461 var = ql_read32(qdev, ERR_STS);
2462 netdev_err(qdev->ndev, "Resetting chip. "
2463 "Error Status Register = 0x%x\n", var);
2464 return IRQ_HANDLED;
2465 }
2466
2467 /*
2468 * Check MPI processor activity.
2469 */
2470 if ((var & STS_PI) &&
2471 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2472 /*
2473 * We've got an async event or mailbox completion.
2474 * Handle it and clear the source of the interrupt.
2475 */
2476 netif_err(qdev, intr, qdev->ndev,
2477 "Got MPI processor interrupt.\n");
2478 ql_disable_completion_interrupt(qdev, intr_context->intr);
2479 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2480 queue_delayed_work_on(smp_processor_id(),
2481 qdev->workqueue, &qdev->mpi_work, 0);
2482 work_done++;
2483 }
2484
2485 /*
2486 * Get the bit-mask that shows the active queues for this
2487 * pass. Compare it to the queues that this irq services
2488 * and call napi if there's a match.
2489 */
2490 var = ql_read32(qdev, ISR1);
2491 if (var & intr_context->irq_mask) {
2492 netif_info(qdev, intr, qdev->ndev,
2493 "Waking handler for rx_ring[0].\n");
2494 ql_disable_completion_interrupt(qdev, intr_context->intr);
2495 napi_schedule(&rx_ring->napi);
2496 work_done++;
2497 }
2498 ql_enable_completion_interrupt(qdev, intr_context->intr);
2499 return work_done ? IRQ_HANDLED : IRQ_NONE;
2500}
2501
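/* Set up a TSO request for a GSO skb. Returns 1 if the IOCB was filled
 * out for TSO, 0 if the skb does not need TSO, or a negative errno if
 * the cloned header could not be expanded.
 */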
2502static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2503{
2504
2505 if (skb_is_gso(skb)) {
2506 int err;
2507 if (skb_header_cloned(skb)) {
2508 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2509 if (err)
2510 return err;
2511 }
2512
2513 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2514 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2515 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2516 mac_iocb_ptr->total_hdrs_len =
2517 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2518 mac_iocb_ptr->net_trans_offset =
2519 cpu_to_le16(skb_network_offset(skb) |
2520 skb_transport_offset(skb)
2521 << OB_MAC_TRANSPORT_HDR_SHIFT);
2522 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2523 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2524 if (likely(skb->protocol == htons(ETH_P_IP))) {
2525 struct iphdr *iph = ip_hdr(skb);
2526 iph->check = 0;
2527 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2528 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2529 iph->daddr, 0,
2530 IPPROTO_TCP,
2531 0);
2532 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2533 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2534 tcp_hdr(skb)->check =
2535 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2536 &ipv6_hdr(skb)->daddr,
2537 0, IPPROTO_TCP, 0);
2538 }
2539 return 1;
2540 }
2541 return 0;
2542}
2543
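/* Set up hardware TX checksum offload for a non-TSO frame. The
 * pseudo-header checksum is seeded here; the chip fills in the final
 * TCP or UDP checksum.
 */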
2544static void ql_hw_csum_setup(struct sk_buff *skb,
2545 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2546{
2547 int len;
2548 struct iphdr *iph = ip_hdr(skb);
2549 __sum16 *check;
2550 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2551 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2552 mac_iocb_ptr->net_trans_offset =
2553 cpu_to_le16(skb_network_offset(skb) |
2554 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2555
2556 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2557 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2558 if (likely(iph->protocol == IPPROTO_TCP)) {
2559 check = &(tcp_hdr(skb)->check);
2560 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2561 mac_iocb_ptr->total_hdrs_len =
2562 cpu_to_le16(skb_transport_offset(skb) +
2563 (tcp_hdr(skb)->doff << 2));
2564 } else {
2565 check = &(udp_hdr(skb)->check);
2566 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2567 mac_iocb_ptr->total_hdrs_len =
2568 cpu_to_le16(skb_transport_offset(skb) +
2569 sizeof(struct udphdr));
2570 }
2571 *check = ~csum_tcpudp_magic(iph->saddr,
2572 iph->daddr, len, iph->protocol, 0);
2573}
2574
2575static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2576{
2577 struct tx_ring_desc *tx_ring_desc;
2578 struct ob_mac_iocb_req *mac_iocb_ptr;
2579 struct ql_adapter *qdev = netdev_priv(ndev);
2580 int tso;
2581 struct tx_ring *tx_ring;
2582 u32 tx_ring_idx = (u32) skb->queue_mapping;
2583
2584 tx_ring = &qdev->tx_ring[tx_ring_idx];
2585
2586 if (skb_padto(skb, ETH_ZLEN))
2587 return NETDEV_TX_OK;
2588
2589 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2590 netif_info(qdev, tx_queued, qdev->ndev,
2591			   "%s: shutting down tx queue %d due to lack of resources.\n",
2592 __func__, tx_ring_idx);
2593 netif_stop_subqueue(ndev, tx_ring->wq_id);
2594 atomic_inc(&tx_ring->queue_stopped);
2595 tx_ring->tx_errors++;
2596 return NETDEV_TX_BUSY;
2597 }
2598 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2599 mac_iocb_ptr = tx_ring_desc->queue_entry;
2600 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2601
2602 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2603 mac_iocb_ptr->tid = tx_ring_desc->index;
2604	/* Store the tx queue index for this IO in the IOCB so that the
2605	 * completion handler can establish the context.
2606	 */
2607 mac_iocb_ptr->txq_idx = tx_ring_idx;
2608 tx_ring_desc->skb = skb;
2609
2610 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2611
2612 if (vlan_tx_tag_present(skb)) {
2613 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2614 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2615 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2616 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2617 }
2618 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2619 if (tso < 0) {
2620 dev_kfree_skb_any(skb);
2621 return NETDEV_TX_OK;
2622 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2623 ql_hw_csum_setup(skb,
2624 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2625 }
2626 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2627 NETDEV_TX_OK) {
2628 netif_err(qdev, tx_queued, qdev->ndev,
2629 "Could not map the segments.\n");
2630 tx_ring->tx_errors++;
2631 return NETDEV_TX_BUSY;
2632 }
2633 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2634 tx_ring->prod_idx++;
2635 if (tx_ring->prod_idx == tx_ring->wq_len)
2636 tx_ring->prod_idx = 0;
2637 wmb();
2638
2639 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2640 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2641 "tx queued, slot %d, len %d\n",
2642 tx_ring->prod_idx, skb->len);
2643
2644 atomic_dec(&tx_ring->tx_count);
2645 return NETDEV_TX_OK;
2646}
2647
2648
2649static void ql_free_shadow_space(struct ql_adapter *qdev)
2650{
2651 if (qdev->rx_ring_shadow_reg_area) {
2652 pci_free_consistent(qdev->pdev,
2653 PAGE_SIZE,
2654 qdev->rx_ring_shadow_reg_area,
2655 qdev->rx_ring_shadow_reg_dma);
2656 qdev->rx_ring_shadow_reg_area = NULL;
2657 }
2658 if (qdev->tx_ring_shadow_reg_area) {
2659 pci_free_consistent(qdev->pdev,
2660 PAGE_SIZE,
2661 qdev->tx_ring_shadow_reg_area,
2662 qdev->tx_ring_shadow_reg_dma);
2663 qdev->tx_ring_shadow_reg_area = NULL;
2664 }
2665}
2666
2667static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2668{
2669 qdev->rx_ring_shadow_reg_area =
2670 pci_alloc_consistent(qdev->pdev,
2671 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2672 if (qdev->rx_ring_shadow_reg_area == NULL) {
2673 netif_err(qdev, ifup, qdev->ndev,
2674 "Allocation of RX shadow space failed.\n");
2675 return -ENOMEM;
2676 }
2677 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2678 qdev->tx_ring_shadow_reg_area =
2679 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2680 &qdev->tx_ring_shadow_reg_dma);
2681 if (qdev->tx_ring_shadow_reg_area == NULL) {
2682 netif_err(qdev, ifup, qdev->ndev,
2683 "Allocation of TX shadow space failed.\n");
2684 goto err_wqp_sh_area;
2685 }
2686 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2687 return 0;
2688
2689err_wqp_sh_area:
2690 pci_free_consistent(qdev->pdev,
2691 PAGE_SIZE,
2692 qdev->rx_ring_shadow_reg_area,
2693 qdev->rx_ring_shadow_reg_dma);
2694 return -ENOMEM;
2695}
2696
2697static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2698{
2699 struct tx_ring_desc *tx_ring_desc;
2700 int i;
2701 struct ob_mac_iocb_req *mac_iocb_ptr;
2702
2703 mac_iocb_ptr = tx_ring->wq_base;
2704 tx_ring_desc = tx_ring->q;
2705 for (i = 0; i < tx_ring->wq_len; i++) {
2706 tx_ring_desc->index = i;
2707 tx_ring_desc->skb = NULL;
2708 tx_ring_desc->queue_entry = mac_iocb_ptr;
2709 mac_iocb_ptr++;
2710 tx_ring_desc++;
2711 }
2712 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2713 atomic_set(&tx_ring->queue_stopped, 0);
2714}
2715
2716static void ql_free_tx_resources(struct ql_adapter *qdev,
2717 struct tx_ring *tx_ring)
2718{
2719 if (tx_ring->wq_base) {
2720 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2721 tx_ring->wq_base, tx_ring->wq_base_dma);
2722 tx_ring->wq_base = NULL;
2723 }
2724 kfree(tx_ring->q);
2725 tx_ring->q = NULL;
2726}
2727
2728static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2729 struct tx_ring *tx_ring)
2730{
2731 tx_ring->wq_base =
2732 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2733 &tx_ring->wq_base_dma);
2734
2735 if ((tx_ring->wq_base == NULL) ||
2736 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
2737 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2738 return -ENOMEM;
2739 }
2740 tx_ring->q =
2741 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2742 if (tx_ring->q == NULL)
2743 goto err;
2744
2745 return 0;
2746err:
2747 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2748 tx_ring->wq_base, tx_ring->wq_base_dma);
2749 return -ENOMEM;
2750}
2751
2752static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2753{
2754 struct bq_desc *lbq_desc;
2755
2756 uint32_t curr_idx, clean_idx;
2757
2758 curr_idx = rx_ring->lbq_curr_idx;
2759 clean_idx = rx_ring->lbq_clean_idx;
2760 while (curr_idx != clean_idx) {
2761 lbq_desc = &rx_ring->lbq[curr_idx];
2762
2763 if (lbq_desc->p.pg_chunk.last_flag) {
2764 pci_unmap_page(qdev->pdev,
2765 lbq_desc->p.pg_chunk.map,
2766 ql_lbq_block_size(qdev),
2767 PCI_DMA_FROMDEVICE);
2768 lbq_desc->p.pg_chunk.last_flag = 0;
2769 }
2770
2771 put_page(lbq_desc->p.pg_chunk.page);
2772 lbq_desc->p.pg_chunk.page = NULL;
2773
2774 if (++curr_idx == rx_ring->lbq_len)
2775 curr_idx = 0;
2776
2777 }
2778}
2779
2780static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2781{
2782 int i;
2783 struct bq_desc *sbq_desc;
2784
2785 for (i = 0; i < rx_ring->sbq_len; i++) {
2786 sbq_desc = &rx_ring->sbq[i];
2787 if (sbq_desc == NULL) {
2788 netif_err(qdev, ifup, qdev->ndev,
2789 "sbq_desc %d is NULL.\n", i);
2790 return;
2791 }
2792 if (sbq_desc->p.skb) {
2793 pci_unmap_single(qdev->pdev,
2794 dma_unmap_addr(sbq_desc, mapaddr),
2795 dma_unmap_len(sbq_desc, maplen),
2796 PCI_DMA_FROMDEVICE);
2797 dev_kfree_skb(sbq_desc->p.skb);
2798 sbq_desc->p.skb = NULL;
2799 }
2800 }
2801}
2802
2803/* Free all large and small rx buffers associated
2804 * with the completion queues for this device.
2805 */
2806static void ql_free_rx_buffers(struct ql_adapter *qdev)
2807{
2808 int i;
2809 struct rx_ring *rx_ring;
2810
2811 for (i = 0; i < qdev->rx_ring_count; i++) {
2812 rx_ring = &qdev->rx_ring[i];
2813 if (rx_ring->lbq)
2814 ql_free_lbq_buffers(qdev, rx_ring);
2815 if (rx_ring->sbq)
2816 ql_free_sbq_buffers(qdev, rx_ring);
2817 }
2818}
2819
2820static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2821{
2822 struct rx_ring *rx_ring;
2823 int i;
2824
2825 for (i = 0; i < qdev->rx_ring_count; i++) {
2826 rx_ring = &qdev->rx_ring[i];
2827 if (rx_ring->type != TX_Q)
2828 ql_update_buffer_queues(qdev, rx_ring);
2829 }
2830}
2831
2832static void ql_init_lbq_ring(struct ql_adapter *qdev,
2833 struct rx_ring *rx_ring)
2834{
2835 int i;
2836 struct bq_desc *lbq_desc;
2837 __le64 *bq = rx_ring->lbq_base;
2838
2839 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2840 for (i = 0; i < rx_ring->lbq_len; i++) {
2841 lbq_desc = &rx_ring->lbq[i];
2842 memset(lbq_desc, 0, sizeof(*lbq_desc));
2843 lbq_desc->index = i;
2844 lbq_desc->addr = bq;
2845 bq++;
2846 }
2847}
2848
2849static void ql_init_sbq_ring(struct ql_adapter *qdev,
2850 struct rx_ring *rx_ring)
2851{
2852 int i;
2853 struct bq_desc *sbq_desc;
2854 __le64 *bq = rx_ring->sbq_base;
2855
2856 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2857 for (i = 0; i < rx_ring->sbq_len; i++) {
2858 sbq_desc = &rx_ring->sbq[i];
2859 memset(sbq_desc, 0, sizeof(*sbq_desc));
2860 sbq_desc->index = i;
2861 sbq_desc->addr = bq;
2862 bq++;
2863 }
2864}
2865
2866static void ql_free_rx_resources(struct ql_adapter *qdev,
2867 struct rx_ring *rx_ring)
2868{
2869 /* Free the small buffer queue. */
2870 if (rx_ring->sbq_base) {
2871 pci_free_consistent(qdev->pdev,
2872 rx_ring->sbq_size,
2873 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2874 rx_ring->sbq_base = NULL;
2875 }
2876
2877 /* Free the small buffer queue control blocks. */
2878 kfree(rx_ring->sbq);
2879 rx_ring->sbq = NULL;
2880
2881 /* Free the large buffer queue. */
2882 if (rx_ring->lbq_base) {
2883 pci_free_consistent(qdev->pdev,
2884 rx_ring->lbq_size,
2885 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2886 rx_ring->lbq_base = NULL;
2887 }
2888
2889 /* Free the large buffer queue control blocks. */
2890 kfree(rx_ring->lbq);
2891 rx_ring->lbq = NULL;
2892
2893 /* Free the rx queue. */
2894 if (rx_ring->cq_base) {
2895 pci_free_consistent(qdev->pdev,
2896 rx_ring->cq_size,
2897 rx_ring->cq_base, rx_ring->cq_base_dma);
2898 rx_ring->cq_base = NULL;
2899 }
2900}
2901
2902/* Allocate queues and buffers for this completions queue based
2903 * on the values in the parameter structure. */
2904static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2905 struct rx_ring *rx_ring)
2906{
2907
2908 /*
2909 * Allocate the completion queue for this rx_ring.
2910 */
2911 rx_ring->cq_base =
2912 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2913 &rx_ring->cq_base_dma);
2914
2915 if (rx_ring->cq_base == NULL) {
2916 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2917 return -ENOMEM;
2918 }
2919
2920 if (rx_ring->sbq_len) {
2921 /*
2922 * Allocate small buffer queue.
2923 */
2924 rx_ring->sbq_base =
2925 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2926 &rx_ring->sbq_base_dma);
2927
2928 if (rx_ring->sbq_base == NULL) {
2929 netif_err(qdev, ifup, qdev->ndev,
2930 "Small buffer queue allocation failed.\n");
2931 goto err_mem;
2932 }
2933
2934 /*
2935 * Allocate small buffer queue control blocks.
2936 */
2937 rx_ring->sbq =
2938 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2939 GFP_KERNEL);
2940 if (rx_ring->sbq == NULL) {
2941 netif_err(qdev, ifup, qdev->ndev,
2942 "Small buffer queue control block allocation failed.\n");
2943 goto err_mem;
2944 }
2945
2946 ql_init_sbq_ring(qdev, rx_ring);
2947 }
2948
2949 if (rx_ring->lbq_len) {
2950 /*
2951 * Allocate large buffer queue.
2952 */
2953 rx_ring->lbq_base =
2954 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2955 &rx_ring->lbq_base_dma);
2956
2957 if (rx_ring->lbq_base == NULL) {
2958 netif_err(qdev, ifup, qdev->ndev,
2959 "Large buffer queue allocation failed.\n");
2960 goto err_mem;
2961 }
2962 /*
2963 * Allocate large buffer queue control blocks.
2964 */
2965 rx_ring->lbq =
2966 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2967 GFP_KERNEL);
2968 if (rx_ring->lbq == NULL) {
2969 netif_err(qdev, ifup, qdev->ndev,
2970 "Large buffer queue control block allocation failed.\n");
2971 goto err_mem;
2972 }
2973
2974 ql_init_lbq_ring(qdev, rx_ring);
2975 }
2976
2977 return 0;
2978
2979err_mem:
2980 ql_free_rx_resources(qdev, rx_ring);
2981 return -ENOMEM;
2982}
2983
2984static void ql_tx_ring_clean(struct ql_adapter *qdev)
2985{
2986 struct tx_ring *tx_ring;
2987 struct tx_ring_desc *tx_ring_desc;
2988 int i, j;
2989
2990 /*
2991 * Loop through all queues and free
2992 * any resources.
2993 */
2994 for (j = 0; j < qdev->tx_ring_count; j++) {
2995 tx_ring = &qdev->tx_ring[j];
2996 for (i = 0; i < tx_ring->wq_len; i++) {
2997 tx_ring_desc = &tx_ring->q[i];
2998 if (tx_ring_desc && tx_ring_desc->skb) {
2999 netif_err(qdev, ifdown, qdev->ndev,
3000 "Freeing lost SKB %p, from queue %d, index %d.\n",
3001 tx_ring_desc->skb, j,
3002 tx_ring_desc->index);
3003 ql_unmap_send(qdev, tx_ring_desc,
3004 tx_ring_desc->map_cnt);
3005 dev_kfree_skb(tx_ring_desc->skb);
3006 tx_ring_desc->skb = NULL;
3007 }
3008 }
3009 }
3010}
3011
3012static void ql_free_mem_resources(struct ql_adapter *qdev)
3013{
3014 int i;
3015
3016 for (i = 0; i < qdev->tx_ring_count; i++)
3017 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3018 for (i = 0; i < qdev->rx_ring_count; i++)
3019 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3020 ql_free_shadow_space(qdev);
3021}
3022
3023static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3024{
3025 int i;
3026
3027 /* Allocate space for our shadow registers and such. */
3028 if (ql_alloc_shadow_space(qdev))
3029 return -ENOMEM;
3030
3031 for (i = 0; i < qdev->rx_ring_count; i++) {
3032 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3033 netif_err(qdev, ifup, qdev->ndev,
3034 "RX resource allocation failed.\n");
3035 goto err_mem;
3036 }
3037 }
3038 /* Allocate tx queue resources */
3039 for (i = 0; i < qdev->tx_ring_count; i++) {
3040 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3041 netif_err(qdev, ifup, qdev->ndev,
3042 "TX resource allocation failed.\n");
3043 goto err_mem;
3044 }
3045 }
3046 return 0;
3047
3048err_mem:
3049 ql_free_mem_resources(qdev);
3050 return -ENOMEM;
3051}
3052
3053/* Set up the rx ring control block and pass it to the chip.
3054 * The control block is defined as
3055 * "Completion Queue Initialization Control Block", or cqicb.
3056 */
3057static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3058{
3059 struct cqicb *cqicb = &rx_ring->cqicb;
3060 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3061 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3062 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3063 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3064 void __iomem *doorbell_area =
3065 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3066 int err = 0;
3067 u16 bq_len;
3068 u64 tmp;
3069 __le64 *base_indirect_ptr;
3070 int page_entries;
3071
3072 /* Set up the shadow registers for this ring. */
3073 rx_ring->prod_idx_sh_reg = shadow_reg;
3074 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3075 *rx_ring->prod_idx_sh_reg = 0;
3076 shadow_reg += sizeof(u64);
3077 shadow_reg_dma += sizeof(u64);
3078 rx_ring->lbq_base_indirect = shadow_reg;
3079 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3080 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3081 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3082 rx_ring->sbq_base_indirect = shadow_reg;
3083 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
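	/* With the default 512-entry buffer queues each indirection list
	 * fits in a single 4K doorbell page, so the per-ring shadow area
	 * is just three u64 words: the producer index plus one page
	 * pointer each for the lbq and sbq (see RX_RING_SHADOW_SPACE).
	 */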
3084
3085 /* PCI doorbell mem area + 0x00 for consumer index register */
3086 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3087 rx_ring->cnsmr_idx = 0;
3088 rx_ring->curr_entry = rx_ring->cq_base;
3089
3090 /* PCI doorbell mem area + 0x04 for valid register */
3091 rx_ring->valid_db_reg = doorbell_area + 0x04;
3092
3093 /* PCI doorbell mem area + 0x18 for large buffer consumer */
3094 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3095
3096 /* PCI doorbell mem area + 0x1c */
3097 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3098
3099 memset((void *)cqicb, 0, sizeof(struct cqicb));
3100 cqicb->msix_vect = rx_ring->irq;
3101
3102 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3103 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3104
3105 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3106
3107 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3108
3109 /*
3110 * Set up the control block load flags.
3111 */
3112 cqicb->flags = FLAGS_LC | /* Load queue base address */
3113 FLAGS_LV | /* Load MSI-X vector */
3114 FLAGS_LI; /* Load irq delay values */
3115 if (rx_ring->lbq_len) {
3116 cqicb->flags |= FLAGS_LL; /* Load lbq values */
3117 tmp = (u64)rx_ring->lbq_base_dma;
3118 base_indirect_ptr = rx_ring->lbq_base_indirect;
3119 page_entries = 0;
3120 do {
3121 *base_indirect_ptr = cpu_to_le64(tmp);
3122 tmp += DB_PAGE_SIZE;
3123 base_indirect_ptr++;
3124 page_entries++;
3125 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3126 cqicb->lbq_addr =
3127 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3128 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3129 (u16) rx_ring->lbq_buf_size;
3130 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3131 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3132 (u16) rx_ring->lbq_len;
3133 cqicb->lbq_len = cpu_to_le16(bq_len);
3134 rx_ring->lbq_prod_idx = 0;
3135 rx_ring->lbq_curr_idx = 0;
3136 rx_ring->lbq_clean_idx = 0;
3137 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3138 }
3139 if (rx_ring->sbq_len) {
3140 cqicb->flags |= FLAGS_LS; /* Load sbq values */
3141 tmp = (u64)rx_ring->sbq_base_dma;
3142 base_indirect_ptr = rx_ring->sbq_base_indirect;
3143 page_entries = 0;
3144 do {
3145 *base_indirect_ptr = cpu_to_le64(tmp);
3146 tmp += DB_PAGE_SIZE;
3147 base_indirect_ptr++;
3148 page_entries++;
3149 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3150 cqicb->sbq_addr =
3151 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3152 cqicb->sbq_buf_size =
3153 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3154 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3155 (u16) rx_ring->sbq_len;
3156 cqicb->sbq_len = cpu_to_le16(bq_len);
3157 rx_ring->sbq_prod_idx = 0;
3158 rx_ring->sbq_curr_idx = 0;
3159 rx_ring->sbq_clean_idx = 0;
3160 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3161 }
3162 switch (rx_ring->type) {
3163 case TX_Q:
3164 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3165 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3166 break;
3167 case RX_Q:
3168 /* Inbound completion handling rx_rings run in
3169 * separate NAPI contexts.
3170 */
3171 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3172 64);
3173 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3174 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3175 break;
3176 default:
3177 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3178 "Invalid rx_ring->type = %d.\n", rx_ring->type);
3179 }
3180 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3181 "Initializing rx work queue.\n");
3182 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3183 CFG_LCQ, rx_ring->cq_id);
3184 if (err) {
3185 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3186 return err;
3187 }
3188 return err;
3189}
3190
3191static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3192{
3193 struct wqicb *wqicb = (struct wqicb *)tx_ring;
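	/* The cast above relies on the wqicb being the first member of
	 * struct tx_ring (see the tx_ring definition in qlge.h).
	 */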
3194 void __iomem *doorbell_area =
3195 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3196 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3197 (tx_ring->wq_id * sizeof(u64));
3198 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3199 (tx_ring->wq_id * sizeof(u64));
3200 int err = 0;
3201
3202 /*
3203 * Assign doorbell registers for this tx_ring.
3204 */
3205 /* TX PCI doorbell mem area for tx producer index */
3206 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3207 tx_ring->prod_idx = 0;
3208 /* TX PCI doorbell mem area + 0x04 */
3209 tx_ring->valid_db_reg = doorbell_area + 0x04;
3210
3211 /*
3212 * Assign shadow registers for this tx_ring.
3213 */
3214 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3215 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3216
3217 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3218 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3219 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3220 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3221 wqicb->rid = 0;
3222 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3223
3224 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3225
3226 ql_init_tx_ring(qdev, tx_ring);
3227
3228 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3229 (u16) tx_ring->wq_id);
3230 if (err) {
3231 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3232 return err;
3233 }
3234 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3235 "Successfully loaded WQICB.\n");
3236 return err;
3237}
3238
3239static void ql_disable_msix(struct ql_adapter *qdev)
3240{
3241 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3242 pci_disable_msix(qdev->pdev);
3243 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3244 kfree(qdev->msi_x_entry);
3245 qdev->msi_x_entry = NULL;
3246 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3247 pci_disable_msi(qdev->pdev);
3248 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3249 }
3250}
3251
3252/* We start by trying to get the number of vectors
3253 * stored in qdev->intr_count. If we don't get that
3254 * many then we reduce the count and try again.
3255 */
3256static void ql_enable_msix(struct ql_adapter *qdev)
3257{
3258 int i, err;
3259
3260 /* Get the MSIX vectors. */
3261 if (qlge_irq_type == MSIX_IRQ) {
3262 /* Try to alloc space for the msix struct,
3263 * if it fails then go to MSI/legacy.
3264 */
3265 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3266 sizeof(struct msix_entry),
3267 GFP_KERNEL);
3268 if (!qdev->msi_x_entry) {
3269 qlge_irq_type = MSI_IRQ;
3270 goto msi;
3271 }
3272
3273 for (i = 0; i < qdev->intr_count; i++)
3274 qdev->msi_x_entry[i].entry = i;
3275
3276 /* Loop to get our vectors. We start with
3277 * what we want and settle for what we get.
3278 */
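		/* pci_enable_msix() in this kernel returns 0 on success,
		 * a positive count of the vectors actually available (in
		 * which case we retry with that count), or a negative
		 * errno on failure.
		 */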
3279 do {
3280 err = pci_enable_msix(qdev->pdev,
3281 qdev->msi_x_entry, qdev->intr_count);
3282 if (err > 0)
3283 qdev->intr_count = err;
3284 } while (err > 0);
3285
3286 if (err < 0) {
3287 kfree(qdev->msi_x_entry);
3288 qdev->msi_x_entry = NULL;
3289 netif_warn(qdev, ifup, qdev->ndev,
3290 "MSI-X Enable failed, trying MSI.\n");
3291 qdev->intr_count = 1;
3292 qlge_irq_type = MSI_IRQ;
3293 } else if (err == 0) {
3294 set_bit(QL_MSIX_ENABLED, &qdev->flags);
3295 netif_info(qdev, ifup, qdev->ndev,
3296 "MSI-X Enabled, got %d vectors.\n",
3297 qdev->intr_count);
3298 return;
3299 }
3300 }
3301msi:
3302 qdev->intr_count = 1;
3303 if (qlge_irq_type == MSI_IRQ) {
3304 if (!pci_enable_msi(qdev->pdev)) {
3305 set_bit(QL_MSI_ENABLED, &qdev->flags);
3306 netif_info(qdev, ifup, qdev->ndev,
3307 "Running with MSI interrupts.\n");
3308 return;
3309 }
3310 }
3311 qlge_irq_type = LEG_IRQ;
3312 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3313 "Running with legacy interrupts.\n");
3314}
3315
3316/* Each vector services 1 RSS ring and 1 or more
3317 * TX completion rings. This function loops through
3318 * the TX completion rings and assigns the vector that
3319 * will service it. An example would be if there are
3320 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3321 * This would mean that vector 0 would service RSS ring 0
3322 * and TX completion rings 0,1,2 and 3. Vector 1 would
3323 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3324 */
3325static void ql_set_tx_vect(struct ql_adapter *qdev)
3326{
3327 int i, j, vect;
3328 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3329
3330 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3331 /* Assign irq vectors to TX rx_rings.*/
3332 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3333 i < qdev->rx_ring_count; i++) {
3334 if (j == tx_rings_per_vector) {
3335 vect++;
3336 j = 0;
3337 }
3338 qdev->rx_ring[i].irq = vect;
3339 j++;
3340 }
3341 } else {
3342 /* For single vector all rings have an irq
3343 * of zero.
3344 */
3345 for (i = 0; i < qdev->rx_ring_count; i++)
3346 qdev->rx_ring[i].irq = 0;
3347 }
3348}
3349
3350/* Set the interrupt mask for this vector. Each vector
3351 * will service 1 RSS ring and 1 or more TX completion
3352 * rings. This function sets up a bit mask per vector
3353 * that indicates which rings it services.
3354 */
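/* Continuing the 2-vector / 8-TX-completion-ring example above: vector 0
 * would end up with a mask covering cq_ids 0 and 2-5, and vector 1 with
 * cq_ids 1 and 6-9 (illustrative, assuming cq_id equals the ring index
 * as set up in ql_configure_rings()).
 */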
3355static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3356{
3357 int j, vect = ctx->intr;
3358 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3359
3360 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3361 /* Add the RSS ring serviced by this vector
3362 * to the mask.
3363 */
3364 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3365 /* Add the TX ring(s) serviced by this vector
3366 * to the mask. */
3367 for (j = 0; j < tx_rings_per_vector; j++) {
3368 ctx->irq_mask |=
3369 (1 << qdev->rx_ring[qdev->rss_ring_count +
3370 (vect * tx_rings_per_vector) + j].cq_id);
3371 }
3372 } else {
3373 /* For single vector we just shift each queue's
3374 * ID into the mask.
3375 */
3376 for (j = 0; j < qdev->rx_ring_count; j++)
3377 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3378 }
3379}
3380
3381/*
3382 * Here we build the intr_context structures based on
3383 * our rx_ring count and intr vector count.
3384 * The intr_context structure is used to hook each vector
3385 * to possibly different handlers.
3386 */
3387static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3388{
3389 int i = 0;
3390 struct intr_context *intr_context = &qdev->intr_context[0];
3391
3392 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3393		/* Each rx_ring has its
3394 * own intr_context since we have separate
3395 * vectors for each queue.
3396 */
3397 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3398 qdev->rx_ring[i].irq = i;
3399 intr_context->intr = i;
3400 intr_context->qdev = qdev;
3401 /* Set up this vector's bit-mask that indicates
3402 * which queues it services.
3403 */
3404 ql_set_irq_mask(qdev, intr_context);
3405 /*
3406			 * We set up each vector's enable/disable/read bits so
3407			 * there are no bit/mask calculations in the critical path.
3408 */
3409 intr_context->intr_en_mask =
3410 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3411 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3412 | i;
3413 intr_context->intr_dis_mask =
3414 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3415 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3416 INTR_EN_IHD | i;
3417 intr_context->intr_read_mask =
3418 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3419 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3420 i;
3421 if (i == 0) {
3422 /* The first vector/queue handles
3423 * broadcast/multicast, fatal errors,
3424				 * and firmware events. This is in addition
3425 * to normal inbound NAPI processing.
3426 */
3427 intr_context->handler = qlge_isr;
3428 sprintf(intr_context->name, "%s-rx-%d",
3429 qdev->ndev->name, i);
3430 } else {
3431 /*
3432 * Inbound queues handle unicast frames only.
3433 */
3434 intr_context->handler = qlge_msix_rx_isr;
3435 sprintf(intr_context->name, "%s-rx-%d",
3436 qdev->ndev->name, i);
3437 }
3438 }
3439 } else {
3440 /*
3441 * All rx_rings use the same intr_context since
3442 * there is only one vector.
3443 */
3444 intr_context->intr = 0;
3445 intr_context->qdev = qdev;
3446 /*
3447		 * We set up each vector's enable/disable/read bits so
3448		 * there are no bit/mask calculations in the critical path.
3449 */
3450 intr_context->intr_en_mask =
3451 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3452 intr_context->intr_dis_mask =
3453 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3454 INTR_EN_TYPE_DISABLE;
3455 intr_context->intr_read_mask =
3456 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3457 /*
3458 * Single interrupt means one handler for all rings.
3459 */
3460 intr_context->handler = qlge_isr;
3461 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3462 /* Set up this vector's bit-mask that indicates
3463 * which queues it services. In this case there is
3464 * a single vector so it will service all RSS and
3465 * TX completion rings.
3466 */
3467 ql_set_irq_mask(qdev, intr_context);
3468 }
3469 /* Tell the TX completion rings which MSIx vector
3470 * they will be using.
3471 */
3472 ql_set_tx_vect(qdev);
3473}
3474
3475static void ql_free_irq(struct ql_adapter *qdev)
3476{
3477 int i;
3478 struct intr_context *intr_context = &qdev->intr_context[0];
3479
3480 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3481 if (intr_context->hooked) {
3482 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3483 free_irq(qdev->msi_x_entry[i].vector,
3484 &qdev->rx_ring[i]);
3485 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3486 "freeing msix interrupt %d.\n", i);
3487 } else {
3488 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3489 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3490 "freeing msi interrupt %d.\n", i);
3491 }
3492 }
3493 }
3494 ql_disable_msix(qdev);
3495}
3496
3497static int ql_request_irq(struct ql_adapter *qdev)
3498{
3499 int i;
3500 int status = 0;
3501 struct pci_dev *pdev = qdev->pdev;
3502 struct intr_context *intr_context = &qdev->intr_context[0];
3503
3504 ql_resolve_queues_to_irqs(qdev);
3505
3506 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3507 atomic_set(&intr_context->irq_cnt, 0);
3508 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3509 status = request_irq(qdev->msi_x_entry[i].vector,
3510 intr_context->handler,
3511 0,
3512 intr_context->name,
3513 &qdev->rx_ring[i]);
3514 if (status) {
3515 netif_err(qdev, ifup, qdev->ndev,
3516 "Failed request for MSIX interrupt %d.\n",
3517 i);
3518 goto err_irq;
3519 } else {
3520 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3521 "Hooked intr %d, queue type %s, with name %s.\n",
3522 i,
3523 qdev->rx_ring[i].type == DEFAULT_Q ?
3524 "DEFAULT_Q" :
3525 qdev->rx_ring[i].type == TX_Q ?
3526 "TX_Q" :
3527 qdev->rx_ring[i].type == RX_Q ?
3528 "RX_Q" : "",
3529 intr_context->name);
3530 }
3531 } else {
3532 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3533 "trying msi or legacy interrupts.\n");
3534 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3535 "%s: irq = %d.\n", __func__, pdev->irq);
3536 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3537 "%s: context->name = %s.\n", __func__,
3538 intr_context->name);
3539 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3540 "%s: dev_id = 0x%p.\n", __func__,
3541 &qdev->rx_ring[0]);
3542 status =
3543 request_irq(pdev->irq, qlge_isr,
3544 test_bit(QL_MSI_ENABLED,
3545 &qdev->
3546 flags) ? 0 : IRQF_SHARED,
3547 intr_context->name, &qdev->rx_ring[0]);
3548 if (status)
3549 goto err_irq;
3550
3551 netif_err(qdev, ifup, qdev->ndev,
3552 "Hooked intr %d, queue type %s, with name %s.\n",
3553 i,
3554 qdev->rx_ring[0].type == DEFAULT_Q ?
3555 "DEFAULT_Q" :
3556 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3557 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3558 intr_context->name);
3559 }
3560 intr_context->hooked = 1;
3561 }
3562 return status;
3563err_irq:
3564	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3565 ql_free_irq(qdev);
3566 return status;
3567}
3568
3569static int ql_start_rss(struct ql_adapter *qdev)
3570{
3571 static const u8 init_hash_seed[] = {
3572 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3573 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3574 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3575 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3576 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3577 };
3578 struct ricb *ricb = &qdev->ricb;
3579 int status = 0;
3580 int i;
3581 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3582
3583 memset((void *)ricb, 0, sizeof(*ricb));
3584
3585 ricb->base_cq = RSS_L4K;
3586 ricb->flags =
3587 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3588 ricb->mask = cpu_to_le16((u16)(0x3ff));
3589
3590 /*
3591 * Fill out the Indirection Table.
3592 */
3593 for (i = 0; i < 1024; i++)
3594 hash_id[i] = (i & (qdev->rss_ring_count - 1));
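	/* Note: the mask-based fill spreads entries evenly only when
	 * rss_ring_count is a power of two; e.g. with 4 RSS rings the
	 * 1024 table entries simply cycle through cq_ids 0-3.
	 */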
3595
3596 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3597 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3598
3599 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
3600
3601 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3602 if (status) {
3603 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3604 return status;
3605 }
3606 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3607 "Successfully loaded RICB.\n");
3608 return status;
3609}
3610
3611static int ql_clear_routing_entries(struct ql_adapter *qdev)
3612{
3613 int i, status = 0;
3614
3615 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3616 if (status)
3617 return status;
3618 /* Clear all the entries in the routing table. */
3619 for (i = 0; i < 16; i++) {
3620 status = ql_set_routing_reg(qdev, i, 0, 0);
3621 if (status) {
3622 netif_err(qdev, ifup, qdev->ndev,
3623 "Failed to init routing register for CAM packets.\n");
3624 break;
3625 }
3626 }
3627 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3628 return status;
3629}
3630
3631/* Initialize the frame-to-queue routing. */
3632static int ql_route_initialize(struct ql_adapter *qdev)
3633{
3634 int status = 0;
3635
3636 /* Clear all the entries in the routing table. */
3637 status = ql_clear_routing_entries(qdev);
3638 if (status)
3639 return status;
3640
3641 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3642 if (status)
3643 return status;
3644
3645 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3646 RT_IDX_IP_CSUM_ERR, 1);
3647 if (status) {
3648 netif_err(qdev, ifup, qdev->ndev,
3649 "Failed to init routing register "
3650 "for IP CSUM error packets.\n");
3651 goto exit;
3652 }
3653 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3654 RT_IDX_TU_CSUM_ERR, 1);
3655 if (status) {
3656 netif_err(qdev, ifup, qdev->ndev,
3657 "Failed to init routing register "
3658 "for TCP/UDP CSUM error packets.\n");
3659 goto exit;
3660 }
3661 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3662 if (status) {
3663 netif_err(qdev, ifup, qdev->ndev,
3664 "Failed to init routing register for broadcast packets.\n");
3665 goto exit;
3666 }
3667 /* If we have more than one inbound queue, then turn on RSS in the
3668 * routing block.
3669 */
3670 if (qdev->rss_ring_count > 1) {
3671 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3672 RT_IDX_RSS_MATCH, 1);
3673 if (status) {
3674 netif_err(qdev, ifup, qdev->ndev,
3675 "Failed to init routing register for MATCH RSS packets.\n");
3676 goto exit;
3677 }
3678 }
3679
3680 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3681 RT_IDX_CAM_HIT, 1);
3682 if (status)
3683 netif_err(qdev, ifup, qdev->ndev,
3684 "Failed to init routing register for CAM packets.\n");
3685exit:
3686 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3687 return status;
3688}
3689
3690int ql_cam_route_initialize(struct ql_adapter *qdev)
3691{
3692 int status, set;
3693
3694	/* Check if the link is up and use that to
3695	 * determine whether we are setting or clearing
3696 * the MAC address in the CAM.
3697 */
3698 set = ql_read32(qdev, STS);
3699 set &= qdev->port_link_up;
3700 status = ql_set_mac_addr(qdev, set);
3701 if (status) {
3702 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3703 return status;
3704 }
3705
3706 status = ql_route_initialize(qdev);
3707 if (status)
3708 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3709
3710 return status;
3711}
3712
3713static int ql_adapter_initialize(struct ql_adapter *qdev)
3714{
3715 u32 value, mask;
3716 int i;
3717 int status = 0;
3718
3719 /*
3720 * Set up the System register to halt on errors.
3721 */
3722 value = SYS_EFE | SYS_FAE;
3723 mask = value << 16;
3724 ql_write32(qdev, SYS, mask | value);
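	/* The value is mirrored into the upper 16 bits, which this chip
	 * family treats as a per-bit write enable for the lower half; the
	 * same mask-and-value pattern is repeated below for NIC_RCV_CFG,
	 * FSC and MGMT_RCV_CFG.
	 */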
3725
3726 /* Set the default queue, and VLAN behavior. */
3727 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3728 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
3729 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3730
3731 /* Set the MPI interrupt to enabled. */
3732 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3733
3734 /* Enable the function, set pagesize, enable error checking. */
3735 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3736 FSC_EC | FSC_VM_PAGE_4K;
3737 value |= SPLT_SETTING;
3738
3739 /* Set/clear header splitting. */
3740 mask = FSC_VM_PAGESIZE_MASK |
3741 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3742 ql_write32(qdev, FSC, mask | value);
3743
3744 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3745
3746 /* Set RX packet routing to use port/pci function on which the
3747 * packet arrived on in addition to usual frame routing.
3748 * This is helpful on bonding where both interfaces can have
3749 * the same MAC address.
3750 */
3751 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3752 /* Reroute all packets to our Interface.
3753 * They may have been routed to MPI firmware
3754 * due to WOL.
3755 */
3756 value = ql_read32(qdev, MGMT_RCV_CFG);
3757 value &= ~MGMT_RCV_CFG_RM;
3758 mask = 0xffff0000;
3759
3760 /* Sticky reg needs clearing due to WOL. */
3761 ql_write32(qdev, MGMT_RCV_CFG, mask);
3762 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3763
3764	/* Default WOL is enabled on Mezz cards */
3765 if (qdev->pdev->subsystem_device == 0x0068 ||
3766 qdev->pdev->subsystem_device == 0x0180)
3767 qdev->wol = WAKE_MAGIC;
3768
3769 /* Start up the rx queues. */
3770 for (i = 0; i < qdev->rx_ring_count; i++) {
3771 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3772 if (status) {
3773 netif_err(qdev, ifup, qdev->ndev,
3774 "Failed to start rx ring[%d].\n", i);
3775 return status;
3776 }
3777 }
3778
3779 /* If there is more than one inbound completion queue
3780 * then download a RICB to configure RSS.
3781 */
3782 if (qdev->rss_ring_count > 1) {
3783 status = ql_start_rss(qdev);
3784 if (status) {
3785 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3786 return status;
3787 }
3788 }
3789
3790 /* Start up the tx queues. */
3791 for (i = 0; i < qdev->tx_ring_count; i++) {
3792 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3793 if (status) {
3794 netif_err(qdev, ifup, qdev->ndev,
3795 "Failed to start tx ring[%d].\n", i);
3796 return status;
3797 }
3798 }
3799
3800 /* Initialize the port and set the max framesize. */
3801 status = qdev->nic_ops->port_initialize(qdev);
3802 if (status)
3803 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3804
3805 /* Set up the MAC address and frame routing filter. */
3806 status = ql_cam_route_initialize(qdev);
3807 if (status) {
3808 netif_err(qdev, ifup, qdev->ndev,
3809 "Failed to init CAM/Routing tables.\n");
3810 return status;
3811 }
3812
3813 /* Start NAPI for the RSS queues. */
3814 for (i = 0; i < qdev->rss_ring_count; i++) {
3815 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3816 "Enabling NAPI for rx_ring[%d].\n", i);
3817 napi_enable(&qdev->rx_ring[i].napi);
3818 }
3819
3820 return status;
3821}
3822
3823/* Issue soft reset to chip. */
3824static int ql_adapter_reset(struct ql_adapter *qdev)
3825{
3826 u32 value;
3827 int status = 0;
3828 unsigned long end_jiffies;
3829
3830 /* Clear all the entries in the routing table. */
3831 status = ql_clear_routing_entries(qdev);
3832 if (status) {
3833 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3834 return status;
3835 }
3836
3837 end_jiffies = jiffies +
3838 max((unsigned long)1, usecs_to_jiffies(30));
3839
3840	/* If the recovery bit is set, skip the mailbox command and
3841	 * clear the bit; otherwise we are in the normal reset process.
3842 */
3843 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3844 /* Stop management traffic. */
3845 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3846
3847 /* Wait for the NIC and MGMNT FIFOs to empty. */
3848 ql_wait_fifo_empty(qdev);
3849 } else
3850 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3851
3852 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3853
3854 do {
3855 value = ql_read32(qdev, RST_FO);
3856 if ((value & RST_FO_FR) == 0)
3857 break;
3858 cpu_relax();
3859 } while (time_before(jiffies, end_jiffies));
3860
3861 if (value & RST_FO_FR) {
3862 netif_err(qdev, ifdown, qdev->ndev,
3863 "ETIMEDOUT!!! errored out of resetting the chip!\n");
3864 status = -ETIMEDOUT;
3865 }
3866
3867 /* Resume management traffic. */
3868 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3869 return status;
3870}
3871
3872static void ql_display_dev_info(struct net_device *ndev)
3873{
3874 struct ql_adapter *qdev = netdev_priv(ndev);
3875
3876 netif_info(qdev, probe, qdev->ndev,
3877 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3878 "XG Roll = %d, XG Rev = %d.\n",
3879 qdev->func,
3880 qdev->port,
3881 qdev->chip_rev_id & 0x0000000f,
3882 qdev->chip_rev_id >> 4 & 0x0000000f,
3883 qdev->chip_rev_id >> 8 & 0x0000000f,
3884 qdev->chip_rev_id >> 12 & 0x0000000f);
3885 netif_info(qdev, probe, qdev->ndev,
3886 "MAC address %pM\n", ndev->dev_addr);
3887}
3888
3889static int ql_wol(struct ql_adapter *qdev)
3890{
3891 int status = 0;
3892 u32 wol = MB_WOL_DISABLE;
3893
3894 /* The CAM is still intact after a reset, but if we
3895 * are doing WOL, then we may need to program the
3896 * routing regs. We would also need to issue the mailbox
3897 * commands to instruct the MPI what to do per the ethtool
3898 * settings.
3899 */
3900
3901 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3902 WAKE_MCAST | WAKE_BCAST)) {
3903 netif_err(qdev, ifdown, qdev->ndev,
3904			  "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3905 qdev->wol);
3906 return -EINVAL;
3907 }
3908
3909 if (qdev->wol & WAKE_MAGIC) {
3910 status = ql_mb_wol_set_magic(qdev, 1);
3911 if (status) {
3912 netif_err(qdev, ifdown, qdev->ndev,
3913 "Failed to set magic packet on %s.\n",
3914 qdev->ndev->name);
3915 return status;
3916 } else
3917 netif_info(qdev, drv, qdev->ndev,
3918 "Enabled magic packet successfully on %s.\n",
3919 qdev->ndev->name);
3920
3921 wol |= MB_WOL_MAGIC_PKT;
3922 }
3923
3924 if (qdev->wol) {
3925 wol |= MB_WOL_MODE_ON;
3926 status = ql_mb_wol_mode(qdev, wol);
3927 netif_err(qdev, drv, qdev->ndev,
3928 "WOL %s (wol code 0x%x) on %s\n",
3929 (status == 0) ? "Successfully set" : "Failed",
3930 wol, qdev->ndev->name);
3931 }
3932
3933 return status;
3934}
3935
3936static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3937{
3938
3939 /* Don't kill the reset worker thread if we
3940 * are in the process of recovery.
3941 */
3942 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3943 cancel_delayed_work_sync(&qdev->asic_reset_work);
3944 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3945 cancel_delayed_work_sync(&qdev->mpi_work);
3946 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3947 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3948 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3949}
3950
3951static int ql_adapter_down(struct ql_adapter *qdev)
3952{
3953 int i, status = 0;
3954
3955 ql_link_off(qdev);
3956
3957 ql_cancel_all_work_sync(qdev);
3958
3959 for (i = 0; i < qdev->rss_ring_count; i++)
3960 napi_disable(&qdev->rx_ring[i].napi);
3961
3962 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3963
3964 ql_disable_interrupts(qdev);
3965
3966 ql_tx_ring_clean(qdev);
3967
3968 /* Call netif_napi_del() from common point.
3969 */
3970 for (i = 0; i < qdev->rss_ring_count; i++)
3971 netif_napi_del(&qdev->rx_ring[i].napi);
3972
3973 status = ql_adapter_reset(qdev);
3974 if (status)
3975 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3976 qdev->func);
3977 ql_free_rx_buffers(qdev);
3978
3979 return status;
3980}
3981
3982static int ql_adapter_up(struct ql_adapter *qdev)
3983{
3984 int err = 0;
3985
3986 err = ql_adapter_initialize(qdev);
3987 if (err) {
3988 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3989 goto err_init;
3990 }
3991 set_bit(QL_ADAPTER_UP, &qdev->flags);
3992 ql_alloc_rx_buffers(qdev);
3993 /* If the port is initialized and the
3994	 * link is up then turn on the carrier.
3995 */
3996 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3997 (ql_read32(qdev, STS) & qdev->port_link_up))
3998 ql_link_on(qdev);
3999 /* Restore rx mode. */
4000 clear_bit(QL_ALLMULTI, &qdev->flags);
4001 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4002 qlge_set_multicast_list(qdev->ndev);
4003
4004 /* Restore vlan setting. */
4005 qlge_restore_vlan(qdev);
4006
4007 ql_enable_interrupts(qdev);
4008 ql_enable_all_completion_interrupts(qdev);
4009 netif_tx_start_all_queues(qdev->ndev);
4010
4011 return 0;
4012err_init:
4013 ql_adapter_reset(qdev);
4014 return err;
4015}
4016
4017static void ql_release_adapter_resources(struct ql_adapter *qdev)
4018{
4019 ql_free_mem_resources(qdev);
4020 ql_free_irq(qdev);
4021}
4022
4023static int ql_get_adapter_resources(struct ql_adapter *qdev)
4024{
4025 int status = 0;
4026
4027 if (ql_alloc_mem_resources(qdev)) {
4028 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
4029 return -ENOMEM;
4030 }
4031 status = ql_request_irq(qdev);
4032 return status;
4033}
4034
4035static int qlge_close(struct net_device *ndev)
4036{
4037 struct ql_adapter *qdev = netdev_priv(ndev);
4038
4039	/* If we hit the pci_channel_io_perm_failure
4040	 * condition, then we already
4041 * brought the adapter down.
4042 */
4043 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
4044 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
4045 clear_bit(QL_EEH_FATAL, &qdev->flags);
4046 return 0;
4047 }
4048
4049 /*
4050 * Wait for device to recover from a reset.
4051 * (Rarely happens, but possible.)
4052 */
4053 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4054 msleep(1);
4055 ql_adapter_down(qdev);
4056 ql_release_adapter_resources(qdev);
4057 return 0;
4058}
4059
4060static int ql_configure_rings(struct ql_adapter *qdev)
4061{
4062 int i;
4063 struct rx_ring *rx_ring;
4064 struct tx_ring *tx_ring;
4065 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4066 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4067 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4068
4069 qdev->lbq_buf_order = get_order(lbq_buf_len);
4070
4071 /* In a perfect world we have one RSS ring for each CPU
4072	 * and each has its own vector. To do that we ask for
4073 * cpu_cnt vectors. ql_enable_msix() will adjust the
4074 * vector count to what we actually get. We then
4075 * allocate an RSS ring for each.
4076 * Essentially, we are doing min(cpu_count, msix_vector_count).
4077 */
4078 qdev->intr_count = cpu_cnt;
4079 ql_enable_msix(qdev);
4080 /* Adjust the RSS ring count to the actual vector count. */
4081 qdev->rss_ring_count = qdev->intr_count;
4082 qdev->tx_ring_count = cpu_cnt;
4083 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
4084
4085 for (i = 0; i < qdev->tx_ring_count; i++) {
4086 tx_ring = &qdev->tx_ring[i];
4087 memset((void *)tx_ring, 0, sizeof(*tx_ring));
4088 tx_ring->qdev = qdev;
4089 tx_ring->wq_id = i;
4090 tx_ring->wq_len = qdev->tx_ring_size;
4091 tx_ring->wq_size =
4092 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4093
4094 /*
4095		 * The completion queue IDs for the tx rings start
4096 * immediately after the rss rings.
4097 */
4098 tx_ring->cq_id = qdev->rss_ring_count + i;
4099 }
4100
4101 for (i = 0; i < qdev->rx_ring_count; i++) {
4102 rx_ring = &qdev->rx_ring[i];
4103 memset((void *)rx_ring, 0, sizeof(*rx_ring));
4104 rx_ring->qdev = qdev;
4105 rx_ring->cq_id = i;
4106 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
4107 if (i < qdev->rss_ring_count) {
4108 /*
4109 * Inbound (RSS) queues.
4110 */
4111 rx_ring->cq_len = qdev->rx_ring_size;
4112 rx_ring->cq_size =
4113 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4114 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4115 rx_ring->lbq_size =
4116 rx_ring->lbq_len * sizeof(__le64);
4117 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4118 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
4119 "lbq_buf_size %d, order = %d\n",
4120 rx_ring->lbq_buf_size,
4121 qdev->lbq_buf_order);
4122 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4123 rx_ring->sbq_size =
4124 rx_ring->sbq_len * sizeof(__le64);
4125 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4126 rx_ring->type = RX_Q;
4127 } else {
4128 /*
4129 * Outbound queue handles outbound completions only.
4130 */
4131 /* outbound cq is same size as tx_ring it services. */
4132 rx_ring->cq_len = qdev->tx_ring_size;
4133 rx_ring->cq_size =
4134 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4135 rx_ring->lbq_len = 0;
4136 rx_ring->lbq_size = 0;
4137 rx_ring->lbq_buf_size = 0;
4138 rx_ring->sbq_len = 0;
4139 rx_ring->sbq_size = 0;
4140 rx_ring->sbq_buf_size = 0;
4141 rx_ring->type = TX_Q;
4142 }
4143 }
4144 return 0;
4145}
4146
4147static int qlge_open(struct net_device *ndev)
4148{
4149 int err = 0;
4150 struct ql_adapter *qdev = netdev_priv(ndev);
4151
4152 err = ql_adapter_reset(qdev);
4153 if (err)
4154 return err;
4155
4156 err = ql_configure_rings(qdev);
4157 if (err)
4158 return err;
4159
4160 err = ql_get_adapter_resources(qdev);
4161 if (err)
4162 goto error_up;
4163
4164 err = ql_adapter_up(qdev);
4165 if (err)
4166 goto error_up;
4167
4168 return err;
4169
4170error_up:
4171 ql_release_adapter_resources(qdev);
4172 return err;
4173}
4174
4175static int ql_change_rx_buffers(struct ql_adapter *qdev)
4176{
4177 struct rx_ring *rx_ring;
4178 int i, status;
4179 u32 lbq_buf_len;
4180
4181 /* Wait for an outstanding reset to complete. */
4182 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4183 int i = 3;
4184 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4185 netif_err(qdev, ifup, qdev->ndev,
4186 "Waiting for adapter UP...\n");
4187 ssleep(1);
4188 }
4189
4190		if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4191 netif_err(qdev, ifup, qdev->ndev,
4192 "Timed out waiting for adapter UP\n");
4193 return -ETIMEDOUT;
4194 }
4195 }
4196
4197 status = ql_adapter_down(qdev);
4198 if (status)
4199 goto error;
4200
4201 /* Get the new rx buffer size. */
4202 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4203 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4204 qdev->lbq_buf_order = get_order(lbq_buf_len);
4205
4206 for (i = 0; i < qdev->rss_ring_count; i++) {
4207 rx_ring = &qdev->rx_ring[i];
4208 /* Set the new size. */
4209 rx_ring->lbq_buf_size = lbq_buf_len;
4210 }
4211
4212 status = ql_adapter_up(qdev);
4213 if (status)
4214 goto error;
4215
4216 return status;
4217error:
4218 netif_alert(qdev, ifup, qdev->ndev,
4219 "Driver up/down cycle failed, closing device.\n");
4220 set_bit(QL_ADAPTER_UP, &qdev->flags);
4221 dev_close(qdev->ndev);
4222 return status;
4223}
4224
4225static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4226{
4227 struct ql_adapter *qdev = netdev_priv(ndev);
4228 int status;
4229
4230 if (ndev->mtu == 1500 && new_mtu == 9000) {
4231 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4232 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4233 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4234 } else
4235 return -EINVAL;
4236
4237 queue_delayed_work(qdev->workqueue,
4238 &qdev->mpi_port_cfg_work, 3*HZ);
4239
4240 ndev->mtu = new_mtu;
4241
4242 if (!netif_running(qdev->ndev)) {
4243 return 0;
4244 }
4245
4246 status = ql_change_rx_buffers(qdev);
4247 if (status) {
4248 netif_err(qdev, ifup, qdev->ndev,
4249 "Changing MTU failed.\n");
4250 }
4251
4252 return status;
4253}
4254
4255static struct net_device_stats *qlge_get_stats(struct net_device
4256 *ndev)
4257{
4258 struct ql_adapter *qdev = netdev_priv(ndev);
4259 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4260 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4261 unsigned long pkts, mcast, dropped, errors, bytes;
4262 int i;
4263
4264 /* Get RX stats. */
4265 pkts = mcast = dropped = errors = bytes = 0;
4266 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4267 pkts += rx_ring->rx_packets;
4268 bytes += rx_ring->rx_bytes;
4269 dropped += rx_ring->rx_dropped;
4270 errors += rx_ring->rx_errors;
4271 mcast += rx_ring->rx_multicast;
4272 }
4273 ndev->stats.rx_packets = pkts;
4274 ndev->stats.rx_bytes = bytes;
4275 ndev->stats.rx_dropped = dropped;
4276 ndev->stats.rx_errors = errors;
4277 ndev->stats.multicast = mcast;
4278
4279 /* Get TX stats. */
4280 pkts = errors = bytes = 0;
4281 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4282 pkts += tx_ring->tx_packets;
4283 bytes += tx_ring->tx_bytes;
4284 errors += tx_ring->tx_errors;
4285 }
4286 ndev->stats.tx_packets = pkts;
4287 ndev->stats.tx_bytes = bytes;
4288 ndev->stats.tx_errors = errors;
4289 return &ndev->stats;
4290}
4291
4292static void qlge_set_multicast_list(struct net_device *ndev)
4293{
4294 struct ql_adapter *qdev = netdev_priv(ndev);
4295 struct netdev_hw_addr *ha;
4296 int i, status;
4297
4298 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4299 if (status)
4300 return;
4301 /*
4302 * Set or clear promiscuous mode if a
4303 * transition is taking place.
4304 */
4305 if (ndev->flags & IFF_PROMISC) {
4306 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4307 if (ql_set_routing_reg
4308 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4309 netif_err(qdev, hw, qdev->ndev,
4310 "Failed to set promiscuous mode.\n");
4311 } else {
4312 set_bit(QL_PROMISCUOUS, &qdev->flags);
4313 }
4314 }
4315 } else {
4316 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4317 if (ql_set_routing_reg
4318 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4319 netif_err(qdev, hw, qdev->ndev,
4320 "Failed to clear promiscuous mode.\n");
4321 } else {
4322 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4323 }
4324 }
4325 }
4326
4327 /*
4328 * Set or clear all multicast mode if a
4329 * transition is taking place.
4330 */
4331 if ((ndev->flags & IFF_ALLMULTI) ||
4332 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4333 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4334 if (ql_set_routing_reg
4335 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4336 netif_err(qdev, hw, qdev->ndev,
4337 "Failed to set all-multi mode.\n");
4338 } else {
4339 set_bit(QL_ALLMULTI, &qdev->flags);
4340 }
4341 }
4342 } else {
4343 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4344 if (ql_set_routing_reg
4345 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4346 netif_err(qdev, hw, qdev->ndev,
4347 "Failed to clear all-multi mode.\n");
4348 } else {
4349 clear_bit(QL_ALLMULTI, &qdev->flags);
4350 }
4351 }
4352 }
4353
4354 if (!netdev_mc_empty(ndev)) {
4355 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4356 if (status)
4357 goto exit;
4358 i = 0;
4359 netdev_for_each_mc_addr(ha, ndev) {
4360 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4361 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4362 netif_err(qdev, hw, qdev->ndev,
4363					  "Failed to load multicast address.\n");
4364 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4365 goto exit;
4366 }
4367 i++;
4368 }
4369 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4370 if (ql_set_routing_reg
4371 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4372 netif_err(qdev, hw, qdev->ndev,
4373 "Failed to set multicast match mode.\n");
4374 } else {
4375 set_bit(QL_ALLMULTI, &qdev->flags);
4376 }
4377 }
4378exit:
4379 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4380}
4381
4382static int qlge_set_mac_address(struct net_device *ndev, void *p)
4383{
4384 struct ql_adapter *qdev = netdev_priv(ndev);
4385 struct sockaddr *addr = p;
4386 int status;
4387
4388 if (!is_valid_ether_addr(addr->sa_data))
4389 return -EADDRNOTAVAIL;
4390 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4391 /* Update local copy of current mac address. */
4392 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4393
4394 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4395 if (status)
4396 return status;
4397 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4398 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4399 if (status)
4400 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4401 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4402 return status;
4403}
4404
4405static void qlge_tx_timeout(struct net_device *ndev)
4406{
4407 struct ql_adapter *qdev = netdev_priv(ndev);
4408 ql_queue_asic_error(qdev);
4409}
4410
4411static void ql_asic_reset_work(struct work_struct *work)
4412{
4413 struct ql_adapter *qdev =
4414 container_of(work, struct ql_adapter, asic_reset_work.work);
4415 int status;
4416 rtnl_lock();
4417 status = ql_adapter_down(qdev);
4418 if (status)
4419 goto error;
4420
4421 status = ql_adapter_up(qdev);
4422 if (status)
4423 goto error;
4424
4425 /* Restore rx mode. */
4426 clear_bit(QL_ALLMULTI, &qdev->flags);
4427 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4428 qlge_set_multicast_list(qdev->ndev);
4429
4430 rtnl_unlock();
4431 return;
4432error:
4433 netif_alert(qdev, ifup, qdev->ndev,
4434 "Driver up/down cycle failed, closing device\n");
4435
4436 set_bit(QL_ADAPTER_UP, &qdev->flags);
4437 dev_close(qdev->ndev);
4438 rtnl_unlock();
4439}
4440
4441static const struct nic_operations qla8012_nic_ops = {
4442 .get_flash = ql_get_8012_flash_params,
4443 .port_initialize = ql_8012_port_initialize,
4444};
4445
4446static const struct nic_operations qla8000_nic_ops = {
4447 .get_flash = ql_get_8000_flash_params,
4448 .port_initialize = ql_8000_port_initialize,
4449};
4450
4451/* Find the pcie function number for the other NIC
4452 * on this chip. Since both NIC functions share a
4453 * common firmware we have the lowest enabled function
4454 * do any common work. Examples would be resetting
4455 * after a fatal firmware error, or doing a firmware
4456 * coredump.
4457 */
4458static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4459{
4460 int status = 0;
4461 u32 temp;
4462 u32 nic_func1, nic_func2;
4463
4464 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4465 &temp);
4466 if (status)
4467 return status;
4468
4469 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4470 MPI_TEST_NIC_FUNC_MASK);
4471 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4472 MPI_TEST_NIC_FUNC_MASK);
4473
4474 if (qdev->func == nic_func1)
4475 qdev->alt_func = nic_func2;
4476 else if (qdev->func == nic_func2)
4477 qdev->alt_func = nic_func1;
4478 else
4479 status = -EIO;
4480
4481 return status;
4482}
4483
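/* Read the function ID from the chip and set up the per-port
 * register masks and MPI mailbox addresses for whichever of the
 * two NIC functions this instance is.
 */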
4484static int ql_get_board_info(struct ql_adapter *qdev)
4485{
4486 int status;
4487 qdev->func =
4488 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4489 if (qdev->func > 3)
4490 return -EIO;
4491
4492 status = ql_get_alt_pcie_func(qdev);
4493 if (status)
4494 return status;
4495
4496 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4497 if (qdev->port) {
4498 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4499 qdev->port_link_up = STS_PL1;
4500 qdev->port_init = STS_PI1;
4501 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4502 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4503 } else {
4504 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4505 qdev->port_link_up = STS_PL0;
4506 qdev->port_init = STS_PI0;
4507 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4508 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4509 }
4510 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4511 qdev->device_id = qdev->pdev->device;
4512 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4513 qdev->nic_ops = &qla8012_nic_ops;
4514 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4515 qdev->nic_ops = &qla8000_nic_ops;
4516 return status;
4517}
4518
4519static void ql_release_all(struct pci_dev *pdev)
4520{
4521 struct net_device *ndev = pci_get_drvdata(pdev);
4522 struct ql_adapter *qdev = netdev_priv(ndev);
4523
4524 if (qdev->workqueue) {
4525 destroy_workqueue(qdev->workqueue);
4526 qdev->workqueue = NULL;
4527 }
4528
4529 if (qdev->reg_base)
4530 iounmap(qdev->reg_base);
4531 if (qdev->doorbell_area)
4532 iounmap(qdev->doorbell_area);
4533 vfree(qdev->mpi_coredump);
4534 pci_release_regions(pdev);
4535 pci_set_drvdata(pdev, NULL);
4536}
4537
4538static int __devinit ql_init_device(struct pci_dev *pdev,
4539 struct net_device *ndev, int cards_found)
4540{
4541 struct ql_adapter *qdev = netdev_priv(ndev);
4542 int err = 0;
4543
4544 memset((void *)qdev, 0, sizeof(*qdev));
4545 err = pci_enable_device(pdev);
4546 if (err) {
4547 dev_err(&pdev->dev, "PCI device enable failed.\n");
4548 return err;
4549 }
4550
4551 qdev->ndev = ndev;
4552 qdev->pdev = pdev;
4553 pci_set_drvdata(pdev, ndev);
4554
4555 /* Set PCIe read request size */
4556 err = pcie_set_readrq(pdev, 4096);
4557 if (err) {
4558 dev_err(&pdev->dev, "Set readrq failed.\n");
4559 goto err_out1;
4560 }
4561
4562 err = pci_request_regions(pdev, DRV_NAME);
4563 if (err) {
4564 dev_err(&pdev->dev, "PCI region request failed.\n");
4565		goto err_out1;
4566 }
4567
4568 pci_set_master(pdev);
4569 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4570 set_bit(QL_DMA64, &qdev->flags);
4571 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4572 } else {
4573 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4574 if (!err)
4575 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4576 }
4577
4578 if (err) {
4579 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4580 goto err_out2;
4581 }
4582
4583 /* Set PCIe reset type for EEH to fundamental. */
4584 pdev->needs_freset = 1;
4585 pci_save_state(pdev);
4586 qdev->reg_base =
4587 ioremap_nocache(pci_resource_start(pdev, 1),
4588 pci_resource_len(pdev, 1));
4589 if (!qdev->reg_base) {
4590 dev_err(&pdev->dev, "Register mapping failed.\n");
4591 err = -ENOMEM;
4592 goto err_out2;
4593 }
4594
4595 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4596 qdev->doorbell_area =
4597 ioremap_nocache(pci_resource_start(pdev, 3),
4598 pci_resource_len(pdev, 3));
4599 if (!qdev->doorbell_area) {
4600 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4601 err = -ENOMEM;
4602 goto err_out2;
4603 }
4604
4605 err = ql_get_board_info(qdev);
4606 if (err) {
4607 dev_err(&pdev->dev, "Register access failed.\n");
4608 err = -EIO;
4609 goto err_out2;
4610 }
4611 qdev->msg_enable = netif_msg_init(debug, default_msg);
4612 spin_lock_init(&qdev->hw_lock);
4613 spin_lock_init(&qdev->stats_lock);
4614
4615 if (qlge_mpi_coredump) {
4616 qdev->mpi_coredump =
4617 vmalloc(sizeof(struct ql_mpi_coredump));
4618 if (qdev->mpi_coredump == NULL) {
4619 dev_err(&pdev->dev, "Coredump alloc failed.\n");
4620 err = -ENOMEM;
4621 goto err_out2;
4622 }
4623 if (qlge_force_coredump)
4624 set_bit(QL_FRC_COREDUMP, &qdev->flags);
4625 }
4626	/* Make sure the flash contents are good. */
4627 err = qdev->nic_ops->get_flash(qdev);
4628 if (err) {
4629 dev_err(&pdev->dev, "Invalid FLASH.\n");
4630 goto err_out2;
4631 }
4632
4633 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4634 /* Keep local copy of current mac address. */
4635 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4636
4637 /* Set up the default ring sizes. */
4638 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4639 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4640
4641 /* Set up the coalescing parameters. */
4642 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4643 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4644 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4645 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4646
4647 /*
4648 * Set up the operating parameters.
4649 */
4650 qdev->workqueue = create_singlethread_workqueue(ndev->name);
4651 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4652 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4653 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4654 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4655 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4656 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4657 init_completion(&qdev->ide_completion);
4658 mutex_init(&qdev->mpi_mutex);
4659
4660 if (!cards_found) {
4661 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4662 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4663 DRV_NAME, DRV_VERSION);
4664 }
4665 return 0;
4666err_out2:
4667 ql_release_all(pdev);
4668err_out1:
4669 pci_disable_device(pdev);
4670 return err;
4671}
4672
4673static const struct net_device_ops qlge_netdev_ops = {
4674 .ndo_open = qlge_open,
4675 .ndo_stop = qlge_close,
4676 .ndo_start_xmit = qlge_send,
4677 .ndo_change_mtu = qlge_change_mtu,
4678 .ndo_get_stats = qlge_get_stats,
4679 .ndo_set_multicast_list = qlge_set_multicast_list,
4680 .ndo_set_mac_address = qlge_set_mac_address,
4681 .ndo_validate_addr = eth_validate_addr,
4682 .ndo_tx_timeout = qlge_tx_timeout,
4683 .ndo_fix_features = qlge_fix_features,
4684 .ndo_set_features = qlge_set_features,
4685 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4686 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
4687};
4688
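/* Periodic timer callback: read a hardware register so that a dead
 * PCI bus is noticed (and EEH triggered) even when the interface is
 * otherwise idle; re-arm unless the channel is already offline.
 */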
4689static void ql_timer(unsigned long data)
4690{
4691 struct ql_adapter *qdev = (struct ql_adapter *)data;
4692 u32 var = 0;
4693
4694 var = ql_read32(qdev, STS);
4695 if (pci_channel_offline(qdev->pdev)) {
4696 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4697 return;
4698 }
4699
4700 mod_timer(&qdev->timer, jiffies + (5*HZ));
4701}
4702
4703static int __devinit qlge_probe(struct pci_dev *pdev,
4704 const struct pci_device_id *pci_entry)
4705{
4706 struct net_device *ndev = NULL;
4707 struct ql_adapter *qdev = NULL;
4708 static int cards_found = 0;
4709 int err = 0;
4710
4711 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4712 min(MAX_CPUS, (int)num_online_cpus()));
4713 if (!ndev)
4714 return -ENOMEM;
4715
4716 err = ql_init_device(pdev, ndev, cards_found);
4717 if (err < 0) {
4718 free_netdev(ndev);
4719 return err;
4720 }
4721
4722 qdev = netdev_priv(ndev);
4723 SET_NETDEV_DEV(ndev, &pdev->dev);
4724 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
4725 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
4726 NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
4727 ndev->features = ndev->hw_features |
4728 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
4729
4730 if (test_bit(QL_DMA64, &qdev->flags))
4731 ndev->features |= NETIF_F_HIGHDMA;
4732
4733 /*
4734 * Set up net_device structure.
4735 */
4736 ndev->tx_queue_len = qdev->tx_ring_size;
4737 ndev->irq = pdev->irq;
4738
4739 ndev->netdev_ops = &qlge_netdev_ops;
4740 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
4741 ndev->watchdog_timeo = 10 * HZ;
4742
4743 err = register_netdev(ndev);
4744 if (err) {
4745 dev_err(&pdev->dev, "net device registration failed.\n");
4746 ql_release_all(pdev);
4747 pci_disable_device(pdev);
4748 return err;
4749 }
4750 /* Start up the timer to trigger EEH if
4751 * the bus goes dead
4752 */
4753 init_timer_deferrable(&qdev->timer);
4754 qdev->timer.data = (unsigned long)qdev;
4755 qdev->timer.function = ql_timer;
4756 qdev->timer.expires = jiffies + (5*HZ);
4757 add_timer(&qdev->timer);
4758 ql_link_off(qdev);
4759 ql_display_dev_info(ndev);
4760 atomic_set(&qdev->lb_count, 0);
4761 cards_found++;
4762 return 0;
4763}
4764
4765netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4766{
4767 return qlge_send(skb, ndev);
4768}
4769
4770int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4771{
4772 return ql_clean_inbound_rx_ring(rx_ring, budget);
4773}
4774
4775static void __devexit qlge_remove(struct pci_dev *pdev)
4776{
4777 struct net_device *ndev = pci_get_drvdata(pdev);
4778 struct ql_adapter *qdev = netdev_priv(ndev);
4779 del_timer_sync(&qdev->timer);
4780 ql_cancel_all_work_sync(qdev);
4781 unregister_netdev(ndev);
4782 ql_release_all(pdev);
4783 pci_disable_device(pdev);
4784 free_netdev(ndev);
4785}
4786
4787/* Clean up resources without touching hardware. */
4788static void ql_eeh_close(struct net_device *ndev)
4789{
4790 int i;
4791 struct ql_adapter *qdev = netdev_priv(ndev);
4792
4793 if (netif_carrier_ok(ndev)) {
4794 netif_carrier_off(ndev);
4795 netif_stop_queue(ndev);
4796 }
4797
4798 /* Disabling the timer */
4799 del_timer_sync(&qdev->timer);
4800 ql_cancel_all_work_sync(qdev);
4801
4802 for (i = 0; i < qdev->rss_ring_count; i++)
4803 netif_napi_del(&qdev->rx_ring[i].napi);
4804
4805 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4806 ql_tx_ring_clean(qdev);
4807 ql_free_rx_buffers(qdev);
4808 ql_release_adapter_resources(qdev);
4809}
4810
4811/*
4812 * This callback is called by the PCI subsystem whenever
4813 * a PCI bus error is detected.
4814 */
4815static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4816 enum pci_channel_state state)
4817{
4818 struct net_device *ndev = pci_get_drvdata(pdev);
4819 struct ql_adapter *qdev = netdev_priv(ndev);
4820
4821 switch (state) {
4822 case pci_channel_io_normal:
4823 return PCI_ERS_RESULT_CAN_RECOVER;
4824 case pci_channel_io_frozen:
4825 netif_device_detach(ndev);
4826 if (netif_running(ndev))
4827 ql_eeh_close(ndev);
4828 pci_disable_device(pdev);
4829 return PCI_ERS_RESULT_NEED_RESET;
4830 case pci_channel_io_perm_failure:
4831 dev_err(&pdev->dev,
4832 "%s: pci_channel_io_perm_failure.\n", __func__);
4833 ql_eeh_close(ndev);
4834 set_bit(QL_EEH_FATAL, &qdev->flags);
4835 return PCI_ERS_RESULT_DISCONNECT;
4836 }
4837
4838 /* Request a slot reset. */
4839 return PCI_ERS_RESULT_NEED_RESET;
4840}
4841
4842/*
4843 * This callback is called after the PCI bus has been reset.
4844 * Basically, this tries to restart the card from scratch.
4845 * This is a shortened version of the device probe/discovery code,
4846 * it resembles the first-half of the () routine.
4847 */
4848static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4849{
4850 struct net_device *ndev = pci_get_drvdata(pdev);
4851 struct ql_adapter *qdev = netdev_priv(ndev);
4852
4853 pdev->error_state = pci_channel_io_normal;
4854
4855 pci_restore_state(pdev);
4856 if (pci_enable_device(pdev)) {
4857 netif_err(qdev, ifup, qdev->ndev,
4858 "Cannot re-enable PCI device after reset.\n");
4859 return PCI_ERS_RESULT_DISCONNECT;
4860 }
4861 pci_set_master(pdev);
4862
4863 if (ql_adapter_reset(qdev)) {
4864 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4865 set_bit(QL_EEH_FATAL, &qdev->flags);
4866 return PCI_ERS_RESULT_DISCONNECT;
4867 }
4868
4869 return PCI_ERS_RESULT_RECOVERED;
4870}
4871
4872static void qlge_io_resume(struct pci_dev *pdev)
4873{
4874 struct net_device *ndev = pci_get_drvdata(pdev);
4875 struct ql_adapter *qdev = netdev_priv(ndev);
4876 int err = 0;
4877
4878 if (netif_running(ndev)) {
4879 err = qlge_open(ndev);
4880 if (err) {
4881 netif_err(qdev, ifup, qdev->ndev,
4882 "Device initialization failed after reset.\n");
4883 return;
4884 }
4885 } else {
4886 netif_err(qdev, ifup, qdev->ndev,
4887 "Device was not running prior to EEH.\n");
4888 }
4889 mod_timer(&qdev->timer, jiffies + (5*HZ));
4890 netif_device_attach(ndev);
4891}
4892
4893static struct pci_error_handlers qlge_err_handler = {
4894 .error_detected = qlge_io_error_detected,
4895 .slot_reset = qlge_io_slot_reset,
4896 .resume = qlge_io_resume,
4897};
4898
4899static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4900{
4901 struct net_device *ndev = pci_get_drvdata(pdev);
4902 struct ql_adapter *qdev = netdev_priv(ndev);
4903 int err;
4904
4905 netif_device_detach(ndev);
4906 del_timer_sync(&qdev->timer);
4907
4908 if (netif_running(ndev)) {
4909 err = ql_adapter_down(qdev);
4910		if (err)
4911 return err;
4912 }
4913
4914 ql_wol(qdev);
4915 err = pci_save_state(pdev);
4916 if (err)
4917 return err;
4918
4919 pci_disable_device(pdev);
4920
4921 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4922
4923 return 0;
4924}
4925
4926#ifdef CONFIG_PM
4927static int qlge_resume(struct pci_dev *pdev)
4928{
4929 struct net_device *ndev = pci_get_drvdata(pdev);
4930 struct ql_adapter *qdev = netdev_priv(ndev);
4931 int err;
4932
4933 pci_set_power_state(pdev, PCI_D0);
4934 pci_restore_state(pdev);
4935 err = pci_enable_device(pdev);
4936 if (err) {
4937 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
4938 return err;
4939 }
4940 pci_set_master(pdev);
4941
4942 pci_enable_wake(pdev, PCI_D3hot, 0);
4943 pci_enable_wake(pdev, PCI_D3cold, 0);
4944
4945 if (netif_running(ndev)) {
4946 err = ql_adapter_up(qdev);
4947 if (err)
4948 return err;
4949 }
4950
4951 mod_timer(&qdev->timer, jiffies + (5*HZ));
4952 netif_device_attach(ndev);
4953
4954 return 0;
4955}
4956#endif /* CONFIG_PM */
4957
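/* System shutdown: reuse the suspend path to quiesce the device
 * and leave it in a low-power state.
 */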
4958static void qlge_shutdown(struct pci_dev *pdev)
4959{
4960 qlge_suspend(pdev, PMSG_SUSPEND);
4961}
4962
4963static struct pci_driver qlge_driver = {
4964 .name = DRV_NAME,
4965 .id_table = qlge_pci_tbl,
4966 .probe = qlge_probe,
4967 .remove = __devexit_p(qlge_remove),
4968#ifdef CONFIG_PM
4969 .suspend = qlge_suspend,
4970 .resume = qlge_resume,
4971#endif
4972 .shutdown = qlge_shutdown,
4973 .err_handler = &qlge_err_handler
4974};
4975
4976static int __init qlge_init_module(void)
4977{
4978 return pci_register_driver(&qlge_driver);
4979}
4980
4981static void __exit qlge_exit(void)
4982{
4983 pci_unregister_driver(&qlge_driver);
4984}
4985
4986module_init(qlge_init_module);
4987module_exit(qlge_exit);
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
new file mode 100644
index 00000000000..ff2bf8a4e24
--- /dev/null
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -0,0 +1,1284 @@
1#include "qlge.h"
2
3int ql_unpause_mpi_risc(struct ql_adapter *qdev)
4{
5 u32 tmp;
6
7 /* Un-pause the RISC */
8 tmp = ql_read32(qdev, CSR);
9 if (!(tmp & CSR_RP))
10 return -EIO;
11
12 ql_write32(qdev, CSR, CSR_CMD_CLR_PAUSE);
13 return 0;
14}
15
16int ql_pause_mpi_risc(struct ql_adapter *qdev)
17{
18 u32 tmp;
19 int count = UDELAY_COUNT;
20
21 /* Pause the RISC */
22 ql_write32(qdev, CSR, CSR_CMD_SET_PAUSE);
23 do {
24 tmp = ql_read32(qdev, CSR);
25 if (tmp & CSR_RP)
26 break;
27 mdelay(UDELAY_DELAY);
28 count--;
29 } while (count);
30 return (count == 0) ? -ETIMEDOUT : 0;
31}
32
33int ql_hard_reset_mpi_risc(struct ql_adapter *qdev)
34{
35 u32 tmp;
36 int count = UDELAY_COUNT;
37
38 /* Reset the RISC */
39 ql_write32(qdev, CSR, CSR_CMD_SET_RST);
40 do {
41 tmp = ql_read32(qdev, CSR);
42 if (tmp & CSR_RR) {
43 ql_write32(qdev, CSR, CSR_CMD_CLR_RST);
44 break;
45 }
46 mdelay(UDELAY_DELAY);
47 count--;
48 } while (count);
49 return (count == 0) ? -ETIMEDOUT : 0;
50}
51
52int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
53{
54 int status;
55 /* wait for reg to come ready */
56 status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
57 if (status)
58 goto exit;
59 /* set up for reg read */
60 ql_write32(qdev, PROC_ADDR, reg | PROC_ADDR_R);
61 /* wait for reg to come ready */
62 status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
63 if (status)
64 goto exit;
65 /* get the data */
66 *data = ql_read32(qdev, PROC_DATA);
67exit:
68 return status;
69}
70
71int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data)
72{
73 int status = 0;
74 /* wait for reg to come ready */
75 status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
76 if (status)
77 goto exit;
78 /* write the data to the data reg */
79 ql_write32(qdev, PROC_DATA, data);
80 /* trigger the write */
81 ql_write32(qdev, PROC_ADDR, reg);
82 /* wait for reg to come ready */
83 status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
84 if (status)
85 goto exit;
86exit:
87 return status;
88}
89
90int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
91{
92 int status;
93 status = ql_write_mpi_reg(qdev, 0x00001010, 1);
94 return status;
95}
96
97/* Determine if we are in charge of the firmware. We are
98 * if we are the lower of the two NIC PCIe functions, or if
99 * we are the higher function and the lower function
100 * is not enabled.
101 */
102int ql_own_firmware(struct ql_adapter *qdev)
103{
104 u32 temp;
105
106 /* If we are the lower of the 2 NIC functions
107	 * on the chip then we are responsible for
108 * core dump and firmware reset after an error.
109 */
110 if (qdev->func < qdev->alt_func)
111 return 1;
112
113 /* If we are the higher of the 2 NIC functions
114 * on the chip and the lower function is not
115 * enabled, then we are responsible for
116 * core dump and firmware reset after an error.
117 */
118 temp = ql_read32(qdev, STS);
119 if (!(temp & (1 << (8 + qdev->alt_func))))
120 return 1;
121
122 return 0;
123
124}
125
126static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
127{
128 int i, status;
129
130 status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
131 if (status)
132 return -EBUSY;
133 for (i = 0; i < mbcp->out_count; i++) {
134 status =
135 ql_read_mpi_reg(qdev, qdev->mailbox_out + i,
136 &mbcp->mbox_out[i]);
137 if (status) {
138 netif_err(qdev, drv, qdev->ndev, "Failed mailbox read.\n");
139 break;
140 }
141 }
142 ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
143 return status;
144}
145
146/* Wait for a single mailbox command to complete.
147 * Returns zero on success.
148 */
149static int ql_wait_mbx_cmd_cmplt(struct ql_adapter *qdev)
150{
151 int count = 100;
152 u32 value;
153
154 do {
155 value = ql_read32(qdev, STS);
156 if (value & STS_PI)
157 return 0;
158 mdelay(UDELAY_DELAY); /* 100ms */
159 } while (--count);
160 return -ETIMEDOUT;
161}
162
163/* Execute a single mailbox command.
164 * Caller must hold PROC_ADDR semaphore.
165 */
166static int ql_exec_mb_cmd(struct ql_adapter *qdev, struct mbox_params *mbcp)
167{
168 int i, status;
169
170 /*
171 * Make sure there's nothing pending.
172 * This shouldn't happen.
173 */
174 if (ql_read32(qdev, CSR) & CSR_HRI)
175 return -EIO;
176
177 status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
178 if (status)
179 return status;
180
181 /*
182 * Fill the outbound mailboxes.
183 */
184 for (i = 0; i < mbcp->in_count; i++) {
185 status = ql_write_mpi_reg(qdev, qdev->mailbox_in + i,
186 mbcp->mbox_in[i]);
187 if (status)
188 goto end;
189 }
190 /*
191 * Wake up the MPI firmware.
192 */
193 ql_write32(qdev, CSR, CSR_CMD_SET_H2R_INT);
194end:
195 ql_sem_unlock(qdev, SEM_PROC_REG_MASK);
196 return status;
197}
198
199/* We are being asked by firmware to accept
200 * a change to the port. This is only
201 * a change to max frame sizes (Tx/Rx), pause
202 * parameters, or loopback mode. We wake up a worker
203 * to handler processing this since a mailbox command
204 * to handle processing this since a mailbox command
205 */
206static int ql_idc_req_aen(struct ql_adapter *qdev)
207{
208 int status;
209 struct mbox_params *mbcp = &qdev->idc_mbc;
210
211 netif_err(qdev, drv, qdev->ndev, "Enter!\n");
212 /* Get the status data and start up a thread to
213 * handle the request.
214 */
215 mbcp = &qdev->idc_mbc;
216 mbcp->out_count = 4;
217 status = ql_get_mb_sts(qdev, mbcp);
218 if (status) {
219 netif_err(qdev, drv, qdev->ndev,
220 "Could not read MPI, resetting ASIC!\n");
221 ql_queue_asic_error(qdev);
222 } else {
223 /* Begin polled mode early so
224 * we don't get another interrupt
225 * when we leave mpi_worker.
226 */
227 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
228 queue_delayed_work(qdev->workqueue, &qdev->mpi_idc_work, 0);
229 }
230 return status;
231}
232
233/* Process an inter-device event completion.
234 * If good, signal the caller's completion.
235 */
236static int ql_idc_cmplt_aen(struct ql_adapter *qdev)
237{
238 int status;
239 struct mbox_params *mbcp = &qdev->idc_mbc;
240 mbcp->out_count = 4;
241 status = ql_get_mb_sts(qdev, mbcp);
242 if (status) {
243 netif_err(qdev, drv, qdev->ndev,
244 "Could not read MPI, resetting RISC!\n");
245 ql_queue_fw_error(qdev);
246 } else
247 /* Wake up the sleeping mpi_idc_work thread that is
248 * waiting for this event.
249 */
250 complete(&qdev->ide_completion);
251
252 return status;
253}
254
255static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
256{
257 int status;
258 mbcp->out_count = 2;
259
260 status = ql_get_mb_sts(qdev, mbcp);
261 if (status) {
262 netif_err(qdev, drv, qdev->ndev,
263 "%s: Could not get mailbox status.\n", __func__);
264 return;
265 }
266
267 qdev->link_status = mbcp->mbox_out[1];
268 netif_err(qdev, drv, qdev->ndev, "Link Up.\n");
269
270 /* If we're coming back from an IDC event
271 * then set up the CAM and frame routing.
272 */
273 if (test_bit(QL_CAM_RT_SET, &qdev->flags)) {
274 status = ql_cam_route_initialize(qdev);
275 if (status) {
276 netif_err(qdev, ifup, qdev->ndev,
277 "Failed to init CAM/Routing tables.\n");
278 return;
279 } else
280 clear_bit(QL_CAM_RT_SET, &qdev->flags);
281 }
282
283 /* Queue up a worker to check the frame
284 * size information, and fix it if it's not
285 * to our liking.
286 */
287 if (!test_bit(QL_PORT_CFG, &qdev->flags)) {
288 netif_err(qdev, drv, qdev->ndev, "Queue Port Config Worker!\n");
289 set_bit(QL_PORT_CFG, &qdev->flags);
290 /* Begin polled mode early so
291 * we don't get another interrupt
292 * when we leave mpi_worker dpc.
293 */
294 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
295 queue_delayed_work(qdev->workqueue,
296 &qdev->mpi_port_cfg_work, 0);
297 }
298
299 ql_link_on(qdev);
300}
301
302static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
303{
304 int status;
305
306 mbcp->out_count = 3;
307
308 status = ql_get_mb_sts(qdev, mbcp);
309 if (status)
310 netif_err(qdev, drv, qdev->ndev, "Link down AEN broken!\n");
311
312 ql_link_off(qdev);
313}
314
315static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp)
316{
317 int status;
318
319 mbcp->out_count = 5;
320
321 status = ql_get_mb_sts(qdev, mbcp);
322 if (status)
323 netif_err(qdev, drv, qdev->ndev, "SFP in AEN broken!\n");
324 else
325 netif_err(qdev, drv, qdev->ndev, "SFP insertion detected.\n");
326
327 return status;
328}
329
330static int ql_sfp_out(struct ql_adapter *qdev, struct mbox_params *mbcp)
331{
332 int status;
333
334 mbcp->out_count = 1;
335
336 status = ql_get_mb_sts(qdev, mbcp);
337 if (status)
338 netif_err(qdev, drv, qdev->ndev, "SFP out AEN broken!\n");
339 else
340 netif_err(qdev, drv, qdev->ndev, "SFP removal detected.\n");
341
342 return status;
343}
344
345static int ql_aen_lost(struct ql_adapter *qdev, struct mbox_params *mbcp)
346{
347 int status;
348
349 mbcp->out_count = 6;
350
351 status = ql_get_mb_sts(qdev, mbcp);
352 if (status)
353 netif_err(qdev, drv, qdev->ndev, "Lost AEN broken!\n");
354 else {
355 int i;
356 netif_err(qdev, drv, qdev->ndev, "Lost AEN detected.\n");
357 for (i = 0; i < mbcp->out_count; i++)
358 netif_err(qdev, drv, qdev->ndev, "mbox_out[%d] = 0x%.08x.\n",
359 i, mbcp->mbox_out[i]);
360
361 }
362
363 return status;
364}
365
366static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp)
367{
368 int status;
369
370 mbcp->out_count = 2;
371
372 status = ql_get_mb_sts(qdev, mbcp);
373 if (status) {
374 netif_err(qdev, drv, qdev->ndev, "Firmware did not initialize!\n");
375 } else {
376 netif_err(qdev, drv, qdev->ndev, "Firmware Revision = 0x%.08x.\n",
377 mbcp->mbox_out[1]);
378 qdev->fw_rev_id = mbcp->mbox_out[1];
379 status = ql_cam_route_initialize(qdev);
380 if (status)
381 netif_err(qdev, ifup, qdev->ndev,
382 "Failed to init CAM/Routing tables.\n");
383 }
384}
385
386/* Process an async event and clear it unless it's an
387 * error condition.
388 * This can get called iteratively from the mpi_work thread
389 * when events arrive via an interrupt.
390 * It also gets called when a mailbox command is polling for
391 * its completion. */
392static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
393{
394 int status;
395 int orig_count = mbcp->out_count;
396
397 /* Just get mailbox zero for now. */
398 mbcp->out_count = 1;
399 status = ql_get_mb_sts(qdev, mbcp);
400 if (status) {
401 netif_err(qdev, drv, qdev->ndev,
402 "Could not read MPI, resetting ASIC!\n");
403 ql_queue_asic_error(qdev);
404 goto end;
405 }
406
407 switch (mbcp->mbox_out[0]) {
408
409 /* This case is only active when we arrive here
410 * as a result of issuing a mailbox command to
411 * the firmware.
412 */
413 case MB_CMD_STS_INTRMDT:
414 case MB_CMD_STS_GOOD:
415 case MB_CMD_STS_INVLD_CMD:
416 case MB_CMD_STS_XFC_ERR:
417 case MB_CMD_STS_CSUM_ERR:
418 case MB_CMD_STS_ERR:
419 case MB_CMD_STS_PARAM_ERR:
420 /* We can only get mailbox status if we're polling from an
421 * unfinished command. Get the rest of the status data and
422 * return back to the caller.
423 * We only end up here when we're polling for a mailbox
424 * command completion.
425 */
426 mbcp->out_count = orig_count;
427 status = ql_get_mb_sts(qdev, mbcp);
428 return status;
429
430 /* We are being asked by firmware to accept
431 * a change to the port. This is only
432 * a change to max frame sizes (Tx/Rx), pause
433 * parameters, or loopback mode.
434 */
435 case AEN_IDC_REQ:
436 status = ql_idc_req_aen(qdev);
437 break;
438
439	/* Process an inbound IDC event.
440 * This will happen when we're trying to
441 * change tx/rx max frame size, change pause
442 * parameters or loopback mode.
443 */
444 case AEN_IDC_CMPLT:
445 case AEN_IDC_EXT:
446 status = ql_idc_cmplt_aen(qdev);
447 break;
448
449 case AEN_LINK_UP:
450 ql_link_up(qdev, mbcp);
451 break;
452
453 case AEN_LINK_DOWN:
454 ql_link_down(qdev, mbcp);
455 break;
456
457 case AEN_FW_INIT_DONE:
458		/* If we're in the process of executing the firmware,
459 * then convert the status to normal mailbox status.
460 */
461 if (mbcp->mbox_in[0] == MB_CMD_EX_FW) {
462 mbcp->out_count = orig_count;
463 status = ql_get_mb_sts(qdev, mbcp);
464 mbcp->mbox_out[0] = MB_CMD_STS_GOOD;
465 return status;
466 }
467 ql_init_fw_done(qdev, mbcp);
468 break;
469
470 case AEN_AEN_SFP_IN:
471 ql_sfp_in(qdev, mbcp);
472 break;
473
474 case AEN_AEN_SFP_OUT:
475 ql_sfp_out(qdev, mbcp);
476 break;
477
478 /* This event can arrive at boot time or after an
479 * MPI reset if the firmware failed to initialize.
480 */
481 case AEN_FW_INIT_FAIL:
482		/* If we're in the process of executing the firmware,
483 * then convert the status to normal mailbox status.
484 */
485 if (mbcp->mbox_in[0] == MB_CMD_EX_FW) {
486 mbcp->out_count = orig_count;
487 status = ql_get_mb_sts(qdev, mbcp);
488 mbcp->mbox_out[0] = MB_CMD_STS_ERR;
489 return status;
490 }
491 netif_err(qdev, drv, qdev->ndev,
492 "Firmware initialization failed.\n");
493 status = -EIO;
494 ql_queue_fw_error(qdev);
495 break;
496
497 case AEN_SYS_ERR:
498 netif_err(qdev, drv, qdev->ndev, "System Error.\n");
499 ql_queue_fw_error(qdev);
500 status = -EIO;
501 break;
502
503 case AEN_AEN_LOST:
504 ql_aen_lost(qdev, mbcp);
505 break;
506
507 case AEN_DCBX_CHG:
508 /* Need to support AEN 8110 */
509 break;
510 default:
511 netif_err(qdev, drv, qdev->ndev,
512 "Unsupported AE %.08x.\n", mbcp->mbox_out[0]);
513 /* Clear the MPI firmware status. */
514 }
515end:
516 ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
517 /* Restore the original mailbox count to
518 * what the caller asked for. This can get
519 * changed when a mailbox command is waiting
520 * for a response and an AEN arrives and
521 * is handled.
522	 */
523 mbcp->out_count = orig_count;
524 return status;
525}
526
527/* Execute a single mailbox command.
528 * mbcp points to a mbox_params structure; each element
529 * of its mbox_in[] array holds the value for its
530 * respective outbound mailbox register.
531 */
532static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
533{
534 int status;
535 unsigned long count;
536
537 mutex_lock(&qdev->mpi_mutex);
538
539 /* Begin polled mode for MPI */
540 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
541
542 /* Load the mailbox registers and wake up MPI RISC. */
543 status = ql_exec_mb_cmd(qdev, mbcp);
544 if (status)
545 goto end;
546
547
548 /* If we're generating a system error, then there's nothing
549 * to wait for.
550 */
551 if (mbcp->mbox_in[0] == MB_CMD_MAKE_SYS_ERR)
552 goto end;
553
554 /* Wait for the command to complete. We loop
555 * here because some AEN might arrive while
556 * we're waiting for the mailbox command to
557 * complete. If more than 5 seconds expire we can
558 * assume something is wrong. */
559 count = jiffies + HZ * MAILBOX_TIMEOUT;
560 do {
561 /* Wait for the interrupt to come in. */
562 status = ql_wait_mbx_cmd_cmplt(qdev);
563 if (status)
564 continue;
565
566 /* Process the event. If it's an AEN, it
567 * will be handled in-line or a worker
568 * will be spawned. If it's our completion
569 * we will catch it below.
570 */
571 status = ql_mpi_handler(qdev, mbcp);
572 if (status)
573 goto end;
574
575		/* It's either the completion of our mailbox
576		 * command or an AEN. If it's our
577		 * completion then get out.
578 */
579 if (((mbcp->mbox_out[0] & 0x0000f000) ==
580 MB_CMD_STS_GOOD) ||
581 ((mbcp->mbox_out[0] & 0x0000f000) ==
582 MB_CMD_STS_INTRMDT))
583 goto done;
584 } while (time_before(jiffies, count));
585
586 netif_err(qdev, drv, qdev->ndev,
587 "Timed out waiting for mailbox complete.\n");
588 status = -ETIMEDOUT;
589 goto end;
590
591done:
592
593 /* Now we can clear the interrupt condition
594 * and look at our status.
595 */
596 ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
597
598 if (((mbcp->mbox_out[0] & 0x0000f000) !=
599 MB_CMD_STS_GOOD) &&
600 ((mbcp->mbox_out[0] & 0x0000f000) !=
601 MB_CMD_STS_INTRMDT)) {
602 status = -EIO;
603 }
604end:
605 /* End polled mode for MPI */
606 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
607 mutex_unlock(&qdev->mpi_mutex);
608 return status;
609}
610
611/* Get MPI firmware version. This will be used for
612 * driver banner and for ethtool info.
613 * Returns zero on success.
614 */
615int ql_mb_about_fw(struct ql_adapter *qdev)
616{
617 struct mbox_params mbc;
618 struct mbox_params *mbcp = &mbc;
619 int status = 0;
620
621 memset(mbcp, 0, sizeof(struct mbox_params));
622
623 mbcp->in_count = 1;
624 mbcp->out_count = 3;
625
626 mbcp->mbox_in[0] = MB_CMD_ABOUT_FW;
627
628 status = ql_mailbox_command(qdev, mbcp);
629 if (status)
630 return status;
631
632 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
633 netif_err(qdev, drv, qdev->ndev,
634 "Failed about firmware command\n");
635 status = -EIO;
636 }
637
638 /* Store the firmware version */
639 qdev->fw_rev_id = mbcp->mbox_out[1];
640
641 return status;
642}
643
644/* Get functional state for MPI firmware.
645 * Returns zero on success.
646 */
647int ql_mb_get_fw_state(struct ql_adapter *qdev)
648{
649 struct mbox_params mbc;
650 struct mbox_params *mbcp = &mbc;
651 int status = 0;
652
653 memset(mbcp, 0, sizeof(struct mbox_params));
654
655 mbcp->in_count = 1;
656 mbcp->out_count = 2;
657
658 mbcp->mbox_in[0] = MB_CMD_GET_FW_STATE;
659
660 status = ql_mailbox_command(qdev, mbcp);
661 if (status)
662 return status;
663
664 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
665 netif_err(qdev, drv, qdev->ndev,
666 "Failed Get Firmware State.\n");
667 status = -EIO;
668 }
669
670 /* If bit zero is set in mbx 1 then the firmware is
671 * running, but not initialized. This should never
672 * happen.
673 */
674 if (mbcp->mbox_out[1] & 1) {
675 netif_err(qdev, drv, qdev->ndev,
676 "Firmware waiting for initialization.\n");
677 status = -EIO;
678 }
679
680 return status;
681}
682
683/* Send an ACK mailbox command to the firmware to
684 * let it continue with the change.
685 */
686static int ql_mb_idc_ack(struct ql_adapter *qdev)
687{
688 struct mbox_params mbc;
689 struct mbox_params *mbcp = &mbc;
690 int status = 0;
691
692 memset(mbcp, 0, sizeof(struct mbox_params));
693
694 mbcp->in_count = 5;
695 mbcp->out_count = 1;
696
697 mbcp->mbox_in[0] = MB_CMD_IDC_ACK;
698 mbcp->mbox_in[1] = qdev->idc_mbc.mbox_out[1];
699 mbcp->mbox_in[2] = qdev->idc_mbc.mbox_out[2];
700 mbcp->mbox_in[3] = qdev->idc_mbc.mbox_out[3];
701 mbcp->mbox_in[4] = qdev->idc_mbc.mbox_out[4];
702
703 status = ql_mailbox_command(qdev, mbcp);
704 if (status)
705 return status;
706
707 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
708 netif_err(qdev, drv, qdev->ndev, "Failed IDC ACK send.\n");
709 status = -EIO;
710 }
711 return status;
712}
713
714/* Set the link settings and maximum frame size
715 * for the current port.
716 * Most likely will block.
717 */
718int ql_mb_set_port_cfg(struct ql_adapter *qdev)
719{
720 struct mbox_params mbc;
721 struct mbox_params *mbcp = &mbc;
722 int status = 0;
723
724 memset(mbcp, 0, sizeof(struct mbox_params));
725
726 mbcp->in_count = 3;
727 mbcp->out_count = 1;
728
729 mbcp->mbox_in[0] = MB_CMD_SET_PORT_CFG;
730 mbcp->mbox_in[1] = qdev->link_config;
731 mbcp->mbox_in[2] = qdev->max_frame_size;
732
733
734 status = ql_mailbox_command(qdev, mbcp);
735 if (status)
736 return status;
737
738 if (mbcp->mbox_out[0] == MB_CMD_STS_INTRMDT) {
739 netif_err(qdev, drv, qdev->ndev,
740 "Port Config sent, wait for IDC.\n");
741 } else if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
742 netif_err(qdev, drv, qdev->ndev,
743 "Failed Set Port Configuration.\n");
744 status = -EIO;
745 }
746 return status;
747}
748
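/* Ask the MPI firmware to DMA a region of RISC RAM to the host
 * buffer at req_dma. The 64-bit DMA address, RISC address and
 * word count are passed as 16-bit words via the LSW/MSW/MSD macros.
 */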
749static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
750 u32 size)
751{
752 int status = 0;
753 struct mbox_params mbc;
754 struct mbox_params *mbcp = &mbc;
755
756 memset(mbcp, 0, sizeof(struct mbox_params));
757
758 mbcp->in_count = 9;
759 mbcp->out_count = 1;
760
761 mbcp->mbox_in[0] = MB_CMD_DUMP_RISC_RAM;
762 mbcp->mbox_in[1] = LSW(addr);
763 mbcp->mbox_in[2] = MSW(req_dma);
764 mbcp->mbox_in[3] = LSW(req_dma);
765 mbcp->mbox_in[4] = MSW(size);
766 mbcp->mbox_in[5] = LSW(size);
767 mbcp->mbox_in[6] = MSW(MSD(req_dma));
768 mbcp->mbox_in[7] = LSW(MSD(req_dma));
769 mbcp->mbox_in[8] = MSW(addr);
770
771
772 status = ql_mailbox_command(qdev, mbcp);
773 if (status)
774 return status;
775
776 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
777 netif_err(qdev, drv, qdev->ndev, "Failed to dump risc RAM.\n");
778 status = -EIO;
779 }
780 return status;
781}
782
783/* Issue a mailbox command to dump RISC RAM. */
784int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
785 u32 ram_addr, int word_count)
786{
787 int status;
788 char *my_buf;
789 dma_addr_t buf_dma;
790
791 my_buf = pci_alloc_consistent(qdev->pdev, word_count * sizeof(u32),
792 &buf_dma);
793 if (!my_buf)
794 return -EIO;
795
796 status = ql_mb_dump_ram(qdev, buf_dma, ram_addr, word_count);
797 if (!status)
798 memcpy(buf, my_buf, word_count * sizeof(u32));
799
800 pci_free_consistent(qdev->pdev, word_count * sizeof(u32), my_buf,
801 buf_dma);
802 return status;
803}
804
805/* Get link settings and maximum frame size settings
806 * for the current port.
807 * Most likely will block.
808 */
809int ql_mb_get_port_cfg(struct ql_adapter *qdev)
810{
811 struct mbox_params mbc;
812 struct mbox_params *mbcp = &mbc;
813 int status = 0;
814
815 memset(mbcp, 0, sizeof(struct mbox_params));
816
817 mbcp->in_count = 1;
818 mbcp->out_count = 3;
819
820 mbcp->mbox_in[0] = MB_CMD_GET_PORT_CFG;
821
822 status = ql_mailbox_command(qdev, mbcp);
823 if (status)
824 return status;
825
826 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
827 netif_err(qdev, drv, qdev->ndev,
828 "Failed Get Port Configuration.\n");
829 status = -EIO;
830 } else {
831 netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
832 "Passed Get Port Configuration.\n");
833 qdev->link_config = mbcp->mbox_out[1];
834 qdev->max_frame_size = mbcp->mbox_out[2];
835 }
836 return status;
837}
838
839int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol)
840{
841 struct mbox_params mbc;
842 struct mbox_params *mbcp = &mbc;
843 int status;
844
845 memset(mbcp, 0, sizeof(struct mbox_params));
846
847 mbcp->in_count = 2;
848 mbcp->out_count = 1;
849
850 mbcp->mbox_in[0] = MB_CMD_SET_WOL_MODE;
851 mbcp->mbox_in[1] = wol;
852
853
854 status = ql_mailbox_command(qdev, mbcp);
855 if (status)
856 return status;
857
858 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
859 netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n");
860 status = -EIO;
861 }
862 return status;
863}
864
865int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol)
866{
867 struct mbox_params mbc;
868 struct mbox_params *mbcp = &mbc;
869 int status;
870 u8 *addr = qdev->ndev->dev_addr;
871
872 memset(mbcp, 0, sizeof(struct mbox_params));
873
874 mbcp->in_count = 8;
875 mbcp->out_count = 1;
876
877 mbcp->mbox_in[0] = MB_CMD_SET_WOL_MAGIC;
878 if (enable_wol) {
879 mbcp->mbox_in[1] = (u32)addr[0];
880 mbcp->mbox_in[2] = (u32)addr[1];
881 mbcp->mbox_in[3] = (u32)addr[2];
882 mbcp->mbox_in[4] = (u32)addr[3];
883 mbcp->mbox_in[5] = (u32)addr[4];
884 mbcp->mbox_in[6] = (u32)addr[5];
885 mbcp->mbox_in[7] = 0;
886 } else {
887 mbcp->mbox_in[1] = 0;
888 mbcp->mbox_in[2] = 1;
889 mbcp->mbox_in[3] = 1;
890 mbcp->mbox_in[4] = 1;
891 mbcp->mbox_in[5] = 1;
892 mbcp->mbox_in[6] = 1;
893 mbcp->mbox_in[7] = 0;
894 }
895
896 status = ql_mailbox_command(qdev, mbcp);
897 if (status)
898 return status;
899
900 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
901 netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n");
902 status = -EIO;
903 }
904 return status;
905}
906
907/* IDC - Inter Device Communication...
908 * Some firmware commands require consent of adjacent FCOE
909 * function. This function waits for the OK, or a
910 * counter-request for a little more time.
911 * The firmware will complete the request if the other
912 * function doesn't respond.
913 */
914static int ql_idc_wait(struct ql_adapter *qdev)
915{
916 int status = -ETIMEDOUT;
917 long wait_time = 1 * HZ;
918 struct mbox_params *mbcp = &qdev->idc_mbc;
919 do {
920 /* Wait here for the command to complete
921 * via the IDC process.
922 */
923 wait_time =
924 wait_for_completion_timeout(&qdev->ide_completion,
925 wait_time);
926 if (!wait_time) {
927 netif_err(qdev, drv, qdev->ndev, "IDC Timeout.\n");
928 break;
929 }
930 /* Now examine the response from the IDC process.
931 * We might have a good completion or a request for
932 * more wait time.
933 */
934 if (mbcp->mbox_out[0] == AEN_IDC_EXT) {
935 netif_err(qdev, drv, qdev->ndev,
936 "IDC Time Extension from function.\n");
937 wait_time += (mbcp->mbox_out[1] >> 8) & 0x0000000f;
938 } else if (mbcp->mbox_out[0] == AEN_IDC_CMPLT) {
939 netif_err(qdev, drv, qdev->ndev, "IDC Success.\n");
940 status = 0;
941 break;
942 } else {
943 netif_err(qdev, drv, qdev->ndev,
944 "IDC: Invalid State 0x%.04x.\n",
945 mbcp->mbox_out[0]);
946 status = -EIO;
947 break;
948 }
949 } while (wait_time);
950
951 return status;
952}
953
954int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config)
955{
956 struct mbox_params mbc;
957 struct mbox_params *mbcp = &mbc;
958 int status;
959
960 memset(mbcp, 0, sizeof(struct mbox_params));
961
962 mbcp->in_count = 2;
963 mbcp->out_count = 1;
964
965 mbcp->mbox_in[0] = MB_CMD_SET_LED_CFG;
966 mbcp->mbox_in[1] = led_config;
967
968
969 status = ql_mailbox_command(qdev, mbcp);
970 if (status)
971 return status;
972
973 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
974 netif_err(qdev, drv, qdev->ndev,
975 "Failed to set LED Configuration.\n");
976 status = -EIO;
977 }
978
979 return status;
980}
981
982int ql_mb_get_led_cfg(struct ql_adapter *qdev)
983{
984 struct mbox_params mbc;
985 struct mbox_params *mbcp = &mbc;
986 int status;
987
988 memset(mbcp, 0, sizeof(struct mbox_params));
989
990 mbcp->in_count = 1;
991 mbcp->out_count = 2;
992
993 mbcp->mbox_in[0] = MB_CMD_GET_LED_CFG;
994
995 status = ql_mailbox_command(qdev, mbcp);
996 if (status)
997 return status;
998
999 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
1000 netif_err(qdev, drv, qdev->ndev,
1001 "Failed to get LED Configuration.\n");
1002 status = -EIO;
1003 } else
1004 qdev->led_config = mbcp->mbox_out[1];
1005
1006 return status;
1007}
1008
1009int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control)
1010{
1011 struct mbox_params mbc;
1012 struct mbox_params *mbcp = &mbc;
1013 int status;
1014
1015 memset(mbcp, 0, sizeof(struct mbox_params));
1016
1017 mbcp->in_count = 1;
1018 mbcp->out_count = 2;
1019
1020 mbcp->mbox_in[0] = MB_CMD_SET_MGMNT_TFK_CTL;
1021 mbcp->mbox_in[1] = control;
1022
1023 status = ql_mailbox_command(qdev, mbcp);
1024 if (status)
1025 return status;
1026
1027 if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD)
1028 return status;
1029
1030 if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
1031 netif_err(qdev, drv, qdev->ndev,
1032 "Command not supported by firmware.\n");
1033 status = -EINVAL;
1034 } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
1035 /* This indicates that the firmware is
1036 * already in the state we are trying to
1037 * change it to.
1038 */
1039 netif_err(qdev, drv, qdev->ndev,
1040 "Command parameters make no change.\n");
1041 }
1042 return status;
1043}
1044
1045/* Returns a negative error code or the mailbox command status. */
1046static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control)
1047{
1048 struct mbox_params mbc;
1049 struct mbox_params *mbcp = &mbc;
1050 int status;
1051
1052 memset(mbcp, 0, sizeof(struct mbox_params));
1053 *control = 0;
1054
1055 mbcp->in_count = 1;
1056	mbcp->out_count = 2;
1057
1058 mbcp->mbox_in[0] = MB_CMD_GET_MGMNT_TFK_CTL;
1059
1060 status = ql_mailbox_command(qdev, mbcp);
1061 if (status)
1062 return status;
1063
1064 if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD) {
1065		*control = mbcp->mbox_out[1];
1066 return status;
1067 }
1068
1069 if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
1070 netif_err(qdev, drv, qdev->ndev,
1071 "Command not supported by firmware.\n");
1072 status = -EINVAL;
1073 } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
1074 netif_err(qdev, drv, qdev->ndev,
1075 "Failed to get MPI traffic control.\n");
1076 status = -EIO;
1077 }
1078 return status;
1079}
1080
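/* Poll until both the NIC and management (MPI) FIFOs report
 * empty, retrying a few times before giving up.
 */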
1081int ql_wait_fifo_empty(struct ql_adapter *qdev)
1082{
1083 int count = 5;
1084 u32 mgmnt_fifo_empty;
1085 u32 nic_fifo_empty;
1086
1087 do {
1088 nic_fifo_empty = ql_read32(qdev, STS) & STS_NFE;
1089 ql_mb_get_mgmnt_traffic_ctl(qdev, &mgmnt_fifo_empty);
1090 mgmnt_fifo_empty &= MB_GET_MPI_TFK_FIFO_EMPTY;
1091 if (nic_fifo_empty && mgmnt_fifo_empty)
1092 return 0;
1093 msleep(100);
1094 } while (count-- > 0);
1095 return -ETIMEDOUT;
1096}
1097
1098/* API called in work thread context to set new TX/RX
1099 * maximum frame size values to match MTU.
1100 */
1101static int ql_set_port_cfg(struct ql_adapter *qdev)
1102{
1103 int status;
1104 status = ql_mb_set_port_cfg(qdev);
1105 if (status)
1106 return status;
1107 status = ql_idc_wait(qdev);
1108 return status;
1109}
1110
1111/* The following routines are worker threads that process
1112 * events that may sleep waiting for completion.
1113 */
1114
1115/* This thread gets the maximum TX and RX frame size values
1116 * from the firmware and, if necessary, changes them to match
1117 * the MTU setting.
1118 */
1119void ql_mpi_port_cfg_work(struct work_struct *work)
1120{
1121 struct ql_adapter *qdev =
1122 container_of(work, struct ql_adapter, mpi_port_cfg_work.work);
1123 int status;
1124
1125 status = ql_mb_get_port_cfg(qdev);
1126 if (status) {
1127 netif_err(qdev, drv, qdev->ndev,
1128 "Bug: Failed to get port config data.\n");
1129 goto err;
1130 }
1131
1132 if (qdev->link_config & CFG_JUMBO_FRAME_SIZE &&
1133 qdev->max_frame_size ==
1134 CFG_DEFAULT_MAX_FRAME_SIZE)
1135 goto end;
1136
1137 qdev->link_config |= CFG_JUMBO_FRAME_SIZE;
1138 qdev->max_frame_size = CFG_DEFAULT_MAX_FRAME_SIZE;
1139 status = ql_set_port_cfg(qdev);
1140 if (status) {
1141 netif_err(qdev, drv, qdev->ndev,
1142 "Bug: Failed to set port config data.\n");
1143 goto err;
1144 }
1145end:
1146 clear_bit(QL_PORT_CFG, &qdev->flags);
1147 return;
1148err:
1149 ql_queue_fw_error(qdev);
1150 goto end;
1151}
1152
1153/* Process an inter-device request. This is issued by
1154 * the firmware in response to another function requesting
1155 * a change to the port. We set a flag to indicate a change
1156 * has been made and then send a mailbox command ACKing
1157 * the change request.
1158 */
1159void ql_mpi_idc_work(struct work_struct *work)
1160{
1161 struct ql_adapter *qdev =
1162 container_of(work, struct ql_adapter, mpi_idc_work.work);
1163 int status;
1164 struct mbox_params *mbcp = &qdev->idc_mbc;
1165 u32 aen;
1166 int timeout;
1167
1168 aen = mbcp->mbox_out[1] >> 16;
1169 timeout = (mbcp->mbox_out[1] >> 8) & 0xf;
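	/* mbox_out[1] carries the IDC request: the event code is in the
	 * upper 16 bits and an ACK-wait value in bits 11:8.  A non-zero
	 * wait value means the firmware expects an ACK from us.
	 */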
1170
1171 switch (aen) {
1172 default:
1173 netif_err(qdev, drv, qdev->ndev,
1174 "Bug: Unhandled IDC action.\n");
1175 break;
1176 case MB_CMD_PORT_RESET:
1177 case MB_CMD_STOP_FW:
1178 ql_link_off(qdev);
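		/* Fall through: a port reset or firmware stop also
		 * requires the CAM and routing entries to be restored
		 * on the next link up.
		 */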
1179 case MB_CMD_SET_PORT_CFG:
1180		/* Signal the resulting link-up AEN
1181		 * that the frame routing and MAC address
1182		 * need to be set.
1183		 */
1184 set_bit(QL_CAM_RT_SET, &qdev->flags);
1185 /* Do ACK if required */
1186 if (timeout) {
1187 status = ql_mb_idc_ack(qdev);
1188 if (status)
1189 netif_err(qdev, drv, qdev->ndev,
1190 "Bug: No pending IDC!\n");
1191 } else {
1192 netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
1193 "IDC ACK not required\n");
1194 status = 0; /* success */
1195 }
1196 break;
1197
1198	/* These sub-commands are issued by another (FCoE)
1199	 * function to request an operation on the shared
1200	 * resource (the MPI environment). We don't issue
1201	 * them ourselves, so we simply ACK the
1202	 * request.
1203	 */
1204 case MB_CMD_IOP_RESTART_MPI:
1205 case MB_CMD_IOP_PREP_LINK_DOWN:
1206 /* Drop the link, reload the routing
1207 * table when link comes up.
1208 */
1209 ql_link_off(qdev);
1210 set_bit(QL_CAM_RT_SET, &qdev->flags);
1211 /* Fall through. */
1212 case MB_CMD_IOP_DVR_START:
1213 case MB_CMD_IOP_FLASH_ACC:
1214 case MB_CMD_IOP_CORE_DUMP_MPI:
1215 case MB_CMD_IOP_PREP_UPDATE_MPI:
1216 case MB_CMD_IOP_COMP_UPDATE_MPI:
1217 case MB_CMD_IOP_NONE: /* an IDC without params */
1218 /* Do ACK if required */
1219 if (timeout) {
1220 status = ql_mb_idc_ack(qdev);
1221 if (status)
1222 netif_err(qdev, drv, qdev->ndev,
1223 "Bug: No pending IDC!\n");
1224 } else {
1225 netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
1226 "IDC ACK not required\n");
1227 status = 0; /* success */
1228 }
1229 break;
1230 }
1231}
1232
1233void ql_mpi_work(struct work_struct *work)
1234{
1235 struct ql_adapter *qdev =
1236 container_of(work, struct ql_adapter, mpi_work.work);
1237 struct mbox_params mbc;
1238 struct mbox_params *mbcp = &mbc;
1239 int err = 0;
1240
1241 mutex_lock(&qdev->mpi_mutex);
1242 /* Begin polled mode for MPI */
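	/* The upper half of INTR_MASK selects which bits to update;
	 * writing INTR_MASK_PI there with the low bit clear masks the
	 * MPI processor interrupt while we poll STS_PI below.
	 */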
1243 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
1244
1245 while (ql_read32(qdev, STS) & STS_PI) {
1246 memset(mbcp, 0, sizeof(struct mbox_params));
1247 mbcp->out_count = 1;
1248 /* Don't continue if an async event
1249 * did not complete properly.
1250 */
1251 err = ql_mpi_handler(qdev, mbcp);
1252 if (err)
1253 break;
1254 }
1255
1256 /* End polled mode for MPI */
1257 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
1258 mutex_unlock(&qdev->mpi_mutex);
1259 ql_enable_completion_interrupt(qdev, 0);
1260}
1261
1262void ql_mpi_reset_work(struct work_struct *work)
1263{
1264 struct ql_adapter *qdev =
1265 container_of(work, struct ql_adapter, mpi_reset_work.work);
1266 cancel_delayed_work_sync(&qdev->mpi_work);
1267 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
1268 cancel_delayed_work_sync(&qdev->mpi_idc_work);
1269 /* If we're not the dominant NIC function,
1270 * then there is nothing to do.
1271 */
1272 if (!ql_own_firmware(qdev)) {
1273 netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
1274 return;
1275 }
1276
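	/* ql_core_dump() returns 0 on success; if the dump succeeded,
	 * hand it off to the mpi_core_to_log worker.
	 */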
1277 if (!ql_core_dump(qdev, qdev->mpi_coredump)) {
1278 netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n");
1279 qdev->core_is_dumped = 1;
1280 queue_delayed_work(qdev->workqueue,
1281 &qdev->mpi_core_to_log, 5 * HZ);
1282 }
1283 ql_soft_reset_mpi_risc(qdev);
1284}