Diffstat (limited to 'drivers/net/qlge')
-rw-r--r--  drivers/net/qlge/Makefile            7
-rw-r--r--  drivers/net/qlge/qlge.h           1593
-rw-r--r--  drivers/net/qlge/qlge_dbg.c        858
-rw-r--r--  drivers/net/qlge/qlge_ethtool.c    415
-rw-r--r--  drivers/net/qlge/qlge_main.c      3954
-rw-r--r--  drivers/net/qlge/qlge_mpi.c        150
6 files changed, 6977 insertions, 0 deletions
diff --git a/drivers/net/qlge/Makefile b/drivers/net/qlge/Makefile
new file mode 100644
index 000000000000..8a197658d76f
--- /dev/null
+++ b/drivers/net/qlge/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile for the Qlogic 10GbE PCI Express ethernet driver
3#
4
5obj-$(CONFIG_QLGE) += qlge.o
6
7qlge-objs := qlge_main.o qlge_dbg.o qlge_mpi.o qlge_ethtool.o
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
new file mode 100644
index 000000000000..c37ea436c918
--- /dev/null
+++ b/drivers/net/qlge/qlge.h
@@ -0,0 +1,1593 @@
1/*
2 * QLogic QLA41xx NIC HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qlge for copyright and licensing details.
6 */
7#ifndef _QLGE_H_
8#define _QLGE_H_
9
10#include <linux/pci.h>
11#include <linux/netdevice.h>
12
13/*
14 * General definitions...
15 */
16#define DRV_NAME "qlge"
17#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
18#define DRV_VERSION "v1.00.00-b3"
19
20#define PFX "qlge: "
21#define QPRINTK(qdev, nlevel, klevel, fmt, args...) \
22 do { \
23 if (!((qdev)->msg_enable & NETIF_MSG_##nlevel)) \
24 ; \
25 else \
26 dev_printk(KERN_##klevel, &((qdev)->pdev->dev), \
27 "%s: " fmt, __func__, ##args); \
28 } while (0)
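As a usage note, the QPRINTK() wrapper above pastes its nlevel argument onto NETIF_MSG_ and its klevel argument onto KERN_, so a hypothetical call site (message text and level chosen only for illustration) would look like:

	/* Printed at KERN_ERR severity, but only when NETIF_MSG_IFUP is set
	 * in qdev->msg_enable.
	 */
	QPRINTK(qdev, IFUP, ERR, "Failed to allocate rx ring %d.\n", i);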
29
30#define QLGE_VENDOR_ID 0x1077
31#define QLGE_DEVICE_ID1 0x8012
32#define QLGE_DEVICE_ID 0x8000
33
34#define MAX_RX_RINGS 128
35#define MAX_TX_RINGS 128
36
37#define NUM_TX_RING_ENTRIES 256
38#define NUM_RX_RING_ENTRIES 256
39
40#define NUM_SMALL_BUFFERS 512
41#define NUM_LARGE_BUFFERS 512
42
43#define SMALL_BUFFER_SIZE 256
44#define LARGE_BUFFER_SIZE PAGE_SIZE
45#define MAX_SPLIT_SIZE 1023
46#define QLGE_SB_PAD 32
47
48#define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */
49#define MAX_INTER_FRAME_WAIT 10 /* 10 usec max interframe-wait for coalescing */
50#define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT/2)
51#define UDELAY_COUNT 3
52#define UDELAY_DELAY 10
53
54
55#define TX_DESC_PER_IOCB 8
56/* The maximum number of frags we handle is based
57 * on PAGE_SIZE...
58 */
59#if (PAGE_SHIFT == 12) || (PAGE_SHIFT == 13) /* 4k & 8k pages */
60#define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2)
61#elif (PAGE_SHIFT == 16) /* 64k pages */
62#define TX_DESC_PER_OAL 0
63#endif
64
65#define DB_PAGE_SIZE 4096
66
67/*
68 * Processor Address Register (PROC_ADDR) bit definitions.
69 */
70enum {
71
72 /* Misc. stuff */
73 MAILBOX_COUNT = 16,
74
75 PROC_ADDR_RDY = (1 << 31),
76 PROC_ADDR_R = (1 << 30),
77 PROC_ADDR_ERR = (1 << 29),
78 PROC_ADDR_DA = (1 << 28),
79 PROC_ADDR_FUNC0_MBI = 0x00001180,
80 PROC_ADDR_FUNC0_MBO = (PROC_ADDR_FUNC0_MBI + MAILBOX_COUNT),
81 PROC_ADDR_FUNC0_CTL = 0x000011a1,
82 PROC_ADDR_FUNC2_MBI = 0x00001280,
83 PROC_ADDR_FUNC2_MBO = (PROC_ADDR_FUNC2_MBI + MAILBOX_COUNT),
84 PROC_ADDR_FUNC2_CTL = 0x000012a1,
85 PROC_ADDR_MPI_RISC = 0x00000000,
86 PROC_ADDR_MDE = 0x00010000,
87 PROC_ADDR_REGBLOCK = 0x00020000,
88 PROC_ADDR_RISC_REG = 0x00030000,
89};
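The RDY, R, and ERR bits above describe the usual indirect-access handshake for the PROC_ADDR/PROC_DATA pair: wait for ready, post the address (with the read bit for reads), wait for ready again, then pick up the data. A minimal sketch, assuming the ql_read32()/ql_write32()/ql_wait_reg_rdy() helpers declared later in this header and that ql_wait_reg_rdy() returns zero on success:

	/* Illustrative only: read one word from the MPI RISC address space. */
	static int example_read_proc_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
	{
		if (ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR))
			return -EIO;		/* ready bit never came up */
		ql_write32(qdev, PROC_ADDR, reg | PROC_ADDR_R);	/* request the read */
		if (ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR))
			return -EIO;
		*data = ql_read32(qdev, PROC_DATA);
		return 0;
	}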
90
91/*
92 * System Register (SYS) bit definitions.
93 */
94enum {
95 SYS_EFE = (1 << 0),
96 SYS_FAE = (1 << 1),
97 SYS_MDC = (1 << 2),
98 SYS_DST = (1 << 3),
99 SYS_DWC = (1 << 4),
100 SYS_EVW = (1 << 5),
101 SYS_OMP_DLY_MASK = 0x3f000000,
102 /*
103 * There are no values defined as of edit #15.
104 */
105 SYS_ODI = (1 << 14),
106};
107
108/*
109 * Reset/Failover Register (RST_FO) bit definitions.
110 */
111enum {
112 RST_FO_TFO = (1 << 0),
113 RST_FO_RR_MASK = 0x00060000,
114 RST_FO_RR_CQ_CAM = 0x00000000,
115 RST_FO_RR_DROP = 0x00000001,
116 RST_FO_RR_DQ = 0x00000002,
117 RST_FO_RR_RCV_FUNC_CQ = 0x00000003,
118 RST_FO_FRB = (1 << 12),
119 RST_FO_MOP = (1 << 13),
120 RST_FO_REG = (1 << 14),
121 RST_FO_FR = (1 << 15),
122};
123
124/*
125 * Function Specific Control Register (FSC) bit definitions.
126 */
127enum {
128 FSC_DBRST_MASK = 0x00070000,
129 FSC_DBRST_256 = 0x00000000,
130 FSC_DBRST_512 = 0x00000001,
131 FSC_DBRST_768 = 0x00000002,
132 FSC_DBRST_1024 = 0x00000003,
133 FSC_DBL_MASK = 0x00180000,
134 FSC_DBL_DBRST = 0x00000000,
135 FSC_DBL_MAX_PLD = 0x00000008,
136 FSC_DBL_MAX_BRST = 0x00000010,
137 FSC_DBL_128_BYTES = 0x00000018,
138 FSC_EC = (1 << 5),
139 FSC_EPC_MASK = 0x00c00000,
140 FSC_EPC_INBOUND = (1 << 6),
141 FSC_EPC_OUTBOUND = (1 << 7),
142 FSC_VM_PAGESIZE_MASK = 0x07000000,
143 FSC_VM_PAGE_2K = 0x00000100,
144 FSC_VM_PAGE_4K = 0x00000200,
145 FSC_VM_PAGE_8K = 0x00000300,
146 FSC_VM_PAGE_64K = 0x00000600,
147 FSC_SH = (1 << 11),
148 FSC_DSB = (1 << 12),
149 FSC_STE = (1 << 13),
150 FSC_FE = (1 << 15),
151};
152
153/*
154 * Host Command Status Register (CSR) bit definitions.
155 */
156enum {
157 CSR_ERR_STS_MASK = 0x0000003f,
158 /*
159 * There are no values defined as of edit #15.
160 */
161 CSR_RR = (1 << 8),
162 CSR_HRI = (1 << 9),
163 CSR_RP = (1 << 10),
164 CSR_CMD_PARM_SHIFT = 22,
165 CSR_CMD_NOP = 0x00000000,
166 CSR_CMD_SET_RST = 0x1000000,
167 CSR_CMD_CLR_RST = 0x20000000,
168 CSR_CMD_SET_PAUSE = 0x30000000,
169 CSR_CMD_CLR_PAUSE = 0x40000000,
170 CSR_CMD_SET_H2R_INT = 0x50000000,
171 CSR_CMD_CLR_H2R_INT = 0x60000000,
172 CSR_CMD_PAR_EN = 0x70000000,
173 CSR_CMD_SET_BAD_PAR = 0x80000000,
174 CSR_CMD_CLR_BAD_PAR = 0x90000000,
175 CSR_CMD_CLR_R2PCI_INT = 0xa0000000,
176};
177
178/*
179 * Configuration Register (CFG) bit definitions.
180 */
181enum {
182 CFG_LRQ = (1 << 0),
183 CFG_DRQ = (1 << 1),
184 CFG_LR = (1 << 2),
185 CFG_DR = (1 << 3),
186 CFG_LE = (1 << 5),
187 CFG_LCQ = (1 << 6),
188 CFG_DCQ = (1 << 7),
189 CFG_Q_SHIFT = 8,
190 CFG_Q_MASK = 0x7f000000,
191};
192
193/*
194 * Status Register (STS) bit definitions.
195 */
196enum {
197 STS_FE = (1 << 0),
198 STS_PI = (1 << 1),
199 STS_PL0 = (1 << 2),
200 STS_PL1 = (1 << 3),
201 STS_PI0 = (1 << 4),
202 STS_PI1 = (1 << 5),
203 STS_FUNC_ID_MASK = 0x000000c0,
204 STS_FUNC_ID_SHIFT = 6,
205 STS_F0E = (1 << 8),
206 STS_F1E = (1 << 9),
207 STS_F2E = (1 << 10),
208 STS_F3E = (1 << 11),
209 STS_NFE = (1 << 12),
210};
211
212/*
213 * Interrupt Enable Register (INTR_EN) bit definitions.
214 */
215enum {
216 INTR_EN_INTR_MASK = 0x007f0000,
217 INTR_EN_TYPE_MASK = 0x03000000,
218 INTR_EN_TYPE_ENABLE = 0x00000100,
219 INTR_EN_TYPE_DISABLE = 0x00000200,
220 INTR_EN_TYPE_READ = 0x00000300,
221 INTR_EN_IHD = (1 << 13),
222 INTR_EN_IHD_MASK = (INTR_EN_IHD << 16),
223 INTR_EN_EI = (1 << 14),
224 INTR_EN_EN = (1 << 15),
225};
226
227/*
228 * Interrupt Mask Register (INTR_MASK) bit definitions.
229 */
230enum {
231 INTR_MASK_PI = (1 << 0),
232 INTR_MASK_HL0 = (1 << 1),
233 INTR_MASK_LH0 = (1 << 2),
234 INTR_MASK_HL1 = (1 << 3),
235 INTR_MASK_LH1 = (1 << 4),
236 INTR_MASK_SE = (1 << 5),
237 INTR_MASK_LSC = (1 << 6),
238 INTR_MASK_MC = (1 << 7),
239 INTR_MASK_LINK_IRQS = INTR_MASK_LSC | INTR_MASK_SE | INTR_MASK_MC,
240};
241
242/*
243 * Register (REV_ID) bit definitions.
244 */
245enum {
246 REV_ID_MASK = 0x0000000f,
247 REV_ID_NICROLL_SHIFT = 0,
248 REV_ID_NICREV_SHIFT = 4,
249 REV_ID_XGROLL_SHIFT = 8,
250 REV_ID_XGREV_SHIFT = 12,
251 REV_ID_CHIPREV_SHIFT = 28,
252};
253
254/*
255 * Force ECC Error Register (FRC_ECC_ERR) bit definitions.
256 */
257enum {
258 FRC_ECC_ERR_VW = (1 << 12),
259 FRC_ECC_ERR_VB = (1 << 13),
260 FRC_ECC_ERR_NI = (1 << 14),
261 FRC_ECC_ERR_NO = (1 << 15),
262 FRC_ECC_PFE_SHIFT = 16,
263 FRC_ECC_ERR_DO = (1 << 18),
264 FRC_ECC_P14 = (1 << 19),
265};
266
267/*
268 * Error Status Register (ERR_STS) bit definitions.
269 */
270enum {
271 ERR_STS_NOF = (1 << 0),
272 ERR_STS_NIF = (1 << 1),
273 ERR_STS_DRP = (1 << 2),
274 ERR_STS_XGP = (1 << 3),
275 ERR_STS_FOU = (1 << 4),
276 ERR_STS_FOC = (1 << 5),
277 ERR_STS_FOF = (1 << 6),
278 ERR_STS_FIU = (1 << 7),
279 ERR_STS_FIC = (1 << 8),
280 ERR_STS_FIF = (1 << 9),
281 ERR_STS_MOF = (1 << 10),
282 ERR_STS_TA = (1 << 11),
283 ERR_STS_MA = (1 << 12),
284 ERR_STS_MPE = (1 << 13),
285 ERR_STS_SCE = (1 << 14),
286 ERR_STS_STE = (1 << 15),
287 ERR_STS_FOW = (1 << 16),
288 ERR_STS_UE = (1 << 17),
289 ERR_STS_MCH = (1 << 26),
290 ERR_STS_LOC_SHIFT = 27,
291};
292
293/*
294 * RAM Debug Address Register (RAM_DBG_ADDR) bit definitions.
295 */
296enum {
297 RAM_DBG_ADDR_FW = (1 << 30),
298 RAM_DBG_ADDR_FR = (1 << 31),
299};
300
301/*
302 * Semaphore Register (SEM) bit definitions.
303 */
304enum {
305 /*
306 * Example:
307 * reg = SEM_XGMAC0_MASK | (SEM_SET << SEM_XGMAC0_SHIFT)
308 */
309 SEM_CLEAR = 0,
310 SEM_SET = 1,
311 SEM_FORCE = 3,
312 SEM_XGMAC0_SHIFT = 0,
313 SEM_XGMAC1_SHIFT = 2,
314 SEM_ICB_SHIFT = 4,
315 SEM_MAC_ADDR_SHIFT = 6,
316 SEM_FLASH_SHIFT = 8,
317 SEM_PROBE_SHIFT = 10,
318 SEM_RT_IDX_SHIFT = 12,
319 SEM_PROC_REG_SHIFT = 14,
320 SEM_XGMAC0_MASK = 0x00030000,
321 SEM_XGMAC1_MASK = 0x000c0000,
322 SEM_ICB_MASK = 0x00300000,
323 SEM_MAC_ADDR_MASK = 0x00c00000,
324 SEM_FLASH_MASK = 0x03000000,
325 SEM_PROBE_MASK = 0x0c000000,
326 SEM_RT_IDX_MASK = 0x30000000,
327 SEM_PROC_REG_MASK = 0xc0000000,
328};
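The shift/mask pairs above feed the hardware-semaphore helpers declared near the end of this header; the acquire/access/release pattern used elsewhere in this patch (see ql_dump_xgmac_control_regs() in qlge_dbg.c below) is simply:

	/* Take the XGMAC semaphore for this function before touching its
	 * registers, then drop it when done.
	 */
	if (ql_sem_spinlock(qdev, qdev->xg_sem_mask))
		return;			/* semaphore not granted */
	/* ... XGMAC register access goes here ... */
	ql_sem_unlock(qdev, qdev->xg_sem_mask);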
329
330/*
331 * 10G MAC Address Register (XGMAC_ADDR) bit definitions.
332 */
333enum {
334 XGMAC_ADDR_RDY = (1 << 31),
335 XGMAC_ADDR_R = (1 << 30),
336 XGMAC_ADDR_XME = (1 << 29),
337
338 /* XGMAC control registers */
339 PAUSE_SRC_LO = 0x00000100,
340 PAUSE_SRC_HI = 0x00000104,
341 GLOBAL_CFG = 0x00000108,
342 GLOBAL_CFG_RESET = (1 << 0),
343 GLOBAL_CFG_JUMBO = (1 << 6),
344 GLOBAL_CFG_TX_STAT_EN = (1 << 10),
345 GLOBAL_CFG_RX_STAT_EN = (1 << 11),
346 TX_CFG = 0x0000010c,
347 TX_CFG_RESET = (1 << 0),
348 TX_CFG_EN = (1 << 1),
349 TX_CFG_PREAM = (1 << 2),
350 RX_CFG = 0x00000110,
351 RX_CFG_RESET = (1 << 0),
352 RX_CFG_EN = (1 << 1),
353 RX_CFG_PREAM = (1 << 2),
354 FLOW_CTL = 0x0000011c,
355 PAUSE_OPCODE = 0x00000120,
356 PAUSE_TIMER = 0x00000124,
357 PAUSE_FRM_DEST_LO = 0x00000128,
358 PAUSE_FRM_DEST_HI = 0x0000012c,
359 MAC_TX_PARAMS = 0x00000134,
360 MAC_TX_PARAMS_JUMBO = (1 << 31),
361 MAC_TX_PARAMS_SIZE_SHIFT = 16,
362 MAC_RX_PARAMS = 0x00000138,
363 MAC_SYS_INT = 0x00000144,
364 MAC_SYS_INT_MASK = 0x00000148,
365 MAC_MGMT_INT = 0x0000014c,
366 MAC_MGMT_IN_MASK = 0x00000150,
367 EXT_ARB_MODE = 0x000001fc,
368
369 /* XGMAC TX statistics registers */
370 TX_PKTS = 0x00000200,
371 TX_BYTES = 0x00000208,
372 TX_MCAST_PKTS = 0x00000210,
373 TX_BCAST_PKTS = 0x00000218,
374 TX_UCAST_PKTS = 0x00000220,
375 TX_CTL_PKTS = 0x00000228,
376 TX_PAUSE_PKTS = 0x00000230,
377 TX_64_PKT = 0x00000238,
378 TX_65_TO_127_PKT = 0x00000240,
379 TX_128_TO_255_PKT = 0x00000248,
380 TX_256_511_PKT = 0x00000250,
381 TX_512_TO_1023_PKT = 0x00000258,
382 TX_1024_TO_1518_PKT = 0x00000260,
383 TX_1519_TO_MAX_PKT = 0x00000268,
384 TX_UNDERSIZE_PKT = 0x00000270,
385 TX_OVERSIZE_PKT = 0x00000278,
386
387 /* XGMAC statistics control registers */
388 RX_HALF_FULL_DET = 0x000002a0,
389 TX_HALF_FULL_DET = 0x000002a4,
390 RX_OVERFLOW_DET = 0x000002a8,
391 TX_OVERFLOW_DET = 0x000002ac,
392 RX_HALF_FULL_MASK = 0x000002b0,
393 TX_HALF_FULL_MASK = 0x000002b4,
394 RX_OVERFLOW_MASK = 0x000002b8,
395 TX_OVERFLOW_MASK = 0x000002bc,
396 STAT_CNT_CTL = 0x000002c0,
397 STAT_CNT_CTL_CLEAR_TX = (1 << 0),
398 STAT_CNT_CTL_CLEAR_RX = (1 << 1),
399 AUX_RX_HALF_FULL_DET = 0x000002d0,
400 AUX_TX_HALF_FULL_DET = 0x000002d4,
401 AUX_RX_OVERFLOW_DET = 0x000002d8,
402 AUX_TX_OVERFLOW_DET = 0x000002dc,
403 AUX_RX_HALF_FULL_MASK = 0x000002f0,
404 AUX_TX_HALF_FULL_MASK = 0x000002f4,
405 AUX_RX_OVERFLOW_MASK = 0x000002f8,
406 AUX_TX_OVERFLOW_MASK = 0x000002fc,
407
408 /* XGMAC RX statistics registers */
409 RX_BYTES = 0x00000300,
410 RX_BYTES_OK = 0x00000308,
411 RX_PKTS = 0x00000310,
412 RX_PKTS_OK = 0x00000318,
413 RX_BCAST_PKTS = 0x00000320,
414 RX_MCAST_PKTS = 0x00000328,
415 RX_UCAST_PKTS = 0x00000330,
416 RX_UNDERSIZE_PKTS = 0x00000338,
417 RX_OVERSIZE_PKTS = 0x00000340,
418 RX_JABBER_PKTS = 0x00000348,
419 RX_UNDERSIZE_FCERR_PKTS = 0x00000350,
420 RX_DROP_EVENTS = 0x00000358,
421 RX_FCERR_PKTS = 0x00000360,
422 RX_ALIGN_ERR = 0x00000368,
423 RX_SYMBOL_ERR = 0x00000370,
424 RX_MAC_ERR = 0x00000378,
425 RX_CTL_PKTS = 0x00000380,
426 RX_PAUSE_PKTS = 0x00000384,
427 RX_64_PKTS = 0x00000390,
428 RX_65_TO_127_PKTS = 0x00000398,
429 RX_128_255_PKTS = 0x000003a0,
430 RX_256_511_PKTS = 0x000003a8,
431 RX_512_TO_1023_PKTS = 0x000003b0,
432 RX_1024_TO_1518_PKTS = 0x000003b8,
433 RX_1519_TO_MAX_PKTS = 0x000003c0,
434 RX_LEN_ERR_PKTS = 0x000003c8,
435
436 /* XGMAC MDIO control registers */
437 MDIO_TX_DATA = 0x00000400,
438 MDIO_RX_DATA = 0x00000410,
439 MDIO_CMD = 0x00000420,
440 MDIO_PHY_ADDR = 0x00000430,
441 MDIO_PORT = 0x00000440,
442 MDIO_STATUS = 0x00000450,
443
444 /* XGMAC AUX statistics registers */
445};
446
447/*
448 * Enhanced Transmission Schedule Registers (NIC_ETS,CNA_ETS) bit definitions.
449 */
450enum {
451 ETS_QUEUE_SHIFT = 29,
452 ETS_REF = (1 << 26),
453 ETS_RS = (1 << 27),
454 ETS_P = (1 << 28),
455 ETS_FC_COS_SHIFT = 23,
456};
457
458/*
459 * Flash Address Register (FLASH_ADDR) bit definitions.
460 */
461enum {
462 FLASH_ADDR_RDY = (1 << 31),
463 FLASH_ADDR_R = (1 << 30),
464 FLASH_ADDR_ERR = (1 << 29),
465};
466
467/*
468 * Stop CQ Processing Register (CQ_STOP) bit definitions.
469 */
470enum {
471 CQ_STOP_QUEUE_MASK = (0x007f0000),
472 CQ_STOP_TYPE_MASK = (0x03000000),
473 CQ_STOP_TYPE_START = 0x00000100,
474 CQ_STOP_TYPE_STOP = 0x00000200,
475 CQ_STOP_TYPE_READ = 0x00000300,
476 CQ_STOP_EN = (1 << 15),
477};
478
479/*
480 * MAC Protocol Address Index Register (MAC_ADDR_IDX) bit definitions.
481 */
482enum {
483 MAC_ADDR_IDX_SHIFT = 4,
484 MAC_ADDR_TYPE_SHIFT = 16,
485 MAC_ADDR_TYPE_MASK = 0x000f0000,
486 MAC_ADDR_TYPE_CAM_MAC = 0x00000000,
487 MAC_ADDR_TYPE_MULTI_MAC = 0x00010000,
488 MAC_ADDR_TYPE_VLAN = 0x00020000,
489 MAC_ADDR_TYPE_MULTI_FLTR = 0x00030000,
490 MAC_ADDR_TYPE_FC_MAC = 0x00040000,
491 MAC_ADDR_TYPE_MGMT_MAC = 0x00050000,
492 MAC_ADDR_TYPE_MGMT_VLAN = 0x00060000,
493 MAC_ADDR_TYPE_MGMT_V4 = 0x00070000,
494 MAC_ADDR_TYPE_MGMT_V6 = 0x00080000,
495 MAC_ADDR_TYPE_MGMT_TU_DP = 0x00090000,
496 MAC_ADDR_ADR = (1 << 25),
497 MAC_ADDR_RS = (1 << 26),
498 MAC_ADDR_E = (1 << 27),
499 MAC_ADDR_MR = (1 << 30),
500 MAC_ADDR_MW = (1 << 31),
501 MAX_MULTICAST_ENTRIES = 32,
502};
503
504/*
505 * MAC Protocol Address Index Register (SPLT_HDR) bit definitions.
506 */
507enum {
508 SPLT_HDR_EP = (1 << 31),
509};
510
511/*
512 * FCoE Receive Configuration Register (FC_RCV_CFG) bit definitions.
513 */
514enum {
515 FC_RCV_CFG_ECT = (1 << 15),
516 FC_RCV_CFG_DFH = (1 << 20),
517 FC_RCV_CFG_DVF = (1 << 21),
518 FC_RCV_CFG_RCE = (1 << 27),
519 FC_RCV_CFG_RFE = (1 << 28),
520 FC_RCV_CFG_TEE = (1 << 29),
521 FC_RCV_CFG_TCE = (1 << 30),
522 FC_RCV_CFG_TFE = (1 << 31),
523};
524
525/*
526 * NIC Receive Configuration Register (NIC_RCV_CFG) bit definitions.
527 */
528enum {
529 NIC_RCV_CFG_PPE = (1 << 0),
530 NIC_RCV_CFG_VLAN_MASK = 0x00060000,
531 NIC_RCV_CFG_VLAN_ALL = 0x00000000,
532 NIC_RCV_CFG_VLAN_MATCH_ONLY = 0x00000002,
533 NIC_RCV_CFG_VLAN_MATCH_AND_NON = 0x00000004,
534 NIC_RCV_CFG_VLAN_NONE_AND_NON = 0x00000006,
535 NIC_RCV_CFG_RV = (1 << 3),
536 NIC_RCV_CFG_DFQ_MASK = (0x7f000000),
537 NIC_RCV_CFG_DFQ_SHIFT = 8,
538 NIC_RCV_CFG_DFQ = 0, /* HARDCODE default queue to 0. */
539};
540
541/*
542 * Mgmt Receive Configuration Register (MGMT_RCV_CFG) bit definitions.
543 */
544enum {
545 MGMT_RCV_CFG_ARP = (1 << 0),
546 MGMT_RCV_CFG_DHC = (1 << 1),
547 MGMT_RCV_CFG_DHS = (1 << 2),
548 MGMT_RCV_CFG_NP = (1 << 3),
549 MGMT_RCV_CFG_I6N = (1 << 4),
550 MGMT_RCV_CFG_I6R = (1 << 5),
551 MGMT_RCV_CFG_DH6 = (1 << 6),
552 MGMT_RCV_CFG_UD1 = (1 << 7),
553 MGMT_RCV_CFG_UD0 = (1 << 8),
554 MGMT_RCV_CFG_BCT = (1 << 9),
555 MGMT_RCV_CFG_MCT = (1 << 10),
556 MGMT_RCV_CFG_DM = (1 << 11),
557 MGMT_RCV_CFG_RM = (1 << 12),
558 MGMT_RCV_CFG_STL = (1 << 13),
559 MGMT_RCV_CFG_VLAN_MASK = 0xc0000000,
560 MGMT_RCV_CFG_VLAN_ALL = 0x00000000,
561 MGMT_RCV_CFG_VLAN_MATCH_ONLY = 0x00004000,
562 MGMT_RCV_CFG_VLAN_MATCH_AND_NON = 0x00008000,
563 MGMT_RCV_CFG_VLAN_NONE_AND_NON = 0x0000c000,
564};
565
566/*
567 * Routing Index Register (RT_IDX) bit definitions.
568 */
569enum {
570 RT_IDX_IDX_SHIFT = 8,
571 RT_IDX_TYPE_MASK = 0x000f0000,
572 RT_IDX_TYPE_RT = 0x00000000,
573 RT_IDX_TYPE_RT_INV = 0x00010000,
574 RT_IDX_TYPE_NICQ = 0x00020000,
575 RT_IDX_TYPE_NICQ_INV = 0x00030000,
576 RT_IDX_DST_MASK = 0x00700000,
577 RT_IDX_DST_RSS = 0x00000000,
578 RT_IDX_DST_CAM_Q = 0x00100000,
579 RT_IDX_DST_COS_Q = 0x00200000,
580 RT_IDX_DST_DFLT_Q = 0x00300000,
581 RT_IDX_DST_DEST_Q = 0x00400000,
582 RT_IDX_RS = (1 << 26),
583 RT_IDX_E = (1 << 27),
584 RT_IDX_MR = (1 << 30),
585 RT_IDX_MW = (1 << 31),
586
587 /* Nic Queue format - type 2 bits */
588 RT_IDX_BCAST = (1 << 0),
589 RT_IDX_MCAST = (1 << 1),
590 RT_IDX_MCAST_MATCH = (1 << 2),
591 RT_IDX_MCAST_REG_MATCH = (1 << 3),
592 RT_IDX_MCAST_HASH_MATCH = (1 << 4),
593 RT_IDX_FC_MACH = (1 << 5),
594 RT_IDX_ETH_FCOE = (1 << 6),
595 RT_IDX_CAM_HIT = (1 << 7),
596 RT_IDX_CAM_BIT0 = (1 << 8),
597 RT_IDX_CAM_BIT1 = (1 << 9),
598 RT_IDX_VLAN_TAG = (1 << 10),
599 RT_IDX_VLAN_MATCH = (1 << 11),
600 RT_IDX_VLAN_FILTER = (1 << 12),
601 RT_IDX_ETH_SKIP1 = (1 << 13),
602 RT_IDX_ETH_SKIP2 = (1 << 14),
603 RT_IDX_BCAST_MCAST_MATCH = (1 << 15),
604 RT_IDX_802_3 = (1 << 16),
605 RT_IDX_LLDP = (1 << 17),
606 RT_IDX_UNUSED018 = (1 << 18),
607 RT_IDX_UNUSED019 = (1 << 19),
608 RT_IDX_UNUSED20 = (1 << 20),
609 RT_IDX_UNUSED21 = (1 << 21),
610 RT_IDX_ERR = (1 << 22),
611 RT_IDX_VALID = (1 << 23),
612 RT_IDX_TU_CSUM_ERR = (1 << 24),
613 RT_IDX_IP_CSUM_ERR = (1 << 25),
614 RT_IDX_MAC_ERR = (1 << 26),
615 RT_IDX_RSS_TCP6 = (1 << 27),
616 RT_IDX_RSS_TCP4 = (1 << 28),
617 RT_IDX_RSS_IPV6 = (1 << 29),
618 RT_IDX_RSS_IPV4 = (1 << 30),
619 RT_IDX_RSS_MATCH = (1 << 31),
620
621 /* Hierarchy for the NIC Queue Mask */
622 RT_IDX_ALL_ERR_SLOT = 0,
623 RT_IDX_MAC_ERR_SLOT = 0,
624 RT_IDX_IP_CSUM_ERR_SLOT = 1,
625 RT_IDX_TCP_UDP_CSUM_ERR_SLOT = 2,
626 RT_IDX_BCAST_SLOT = 3,
627 RT_IDX_MCAST_MATCH_SLOT = 4,
628 RT_IDX_ALLMULTI_SLOT = 5,
629 RT_IDX_UNUSED6_SLOT = 6,
630 RT_IDX_UNUSED7_SLOT = 7,
631 RT_IDX_RSS_MATCH_SLOT = 8,
632 RT_IDX_RSS_IPV4_SLOT = 8,
633 RT_IDX_RSS_IPV6_SLOT = 9,
634 RT_IDX_RSS_TCP4_SLOT = 10,
635 RT_IDX_RSS_TCP6_SLOT = 11,
636 RT_IDX_CAM_HIT_SLOT = 12,
637 RT_IDX_UNUSED013 = 13,
638 RT_IDX_UNUSED014 = 14,
639 RT_IDX_PROMISCUOUS_SLOT = 15,
640 RT_IDX_MAX_SLOTS = 16,
641};
642
643/*
644 * Control Register Set Map
645 */
646enum {
647 PROC_ADDR = 0, /* Use semaphore */
648 PROC_DATA = 0x04, /* Use semaphore */
649 SYS = 0x08,
650 RST_FO = 0x0c,
651 FSC = 0x10,
652 CSR = 0x14,
653 LED = 0x18,
654 ICB_RID = 0x1c, /* Use semaphore */
655 ICB_L = 0x20, /* Use semaphore */
656 ICB_H = 0x24, /* Use semaphore */
657 CFG = 0x28,
658 BIOS_ADDR = 0x2c,
659 STS = 0x30,
660 INTR_EN = 0x34,
661 INTR_MASK = 0x38,
662 ISR1 = 0x3c,
663 ISR2 = 0x40,
664 ISR3 = 0x44,
665 ISR4 = 0x48,
666 REV_ID = 0x4c,
667 FRC_ECC_ERR = 0x50,
668 ERR_STS = 0x54,
669 RAM_DBG_ADDR = 0x58,
670 RAM_DBG_DATA = 0x5c,
671 ECC_ERR_CNT = 0x60,
672 SEM = 0x64,
673 GPIO_1 = 0x68, /* Use semaphore */
674 GPIO_2 = 0x6c, /* Use semaphore */
675 GPIO_3 = 0x70, /* Use semaphore */
676 RSVD2 = 0x74,
677 XGMAC_ADDR = 0x78, /* Use semaphore */
678 XGMAC_DATA = 0x7c, /* Use semaphore */
679 NIC_ETS = 0x80,
680 CNA_ETS = 0x84,
681 FLASH_ADDR = 0x88, /* Use semaphore */
682 FLASH_DATA = 0x8c, /* Use semaphore */
683 CQ_STOP = 0x90,
684 PAGE_TBL_RID = 0x94,
685 WQ_PAGE_TBL_LO = 0x98,
686 WQ_PAGE_TBL_HI = 0x9c,
687 CQ_PAGE_TBL_LO = 0xa0,
688 CQ_PAGE_TBL_HI = 0xa4,
689 MAC_ADDR_IDX = 0xa8, /* Use semaphore */
690 MAC_ADDR_DATA = 0xac, /* Use semaphore */
691 COS_DFLT_CQ1 = 0xb0,
692 COS_DFLT_CQ2 = 0xb4,
693 ETYPE_SKIP1 = 0xb8,
694 ETYPE_SKIP2 = 0xbc,
695 SPLT_HDR = 0xc0,
696 FC_PAUSE_THRES = 0xc4,
697 NIC_PAUSE_THRES = 0xc8,
698 FC_ETHERTYPE = 0xcc,
699 FC_RCV_CFG = 0xd0,
700 NIC_RCV_CFG = 0xd4,
701 FC_COS_TAGS = 0xd8,
702 NIC_COS_TAGS = 0xdc,
703 MGMT_RCV_CFG = 0xe0,
704 RT_IDX = 0xe4,
705 RT_DATA = 0xe8,
706 RSVD7 = 0xec,
707 XG_SERDES_ADDR = 0xf0,
708 XG_SERDES_DATA = 0xf4,
709 PRB_MX_ADDR = 0xf8, /* Use semaphore */
710 PRB_MX_DATA = 0xfc, /* Use semaphore */
711};
712
713/*
714 * CAM output format.
715 */
716enum {
717 CAM_OUT_ROUTE_FC = 0,
718 CAM_OUT_ROUTE_NIC = 1,
719 CAM_OUT_FUNC_SHIFT = 2,
720 CAM_OUT_RV = (1 << 4),
721 CAM_OUT_SH = (1 << 15),
722 CAM_OUT_CQ_ID_SHIFT = 5,
723};
724
725/*
726 * Mailbox definitions
727 */
728enum {
729 /* Asynchronous Event Notifications */
730 AEN_SYS_ERR = 0x00008002,
731 AEN_LINK_UP = 0x00008011,
732 AEN_LINK_DOWN = 0x00008012,
733 AEN_IDC_CMPLT = 0x00008100,
734 AEN_IDC_REQ = 0x00008101,
735 AEN_FW_INIT_DONE = 0x00008400,
736 AEN_FW_INIT_FAIL = 0x00008401,
737
738 /* Mailbox Command Opcodes. */
739 MB_CMD_NOP = 0x00000000,
740 MB_CMD_EX_FW = 0x00000002,
741 MB_CMD_MB_TEST = 0x00000006,
742 MB_CMD_CSUM_TEST = 0x00000007, /* Verify Checksum */
743 MB_CMD_ABOUT_FW = 0x00000008,
744 MB_CMD_LOAD_RISC_RAM = 0x0000000b,
745 MB_CMD_DUMP_RISC_RAM = 0x0000000c,
746 MB_CMD_WRITE_RAM = 0x0000000d,
747 MB_CMD_READ_RAM = 0x0000000f,
748 MB_CMD_STOP_FW = 0x00000014,
749 MB_CMD_MAKE_SYS_ERR = 0x0000002a,
750 MB_CMD_INIT_FW = 0x00000060,
751 MB_CMD_GET_INIT_CB = 0x00000061,
752 MB_CMD_GET_FW_STATE = 0x00000069,
753 MB_CMD_IDC_REQ = 0x00000100, /* Inter-Driver Communication */
754 MB_CMD_IDC_ACK = 0x00000101, /* Inter-Driver Communication */
755 MB_CMD_SET_WOL_MODE = 0x00000110, /* Wake On Lan */
756 MB_WOL_DISABLE = 0x00000000,
757 MB_WOL_MAGIC_PKT = 0x00000001,
758 MB_WOL_FLTR = 0x00000002,
759 MB_WOL_UCAST = 0x00000004,
760 MB_WOL_MCAST = 0x00000008,
761 MB_WOL_BCAST = 0x00000010,
762 MB_WOL_LINK_UP = 0x00000020,
763 MB_WOL_LINK_DOWN = 0x00000040,
764 MB_CMD_SET_WOL_FLTR = 0x00000111, /* Wake On Lan Filter */
765 MB_CMD_CLEAR_WOL_FLTR = 0x00000112, /* Wake On Lan Filter */
766 MB_CMD_SET_WOL_MAGIC = 0x00000113, /* Wake On Lan Magic Packet */
767 MB_CMD_CLEAR_WOL_MAGIC = 0x00000114, /* Wake On Lan Magic Packet */
768 MB_CMD_PORT_RESET = 0x00000120,
769 MB_CMD_SET_PORT_CFG = 0x00000122,
770 MB_CMD_GET_PORT_CFG = 0x00000123,
771 MB_CMD_SET_ASIC_VOLTS = 0x00000130,
772 MB_CMD_GET_SNS_DATA = 0x00000131, /* Temp and Volt Sense data. */
773
774 /* Mailbox Command Status. */
775 MB_CMD_STS_GOOD = 0x00004000, /* Success. */
776 MB_CMD_STS_INTRMDT = 0x00001000, /* Intermediate Complete. */
777 MB_CMD_STS_ERR = 0x00004005, /* Error. */
778};
779
780struct mbox_params {
781 u32 mbox_in[MAILBOX_COUNT];
782 u32 mbox_out[MAILBOX_COUNT];
783 int in_count;
784 int out_count;
785};
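mbox_params is the scratch block the driver fills in before kicking a mailbox command and reads back afterward. A hedged sketch of staging an MB_CMD_ABOUT_FW request (the dispatch routine itself lives in qlge_mpi.c and is not part of this hunk; the reply word count here is an assumption):

	/* Illustrative only: ask the firmware for its version information. */
	struct mbox_params mbc;

	memset(&mbc, 0, sizeof(mbc));
	mbc.in_count = 1;		/* one request word */
	mbc.out_count = 3;		/* assumed size of the reply */
	mbc.mbox_in[0] = MB_CMD_ABOUT_FW;
	/* ... hand &mbc to the mailbox dispatch code in qlge_mpi.c ... */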
786
787struct flash_params {
788 u8 dev_id_str[4];
789 u16 size;
790 u16 csum;
791 u16 ver;
792 u16 sub_dev_id;
793 u8 mac_addr[6];
794 u16 res;
795};
796
797
798/*
799 * doorbell space for the rx ring context
800 */
801struct rx_doorbell_context {
802 u32 cnsmr_idx; /* 0x00 */
803 u32 valid; /* 0x04 */
804 u32 reserved[4]; /* 0x08-0x14 */
805 u32 lbq_prod_idx; /* 0x18 */
806 u32 sbq_prod_idx; /* 0x1c */
807};
808
809/*
810 * doorbell space for the tx ring context
811 */
812struct tx_doorbell_context {
813 u32 prod_idx; /* 0x00 */
814 u32 valid; /* 0x04 */
815 u32 reserved[4]; /* 0x08-0x14 */
816 u32 lbq_prod_idx; /* 0x18 */
817 u32 sbq_prod_idx; /* 0x1c */
818};
819
820/* DATA STRUCTURES SHARED WITH HARDWARE. */
821
822struct bq_element {
823 u32 addr_lo;
824#define BQ_END 0x00000001
825#define BQ_CONT 0x00000002
826#define BQ_MASK 0x00000003
827 u32 addr_hi;
828} __attribute((packed));
829
830struct tx_buf_desc {
831 __le64 addr;
832 __le32 len;
833#define TX_DESC_LEN_MASK 0x000fffff
834#define TX_DESC_C 0x40000000
835#define TX_DESC_E 0x80000000
836} __attribute((packed));
837
838/*
839 * IOCB Definitions...
840 */
841
842#define OPCODE_OB_MAC_IOCB 0x01
843#define OPCODE_OB_MAC_TSO_IOCB 0x02
844#define OPCODE_IB_MAC_IOCB 0x20
845#define OPCODE_IB_MPI_IOCB 0x21
846#define OPCODE_IB_AE_IOCB 0x3f
847
848struct ob_mac_iocb_req {
849 u8 opcode;
850 u8 flags1;
851#define OB_MAC_IOCB_REQ_OI 0x01
852#define OB_MAC_IOCB_REQ_I 0x02
853#define OB_MAC_IOCB_REQ_D 0x08
854#define OB_MAC_IOCB_REQ_F 0x10
855 u8 flags2;
856 u8 flags3;
857#define OB_MAC_IOCB_DFP 0x02
858#define OB_MAC_IOCB_V 0x04
859 __le32 reserved1[2];
860 __le16 frame_len;
861#define OB_MAC_IOCB_LEN_MASK 0x3ffff
862 __le16 reserved2;
863 __le32 tid;
864 __le32 txq_idx;
865 __le32 reserved3;
866 __le16 vlan_tci;
867 __le16 reserved4;
868 struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
869} __attribute((packed));
870
871struct ob_mac_iocb_rsp {
872 u8 opcode; /* */
873 u8 flags1; /* */
874#define OB_MAC_IOCB_RSP_OI 0x01 /* */
875#define OB_MAC_IOCB_RSP_I 0x02 /* */
876#define OB_MAC_IOCB_RSP_E 0x08 /* */
877#define OB_MAC_IOCB_RSP_S 0x10 /* too Short */
878#define OB_MAC_IOCB_RSP_L 0x20 /* too Large */
879#define OB_MAC_IOCB_RSP_P 0x40 /* Padded */
880 u8 flags2; /* */
881 u8 flags3; /* */
882#define OB_MAC_IOCB_RSP_B 0x80 /* */
883 __le32 tid;
884 __le32 txq_idx;
885 __le32 reserved[13];
886} __attribute((packed));
887
888struct ob_mac_tso_iocb_req {
889 u8 opcode;
890 u8 flags1;
891#define OB_MAC_TSO_IOCB_OI 0x01
892#define OB_MAC_TSO_IOCB_I 0x02
893#define OB_MAC_TSO_IOCB_D 0x08
894#define OB_MAC_TSO_IOCB_IP4 0x40
895#define OB_MAC_TSO_IOCB_IP6 0x80
896 u8 flags2;
897#define OB_MAC_TSO_IOCB_LSO 0x20
898#define OB_MAC_TSO_IOCB_UC 0x40
899#define OB_MAC_TSO_IOCB_TC 0x80
900 u8 flags3;
901#define OB_MAC_TSO_IOCB_IC 0x01
902#define OB_MAC_TSO_IOCB_DFP 0x02
903#define OB_MAC_TSO_IOCB_V 0x04
904 __le32 reserved1[2];
905 __le32 frame_len;
906 __le32 tid;
907 __le32 txq_idx;
908 __le16 total_hdrs_len;
909 __le16 net_trans_offset;
910#define OB_MAC_TRANSPORT_HDR_SHIFT 6
911 __le16 vlan_tci;
912 __le16 mss;
913 struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
914} __attribute((packed));
915
916struct ob_mac_tso_iocb_rsp {
917 u8 opcode;
918 u8 flags1;
919#define OB_MAC_TSO_IOCB_RSP_OI 0x01
920#define OB_MAC_TSO_IOCB_RSP_I 0x02
921#define OB_MAC_TSO_IOCB_RSP_E 0x08
922#define OB_MAC_TSO_IOCB_RSP_S 0x10
923#define OB_MAC_TSO_IOCB_RSP_L 0x20
924#define OB_MAC_TSO_IOCB_RSP_P 0x40
925 u8 flags2; /* */
926 u8 flags3; /* */
927#define OB_MAC_TSO_IOCB_RSP_B 0x8000
928 __le32 tid;
929 __le32 txq_idx;
930 __le32 reserved2[13];
931} __attribute((packed));
932
933struct ib_mac_iocb_rsp {
934 u8 opcode; /* 0x20 */
935 u8 flags1;
936#define IB_MAC_IOCB_RSP_OI 0x01 /* Override intr delay */
937#define IB_MAC_IOCB_RSP_I 0x02 /* Disable Intr Generation */
938#define IB_MAC_IOCB_RSP_TE 0x04 /* Checksum error */
939#define IB_MAC_IOCB_RSP_NU 0x08 /* No checksum rcvd */
940#define IB_MAC_IOCB_RSP_IE 0x10 /* IPv4 checksum error */
941#define IB_MAC_IOCB_RSP_M_MASK 0x60 /* Multicast info */
942#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* Not mcast frame */
943#define IB_MAC_IOCB_RSP_M_HASH 0x20 /* HASH mcast frame */
944#define IB_MAC_IOCB_RSP_M_REG 0x40 /* Registered mcast frame */
945#define IB_MAC_IOCB_RSP_M_PROM 0x60 /* Promiscuous mcast frame */
946#define IB_MAC_IOCB_RSP_B 0x80 /* Broadcast frame */
947 u8 flags2;
948#define IB_MAC_IOCB_RSP_P 0x01 /* Promiscuous frame */
949#define IB_MAC_IOCB_RSP_V 0x02 /* Vlan tag present */
950#define IB_MAC_IOCB_RSP_ERR_MASK 0x1c /* */
951#define IB_MAC_IOCB_RSP_ERR_CODE_ERR 0x04
952#define IB_MAC_IOCB_RSP_ERR_OVERSIZE 0x08
953#define IB_MAC_IOCB_RSP_ERR_UNDERSIZE 0x10
954#define IB_MAC_IOCB_RSP_ERR_PREAMBLE 0x14
955#define IB_MAC_IOCB_RSP_ERR_FRAME_LEN 0x18
956#define IB_MAC_IOCB_RSP_ERR_CRC 0x1c
957#define IB_MAC_IOCB_RSP_U 0x20 /* UDP packet */
958#define IB_MAC_IOCB_RSP_T 0x40 /* TCP packet */
959#define IB_MAC_IOCB_RSP_FO 0x80 /* Failover port */
960 u8 flags3;
961#define IB_MAC_IOCB_RSP_RSS_MASK 0x07 /* RSS mask */
962#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* No RSS match */
963#define IB_MAC_IOCB_RSP_M_IPV4 0x04 /* IPv4 RSS match */
964#define IB_MAC_IOCB_RSP_M_IPV6 0x02 /* IPv6 RSS match */
965#define IB_MAC_IOCB_RSP_M_TCP_V4 0x05 /* TCP with IPv4 */
966#define IB_MAC_IOCB_RSP_M_TCP_V6 0x03 /* TCP with IPv6 */
967#define IB_MAC_IOCB_RSP_V4 0x08 /* IPV4 */
968#define IB_MAC_IOCB_RSP_V6 0x10 /* IPV6 */
969#define IB_MAC_IOCB_RSP_IH 0x20 /* Split after IP header */
970#define IB_MAC_IOCB_RSP_DS 0x40 /* data is in small buffer */
971#define IB_MAC_IOCB_RSP_DL 0x80 /* data is in large buffer */
972 __le32 data_len; /* */
973 __le32 data_addr_lo; /* */
974 __le32 data_addr_hi; /* */
975 __le32 rss; /* */
976 __le16 vlan_id; /* 12 bits */
977#define IB_MAC_IOCB_RSP_C 0x1000 /* VLAN CFI bit */
978#define IB_MAC_IOCB_RSP_COS_SHIFT 12 /* class of service value */
979
980 __le16 reserved1;
981 __le32 reserved2[6];
982 __le32 flags4;
983#define IB_MAC_IOCB_RSP_HV 0x20000000 /* */
984#define IB_MAC_IOCB_RSP_HS 0x40000000 /* */
985#define IB_MAC_IOCB_RSP_HL 0x80000000 /* */
986 __le32 hdr_len; /* */
987 __le32 hdr_addr_lo; /* */
988 __le32 hdr_addr_hi; /* */
989} __attribute((packed));
990
991struct ib_ae_iocb_rsp {
992 u8 opcode;
993 u8 flags1;
994#define IB_AE_IOCB_RSP_OI 0x01
995#define IB_AE_IOCB_RSP_I 0x02
996 u8 event;
997#define LINK_UP_EVENT 0x00
998#define LINK_DOWN_EVENT 0x01
999#define CAM_LOOKUP_ERR_EVENT 0x06
1000#define SOFT_ECC_ERROR_EVENT 0x07
1001#define MGMT_ERR_EVENT 0x08
1002#define TEN_GIG_MAC_EVENT 0x09
1003#define GPI0_H2L_EVENT 0x10
1004#define GPI0_L2H_EVENT 0x20
1005#define GPI1_H2L_EVENT 0x11
1006#define GPI1_L2H_EVENT 0x21
1007#define PCI_ERR_ANON_BUF_RD 0x40
1008 u8 q_id;
1009 __le32 reserved[15];
1010} __attribute((packed));
1011
1012/*
1013 * These three structures are for generic
1014 * handling of ib and ob iocbs.
1015 */
1016struct ql_net_rsp_iocb {
1017 u8 opcode;
1018 u8 flags0;
1019 __le16 length;
1020 __le32 tid;
1021 __le32 reserved[14];
1022} __attribute((packed));
1023
1024struct net_req_iocb {
1025 u8 opcode;
1026 u8 flags0;
1027 __le16 flags1;
1028 __le32 tid;
1029 __le32 reserved1[30];
1030} __attribute((packed));
1031
1032/*
1033 * tx ring initialization control block for chip.
1034 * It is defined as:
1035 * "Work Queue Initialization Control Block"
1036 */
1037struct wqicb {
1038 __le16 len;
1039#define Q_LEN_V (1 << 4)
1040#define Q_LEN_CPP_CONT 0x0000
1041#define Q_LEN_CPP_16 0x0001
1042#define Q_LEN_CPP_32 0x0002
1043#define Q_LEN_CPP_64 0x0003
1044 __le16 flags;
1045#define Q_PRI_SHIFT 1
1046#define Q_FLAGS_LC 0x1000
1047#define Q_FLAGS_LB 0x2000
1048#define Q_FLAGS_LI 0x4000
1049#define Q_FLAGS_LO 0x8000
1050 __le16 cq_id_rss;
1051#define Q_CQ_ID_RSS_RV 0x8000
1052 __le16 rid;
1053 __le32 addr_lo;
1054 __le32 addr_hi;
1055 __le32 cnsmr_idx_addr_lo;
1056 __le32 cnsmr_idx_addr_hi;
1057} __attribute((packed));
1058
1059/*
1060 * rx ring initialization control block for chip.
1061 * It is defined as:
1062 * "Completion Queue Initialization Control Block"
1063 */
1064struct cqicb {
1065 u8 msix_vect;
1066 u8 reserved1;
1067 u8 reserved2;
1068 u8 flags;
1069#define FLAGS_LV 0x08
1070#define FLAGS_LS 0x10
1071#define FLAGS_LL 0x20
1072#define FLAGS_LI 0x40
1073#define FLAGS_LC 0x80
1074 __le16 len;
1075#define LEN_V (1 << 4)
1076#define LEN_CPP_CONT 0x0000
1077#define LEN_CPP_32 0x0001
1078#define LEN_CPP_64 0x0002
1079#define LEN_CPP_128 0x0003
1080 __le16 rid;
1081 __le32 addr_lo;
1082 __le32 addr_hi;
1083 __le32 prod_idx_addr_lo;
1084 __le32 prod_idx_addr_hi;
1085 __le16 pkt_delay;
1086 __le16 irq_delay;
1087 __le32 lbq_addr_lo;
1088 __le32 lbq_addr_hi;
1089 __le16 lbq_buf_size;
1090 __le16 lbq_len; /* entry count */
1091 __le32 sbq_addr_lo;
1092 __le32 sbq_addr_hi;
1093 __le16 sbq_buf_size;
1094 __le16 sbq_len; /* entry count */
1095} __attribute((packed));
1096
1097struct ricb {
1098 u8 base_cq;
1099#define RSS_L4K 0x80
1100 u8 flags;
1101#define RSS_L6K 0x01
1102#define RSS_LI 0x02
1103#define RSS_LB 0x04
1104#define RSS_LM 0x08
1105#define RSS_RI4 0x10
1106#define RSS_RT4 0x20
1107#define RSS_RI6 0x40
1108#define RSS_RT6 0x80
1109 __le16 mask;
1110 __le32 hash_cq_id[256];
1111 __le32 ipv6_hash_key[10];
1112 __le32 ipv4_hash_key[4];
1113} __attribute((packed));
1114
1115/* SOFTWARE/DRIVER DATA STRUCTURES. */
1116
1117struct oal {
1118 struct tx_buf_desc oal[TX_DESC_PER_OAL];
1119};
1120
1121struct map_list {
1122 DECLARE_PCI_UNMAP_ADDR(mapaddr);
1123 DECLARE_PCI_UNMAP_LEN(maplen);
1124};
1125
1126struct tx_ring_desc {
1127 struct sk_buff *skb;
1128 struct ob_mac_iocb_req *queue_entry;
1129 int index;
1130 struct oal oal;
1131 struct map_list map[MAX_SKB_FRAGS + 1];
1132 int map_cnt;
1133 struct tx_ring_desc *next;
1134};
1135
1136struct bq_desc {
1137 union {
1138 struct page *lbq_page;
1139 struct sk_buff *skb;
1140 } p;
1141 struct bq_element *bq;
1142 int index;
1143 DECLARE_PCI_UNMAP_ADDR(mapaddr);
1144 DECLARE_PCI_UNMAP_LEN(maplen);
1145};
1146
1147#define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count))
1148
1149struct tx_ring {
1150 /*
1151 * queue info.
1152 */
1153 struct wqicb wqicb; /* structure used to inform chip of new queue */
1154 void *wq_base; /* pci_alloc:virtual addr for tx */
1155 dma_addr_t wq_base_dma; /* pci_alloc:dma addr for tx */
1156 u32 *cnsmr_idx_sh_reg; /* shadow copy of consumer idx */
1157 dma_addr_t cnsmr_idx_sh_reg_dma; /* dma-shadow copy of consumer */
1158 u32 wq_size; /* size in bytes of queue area */
1159 u32 wq_len; /* number of entries in queue */
1160 void __iomem *prod_idx_db_reg; /* doorbell area index reg at offset 0x00 */
1161 void __iomem *valid_db_reg; /* doorbell area valid reg at offset 0x04 */
1162 u16 prod_idx; /* current value for prod idx */
1163 u16 cq_id; /* completion (rx) queue for tx completions */
1164 u8 wq_id; /* queue id for this entry */
1165 u8 reserved1[3];
1166 struct tx_ring_desc *q; /* descriptor list for the queue */
1167 spinlock_t lock;
1168 atomic_t tx_count; /* counts down for every outstanding IO */
1169 atomic_t queue_stopped; /* Turns queue off when full. */
1170 struct delayed_work tx_work;
1171 struct ql_adapter *qdev;
1172};
1173
1174/*
1175 * Type of inbound queue.
1176 */
1177enum {
1178 DEFAULT_Q = 2, /* Handles slow queue and chip/MPI events. */
1179 TX_Q = 3, /* Handles outbound completions. */
1180 RX_Q = 4, /* Handles inbound completions. */
1181};
1182
1183struct rx_ring {
1184 struct cqicb cqicb; /* The chip's completion queue init control block. */
1185
1186 /* Completion queue elements. */
1187 void *cq_base;
1188 dma_addr_t cq_base_dma;
1189 u32 cq_size;
1190 u32 cq_len;
1191 u16 cq_id;
1192 u32 *prod_idx_sh_reg; /* Shadowed producer register. */
1193 dma_addr_t prod_idx_sh_reg_dma;
1194 void __iomem *cnsmr_idx_db_reg; /* PCI doorbell mem area + 0 */
1195 u32 cnsmr_idx; /* current sw idx */
1196 struct ql_net_rsp_iocb *curr_entry; /* next entry on queue */
1197 void __iomem *valid_db_reg; /* PCI doorbell mem area + 0x04 */
1198
1199 /* Large buffer queue elements. */
1200 u32 lbq_len; /* entry count */
1201 u32 lbq_size; /* size in bytes of queue */
1202 u32 lbq_buf_size;
1203 void *lbq_base;
1204 dma_addr_t lbq_base_dma;
1205 void *lbq_base_indirect;
1206 dma_addr_t lbq_base_indirect_dma;
1207 struct bq_desc *lbq; /* array of control blocks */
1208 void __iomem *lbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x18 */
1209 u32 lbq_prod_idx; /* current sw prod idx */
1210 u32 lbq_curr_idx; /* next entry we expect */
1211 u32 lbq_clean_idx; /* beginning of new descs */
1212 u32 lbq_free_cnt; /* free buffer desc cnt */
1213
1214 /* Small buffer queue elements. */
1215 u32 sbq_len; /* entry count */
1216 u32 sbq_size; /* size in bytes of queue */
1217 u32 sbq_buf_size;
1218 void *sbq_base;
1219 dma_addr_t sbq_base_dma;
1220 void *sbq_base_indirect;
1221 dma_addr_t sbq_base_indirect_dma;
1222 struct bq_desc *sbq; /* array of control blocks */
1223 void __iomem *sbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x1c */
1224 u32 sbq_prod_idx; /* current sw prod idx */
1225 u32 sbq_curr_idx; /* next entry we expect */
1226 u32 sbq_clean_idx; /* beginning of new descs */
1227 u32 sbq_free_cnt; /* free buffer desc cnt */
1228
1229 /* Misc. handler elements. */
1230 u32 type; /* Type of queue, tx, rx, or default. */
1231 u32 irq; /* Which vector this ring is assigned. */
1232 u32 cpu; /* Which CPU this should run on. */
1233 char name[IFNAMSIZ + 5];
1234 struct napi_struct napi;
1235 struct delayed_work rx_work;
1236 u8 reserved;
1237 struct ql_adapter *qdev;
1238};
1239
1240/*
1241 * RSS Initialization Control Block
1242 */
1243struct hash_id {
1244 u8 value[4];
1245};
1246
1247struct nic_stats {
1248 /*
1249 * These stats come from offset 200h to 278h
1250 * in the XGMAC register.
1251 */
1252 u64 tx_pkts;
1253 u64 tx_bytes;
1254 u64 tx_mcast_pkts;
1255 u64 tx_bcast_pkts;
1256 u64 tx_ucast_pkts;
1257 u64 tx_ctl_pkts;
1258 u64 tx_pause_pkts;
1259 u64 tx_64_pkt;
1260 u64 tx_65_to_127_pkt;
1261 u64 tx_128_to_255_pkt;
1262 u64 tx_256_511_pkt;
1263 u64 tx_512_to_1023_pkt;
1264 u64 tx_1024_to_1518_pkt;
1265 u64 tx_1519_to_max_pkt;
1266 u64 tx_undersize_pkt;
1267 u64 tx_oversize_pkt;
1268
1269 /*
1270 * These stats come from offset 300h to 3C8h
1271 * in the XGMAC register.
1272 */
1273 u64 rx_bytes;
1274 u64 rx_bytes_ok;
1275 u64 rx_pkts;
1276 u64 rx_pkts_ok;
1277 u64 rx_bcast_pkts;
1278 u64 rx_mcast_pkts;
1279 u64 rx_ucast_pkts;
1280 u64 rx_undersize_pkts;
1281 u64 rx_oversize_pkts;
1282 u64 rx_jabber_pkts;
1283 u64 rx_undersize_fcerr_pkts;
1284 u64 rx_drop_events;
1285 u64 rx_fcerr_pkts;
1286 u64 rx_align_err;
1287 u64 rx_symbol_err;
1288 u64 rx_mac_err;
1289 u64 rx_ctl_pkts;
1290 u64 rx_pause_pkts;
1291 u64 rx_64_pkts;
1292 u64 rx_65_to_127_pkts;
1293 u64 rx_128_255_pkts;
1294 u64 rx_256_511_pkts;
1295 u64 rx_512_to_1023_pkts;
1296 u64 rx_1024_to_1518_pkts;
1297 u64 rx_1519_to_max_pkts;
1298 u64 rx_len_err_pkts;
1299};
1300
1301/*
1302 * intr_context structure is used during initialization
1303 * to hook the interrupts. It is also used in a single
1304 * irq environment as a context to the ISR.
1305 */
1306struct intr_context {
1307 struct ql_adapter *qdev;
1308 u32 intr;
1309 u32 hooked;
1310 u32 intr_en_mask; /* value/mask used to enable this intr */
1311 u32 intr_dis_mask; /* value/mask used to disable this intr */
1312 u32 intr_read_mask; /* value/mask used to read this intr */
1313 char name[IFNAMSIZ * 2];
1314 atomic_t irq_cnt; /* irq_cnt is used in single vector
1315 * environment. It's incremented for each
1316 * irq handler that is scheduled. When each
1317 * handler finishes it decrements irq_cnt and
1318 * enables interrupts if it's zero. */
1319 irq_handler_t handler;
1320};
1321
1322/* adapter flags definitions. */
1323enum {
1324 QL_ADAPTER_UP = (1 << 0), /* Adapter has been brought up. */
1325 QL_LEGACY_ENABLED = (1 << 3),
1326 QL_MSI_ENABLED = (1 << 3),
1327 QL_MSIX_ENABLED = (1 << 4),
1328 QL_DMA64 = (1 << 5),
1329 QL_PROMISCUOUS = (1 << 6),
1330 QL_ALLMULTI = (1 << 7),
1331};
1332
1333/* link_status bit definitions */
1334enum {
1335 LOOPBACK_MASK = 0x00000700,
1336 LOOPBACK_PCS = 0x00000100,
1337 LOOPBACK_HSS = 0x00000200,
1338 LOOPBACK_EXT = 0x00000300,
1339 PAUSE_MASK = 0x000000c0,
1340 PAUSE_STD = 0x00000040,
1341 PAUSE_PRI = 0x00000080,
1342 SPEED_MASK = 0x00000038,
1343 SPEED_100Mb = 0x00000000,
1344 SPEED_1Gb = 0x00000008,
1345 SPEED_10Gb = 0x00000010,
1346 LINK_TYPE_MASK = 0x00000007,
1347 LINK_TYPE_XFI = 0x00000001,
1348 LINK_TYPE_XAUI = 0x00000002,
1349 LINK_TYPE_XFI_BP = 0x00000003,
1350 LINK_TYPE_XAUI_BP = 0x00000004,
1351 LINK_TYPE_10GBASET = 0x00000005,
1352};
1353
1354/*
1355 * The main Adapter structure definition.
1356 * This structure has all fields relevant to the hardware.
1357 */
1358struct ql_adapter {
1359 struct ricb ricb;
1360 unsigned long flags;
1361 u32 wol;
1362
1363 struct nic_stats nic_stats;
1364
1365 struct vlan_group *vlgrp;
1366
1367 /* PCI Configuration information for this device */
1368 struct pci_dev *pdev;
1369 struct net_device *ndev; /* Parent NET device */
1370
1371 /* Hardware information */
1372 u32 chip_rev_id;
1373 u32 func; /* PCI function for this adapter */
1374
1375 spinlock_t adapter_lock;
1376 spinlock_t hw_lock;
1377 spinlock_t stats_lock;
1378 spinlock_t legacy_lock; /* used for maintaining legacy intr sync */
1379
1380 /* PCI Bus Relative Register Addresses */
1381 void __iomem *reg_base;
1382 void __iomem *doorbell_area;
1383 u32 doorbell_area_size;
1384
1385 u32 msg_enable;
1386
1387 /* Page for Shadow Registers */
1388 void *rx_ring_shadow_reg_area;
1389 dma_addr_t rx_ring_shadow_reg_dma;
1390 void *tx_ring_shadow_reg_area;
1391 dma_addr_t tx_ring_shadow_reg_dma;
1392
1393 u32 mailbox_in;
1394 u32 mailbox_out;
1395
1396 int tx_ring_size;
1397 int rx_ring_size;
1398 u32 intr_count;
1399 struct msix_entry *msi_x_entry;
1400 struct intr_context intr_context[MAX_RX_RINGS];
1401
1402 int (*legacy_check) (struct ql_adapter *);
1403
1404 int tx_ring_count; /* One per online CPU. */
1405 u32 rss_ring_first_cq_id;/* index of first inbound (rss) rx_ring */
1406 u32 rss_ring_count; /* One per online CPU. */
1407 /*
1408 * rx_ring_count =
1409 * one default queue +
1410 * (CPU count * outbound completion rx_ring) +
1411 * (CPU count * inbound (RSS) completion rx_ring)
1412 */
1413 int rx_ring_count;
1414 int ring_mem_size;
1415 void *ring_mem;
1416 struct rx_ring *rx_ring;
1417 int rx_csum;
1418 struct tx_ring *tx_ring;
1419 u32 default_rx_queue;
1420
1421 u16 rx_coalesce_usecs; /* cqicb->int_delay */
1422 u16 rx_max_coalesced_frames; /* cqicb->pkt_int_delay */
1423 u16 tx_coalesce_usecs; /* cqicb->int_delay */
1424 u16 tx_max_coalesced_frames; /* cqicb->pkt_int_delay */
1425
1426 u32 xg_sem_mask;
1427 u32 port_link_up;
1428 u32 port_init;
1429 u32 link_status;
1430
1431 struct flash_params flash;
1432
1433 struct net_device_stats stats;
1434 struct workqueue_struct *q_workqueue;
1435 struct workqueue_struct *workqueue;
1436 struct delayed_work asic_reset_work;
1437 struct delayed_work mpi_reset_work;
1438 struct delayed_work mpi_work;
1439};
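To make the rx_ring_count formula in the structure above concrete: on a hypothetical 4-CPU system the driver would create tx_ring_count = 4 outbound rings and rss_ring_count = 4 inbound RSS rings, giving rx_ring_count = 1 (default queue) + 4 (outbound completion rings) + 4 (inbound RSS rings) = 9 completion queues in total.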
1440
1441/*
1442 * Typical Register accessor for memory mapped device.
1443 */
1444static inline u32 ql_read32(const struct ql_adapter *qdev, int reg)
1445{
1446 return readl(qdev->reg_base + reg);
1447}
1448
1449/*
1450 * Typical Register accessor for memory mapped device.
1451 */
1452static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val)
1453{
1454 writel(val, qdev->reg_base + reg);
1455}
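As a quick illustration, most register work in the driver is just these two helpers plus the bit definitions earlier in this file; for example, recovering this instance's PCI function number from the status register is a read, a mask, and a shift (sketch):

	/* Recover the PCI function id of this adapter from the STS register. */
	u32 sts = ql_read32(qdev, STS);
	u32 func = (sts & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;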
1456
1457/*
1458 * Doorbell Registers:
1459 * Doorbell registers are virtual registers in the PCI memory space.
1460 * The space is allocated by the chip during PCI initialization. The
1461 * device driver finds the doorbell address in BAR 3 in PCI config space.
1462 * The registers are used to control outbound and inbound queues. For
1463 * example, the producer index for an outbound queue. Each queue uses
1464 * 1 4k chunk of memory. The lower half of the space is for outbound
1465 * queues. The upper half is for inbound queues.
1466 */
1467static inline void ql_write_db_reg(u32 val, void __iomem *addr)
1468{
1469 writel(val, addr);
1470 mmiowb();
1471}
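A hedged example of the doorbell in use: once new TX descriptors are built, the updated producer index is written straight to the ring's doorbell address mapped at setup time (the fields are the tx_ring members defined earlier in this header):

	/* Advance and publish the TX producer index, wrapping at queue end. */
	tx_ring->prod_idx++;
	if (tx_ring->prod_idx == tx_ring->wq_len)
		tx_ring->prod_idx = 0;
	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);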
1472
1473/*
1474 * Shadow Registers:
1475 * Outbound queues have a consumer index that is maintained by the chip.
1476 * Inbound queues have a producer index that is maintained by the chip.
1477 * For lower overhead, these registers are "shadowed" to host memory
1478 * which allows the device driver to track the queue progress without
1479 * PCI reads. When an entry is placed on an inbound queue, the chip will
1480 * update the relevant index register and then copy the value to the
1481 * shadow register in host memory.
1482 */
1483static inline unsigned int ql_read_sh_reg(const volatile void *addr)
1484{
1485 return *(volatile unsigned int __force *)addr;
1486}
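The read side is the mirror image: a completion handler compares its software consumer index against the chip-maintained shadow producer index instead of issuing a PCI read (sketch, using the rx_ring fields defined above):

	/* Any new completions on this queue? */
	while (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) {
		/* ... process rx_ring->curr_entry and advance cnsmr_idx ... */
	}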
1487
1488extern char qlge_driver_name[];
1489extern const char qlge_driver_version[];
1490extern const struct ethtool_ops qlge_ethtool_ops;
1491
1492extern int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
1493extern void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
1494extern int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
1495extern int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
1496 u32 *value);
1497extern int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
1498extern int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
1499 u16 q_id);
1500void ql_queue_fw_error(struct ql_adapter *qdev);
1501void ql_mpi_work(struct work_struct *work);
1502void ql_mpi_reset_work(struct work_struct *work);
1503int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
1504void ql_queue_asic_error(struct ql_adapter *qdev);
1505void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
1506void ql_set_ethtool_ops(struct net_device *ndev);
1507int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data);
1508
1509#if 1
1510#define QL_ALL_DUMP
1511#define QL_REG_DUMP
1512#define QL_DEV_DUMP
1513#define QL_CB_DUMP
1514/* #define QL_IB_DUMP */
1515/* #define QL_OB_DUMP */
1516#endif
1517
1518#ifdef QL_REG_DUMP
1519extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
1520extern void ql_dump_routing_entries(struct ql_adapter *qdev);
1521extern void ql_dump_regs(struct ql_adapter *qdev);
1522#define QL_DUMP_REGS(qdev) ql_dump_regs(qdev)
1523#define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev)
1524#define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev)
1525#else
1526#define QL_DUMP_REGS(qdev)
1527#define QL_DUMP_ROUTE(qdev)
1528#define QL_DUMP_XGMAC_CONTROL_REGS(qdev)
1529#endif
1530
1531#ifdef QL_STAT_DUMP
1532extern void ql_dump_stat(struct ql_adapter *qdev);
1533#define QL_DUMP_STAT(qdev) ql_dump_stat(qdev)
1534#else
1535#define QL_DUMP_STAT(qdev)
1536#endif
1537
1538#ifdef QL_DEV_DUMP
1539extern void ql_dump_qdev(struct ql_adapter *qdev);
1540#define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev)
1541#else
1542#define QL_DUMP_QDEV(qdev)
1543#endif
1544
1545#ifdef QL_CB_DUMP
1546extern void ql_dump_wqicb(struct wqicb *wqicb);
1547extern void ql_dump_tx_ring(struct tx_ring *tx_ring);
1548extern void ql_dump_ricb(struct ricb *ricb);
1549extern void ql_dump_cqicb(struct cqicb *cqicb);
1550extern void ql_dump_rx_ring(struct rx_ring *rx_ring);
1551extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
1552#define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb)
1553#define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb)
1554#define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring)
1555#define QL_DUMP_CQICB(cqicb) ql_dump_cqicb(cqicb)
1556#define QL_DUMP_RX_RING(rx_ring) ql_dump_rx_ring(rx_ring)
1557#define QL_DUMP_HW_CB(qdev, size, bit, q_id) \
1558 ql_dump_hw_cb(qdev, size, bit, q_id)
1559#else
1560#define QL_DUMP_RICB(ricb)
1561#define QL_DUMP_WQICB(wqicb)
1562#define QL_DUMP_TX_RING(tx_ring)
1563#define QL_DUMP_CQICB(cqicb)
1564#define QL_DUMP_RX_RING(rx_ring)
1565#define QL_DUMP_HW_CB(qdev, size, bit, q_id)
1566#endif
1567
1568#ifdef QL_OB_DUMP
1569extern void ql_dump_tx_desc(struct tx_buf_desc *tbd);
1570extern void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
1571extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
1572#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb)
1573#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp)
1574#else
1575#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb)
1576#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp)
1577#endif
1578
1579#ifdef QL_IB_DUMP
1580extern void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
1581#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp)
1582#else
1583#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp)
1584#endif
1585
1586#ifdef QL_ALL_DUMP
1587extern void ql_dump_all(struct ql_adapter *qdev);
1588#define QL_DUMP_ALL(qdev) ql_dump_all(qdev)
1589#else
1590#define QL_DUMP_ALL(qdev)
1591#endif
1592
1593#endif /* _QLGE_H_ */
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
new file mode 100644
index 000000000000..f0392b166170
--- /dev/null
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -0,0 +1,858 @@
1#include "qlge.h"
2
3#ifdef QL_REG_DUMP
4static void ql_dump_intr_states(struct ql_adapter *qdev)
5{
6 int i;
7 u32 value;
8 for (i = 0; i < qdev->intr_count; i++) {
9 ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask);
10 value = ql_read32(qdev, INTR_EN);
11 printk(KERN_ERR PFX
12 "%s: Interrupt %d is %s.\n",
13 qdev->ndev->name, i,
14 (value & INTR_EN_EN ? "enabled" : "disabled"));
15 }
16}
17
18void ql_dump_xgmac_control_regs(struct ql_adapter *qdev)
19{
20 u32 data;
21 if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
22 printk(KERN_ERR "%s: Couldn't get xgmac sem.\n", __func__);
23 return;
24 }
25 ql_read_xgmac_reg(qdev, PAUSE_SRC_LO, &data);
26 printk(KERN_ERR PFX "%s: PAUSE_SRC_LO = 0x%.08x.\n", qdev->ndev->name,
27 data);
28 ql_read_xgmac_reg(qdev, PAUSE_SRC_HI, &data);
29 printk(KERN_ERR PFX "%s: PAUSE_SRC_HI = 0x%.08x.\n", qdev->ndev->name,
30 data);
31 ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
32 printk(KERN_ERR PFX "%s: GLOBAL_CFG = 0x%.08x.\n", qdev->ndev->name,
33 data);
34 ql_read_xgmac_reg(qdev, TX_CFG, &data);
35 printk(KERN_ERR PFX "%s: TX_CFG = 0x%.08x.\n", qdev->ndev->name, data);
36 ql_read_xgmac_reg(qdev, RX_CFG, &data);
37 printk(KERN_ERR PFX "%s: RX_CFG = 0x%.08x.\n", qdev->ndev->name, data);
38 ql_read_xgmac_reg(qdev, FLOW_CTL, &data);
39 printk(KERN_ERR PFX "%s: FLOW_CTL = 0x%.08x.\n", qdev->ndev->name,
40 data);
41 ql_read_xgmac_reg(qdev, PAUSE_OPCODE, &data);
42 printk(KERN_ERR PFX "%s: PAUSE_OPCODE = 0x%.08x.\n", qdev->ndev->name,
43 data);
44 ql_read_xgmac_reg(qdev, PAUSE_TIMER, &data);
45 printk(KERN_ERR PFX "%s: PAUSE_TIMER = 0x%.08x.\n", qdev->ndev->name,
46 data);
47 ql_read_xgmac_reg(qdev, PAUSE_FRM_DEST_LO, &data);
48 printk(KERN_ERR PFX "%s: PAUSE_FRM_DEST_LO = 0x%.08x.\n",
49 qdev->ndev->name, data);
50 ql_read_xgmac_reg(qdev, PAUSE_FRM_DEST_HI, &data);
51 printk(KERN_ERR PFX "%s: PAUSE_FRM_DEST_HI = 0x%.08x.\n",
52 qdev->ndev->name, data);
53 ql_read_xgmac_reg(qdev, MAC_TX_PARAMS, &data);
54 printk(KERN_ERR PFX "%s: MAC_TX_PARAMS = 0x%.08x.\n", qdev->ndev->name,
55 data);
56 ql_read_xgmac_reg(qdev, MAC_RX_PARAMS, &data);
57 printk(KERN_ERR PFX "%s: MAC_RX_PARAMS = 0x%.08x.\n", qdev->ndev->name,
58 data);
59 ql_read_xgmac_reg(qdev, MAC_SYS_INT, &data);
60 printk(KERN_ERR PFX "%s: MAC_SYS_INT = 0x%.08x.\n", qdev->ndev->name,
61 data);
62 ql_read_xgmac_reg(qdev, MAC_SYS_INT_MASK, &data);
63 printk(KERN_ERR PFX "%s: MAC_SYS_INT_MASK = 0x%.08x.\n",
64 qdev->ndev->name, data);
65 ql_read_xgmac_reg(qdev, MAC_MGMT_INT, &data);
66 printk(KERN_ERR PFX "%s: MAC_MGMT_INT = 0x%.08x.\n", qdev->ndev->name,
67 data);
68 ql_read_xgmac_reg(qdev, MAC_MGMT_IN_MASK, &data);
69 printk(KERN_ERR PFX "%s: MAC_MGMT_IN_MASK = 0x%.08x.\n",
70 qdev->ndev->name, data);
71 ql_read_xgmac_reg(qdev, EXT_ARB_MODE, &data);
72 printk(KERN_ERR PFX "%s: EXT_ARB_MODE = 0x%.08x.\n", qdev->ndev->name,
73 data);
74 ql_sem_unlock(qdev, qdev->xg_sem_mask);
75
76}
77
78static void ql_dump_ets_regs(struct ql_adapter *qdev)
79{
80}
81
82static void ql_dump_cam_entries(struct ql_adapter *qdev)
83{
84 int i;
85 u32 value[3];
86 for (i = 0; i < 4; i++) {
87 if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) {
88 printk(KERN_ERR PFX
89 "%s: Failed read of mac index register.\n",
90 __func__);
91 return;
92 } else {
93 if (value[0])
94 printk(KERN_ERR PFX
95 "%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x.\n",
96 qdev->ndev->name, i, value[1], value[0],
97 value[2]);
98 }
99 }
100 for (i = 0; i < 32; i++) {
101 if (ql_get_mac_addr_reg
102 (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) {
103 printk(KERN_ERR PFX
104 "%s: Failed read of mac index register.\n",
105 __func__);
106 return;
107 } else {
108 if (value[0])
109 printk(KERN_ERR PFX
110 "%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x.\n",
111 qdev->ndev->name, i, value[1], value[0]);
112 }
113 }
114}
115
116void ql_dump_routing_entries(struct ql_adapter *qdev)
117{
118 int i;
119 u32 value;
120 for (i = 0; i < 16; i++) {
121 value = 0;
122 if (ql_get_routing_reg(qdev, i, &value)) {
123 printk(KERN_ERR PFX
124 "%s: Failed read of routing index register.\n",
125 __func__);
126 return;
127 } else {
128 if (value)
129 printk(KERN_ERR PFX
130 "%s: Routing Mask %d = 0x%.08x.\n",
131 qdev->ndev->name, i, value);
132 }
133 }
134}
135
136void ql_dump_regs(struct ql_adapter *qdev)
137{
138 printk(KERN_ERR PFX "reg dump for function #%d.\n", qdev->func);
139 printk(KERN_ERR PFX "SYS = 0x%x.\n",
140 ql_read32(qdev, SYS));
141 printk(KERN_ERR PFX "RST_FO = 0x%x.\n",
142 ql_read32(qdev, RST_FO));
143 printk(KERN_ERR PFX "FSC = 0x%x.\n",
144 ql_read32(qdev, FSC));
145 printk(KERN_ERR PFX "CSR = 0x%x.\n",
146 ql_read32(qdev, CSR));
147 printk(KERN_ERR PFX "ICB_RID = 0x%x.\n",
148 ql_read32(qdev, ICB_RID));
149 printk(KERN_ERR PFX "ICB_L = 0x%x.\n",
150 ql_read32(qdev, ICB_L));
151 printk(KERN_ERR PFX "ICB_H = 0x%x.\n",
152 ql_read32(qdev, ICB_H));
153 printk(KERN_ERR PFX "CFG = 0x%x.\n",
154 ql_read32(qdev, CFG));
155 printk(KERN_ERR PFX "BIOS_ADDR = 0x%x.\n",
156 ql_read32(qdev, BIOS_ADDR));
157 printk(KERN_ERR PFX "STS = 0x%x.\n",
158 ql_read32(qdev, STS));
159 printk(KERN_ERR PFX "INTR_EN = 0x%x.\n",
160 ql_read32(qdev, INTR_EN));
161 printk(KERN_ERR PFX "INTR_MASK = 0x%x.\n",
162 ql_read32(qdev, INTR_MASK));
163 printk(KERN_ERR PFX "ISR1 = 0x%x.\n",
164 ql_read32(qdev, ISR1));
165 printk(KERN_ERR PFX "ISR2 = 0x%x.\n",
166 ql_read32(qdev, ISR2));
167 printk(KERN_ERR PFX "ISR3 = 0x%x.\n",
168 ql_read32(qdev, ISR3));
169 printk(KERN_ERR PFX "ISR4 = 0x%x.\n",
170 ql_read32(qdev, ISR4));
171 printk(KERN_ERR PFX "REV_ID = 0x%x.\n",
172 ql_read32(qdev, REV_ID));
173 printk(KERN_ERR PFX "FRC_ECC_ERR = 0x%x.\n",
174 ql_read32(qdev, FRC_ECC_ERR));
175 printk(KERN_ERR PFX "ERR_STS = 0x%x.\n",
176 ql_read32(qdev, ERR_STS));
177 printk(KERN_ERR PFX "RAM_DBG_ADDR = 0x%x.\n",
178 ql_read32(qdev, RAM_DBG_ADDR));
179 printk(KERN_ERR PFX "RAM_DBG_DATA = 0x%x.\n",
180 ql_read32(qdev, RAM_DBG_DATA));
181 printk(KERN_ERR PFX "ECC_ERR_CNT = 0x%x.\n",
182 ql_read32(qdev, ECC_ERR_CNT));
183 printk(KERN_ERR PFX "SEM = 0x%x.\n",
184 ql_read32(qdev, SEM));
185 printk(KERN_ERR PFX "GPIO_1 = 0x%x.\n",
186 ql_read32(qdev, GPIO_1));
187 printk(KERN_ERR PFX "GPIO_2 = 0x%x.\n",
188 ql_read32(qdev, GPIO_2));
189 printk(KERN_ERR PFX "GPIO_3 = 0x%x.\n",
190 ql_read32(qdev, GPIO_3));
191 printk(KERN_ERR PFX "XGMAC_ADDR = 0x%x.\n",
192 ql_read32(qdev, XGMAC_ADDR));
193 printk(KERN_ERR PFX "XGMAC_DATA = 0x%x.\n",
194 ql_read32(qdev, XGMAC_DATA));
195 printk(KERN_ERR PFX "NIC_ETS = 0x%x.\n",
196 ql_read32(qdev, NIC_ETS));
197 printk(KERN_ERR PFX "CNA_ETS = 0x%x.\n",
198 ql_read32(qdev, CNA_ETS));
199 printk(KERN_ERR PFX "FLASH_ADDR = 0x%x.\n",
200 ql_read32(qdev, FLASH_ADDR));
201 printk(KERN_ERR PFX "FLASH_DATA = 0x%x.\n",
202 ql_read32(qdev, FLASH_DATA));
203 printk(KERN_ERR PFX "CQ_STOP = 0x%x.\n",
204 ql_read32(qdev, CQ_STOP));
205 printk(KERN_ERR PFX "PAGE_TBL_RID = 0x%x.\n",
206 ql_read32(qdev, PAGE_TBL_RID));
207 printk(KERN_ERR PFX "WQ_PAGE_TBL_LO = 0x%x.\n",
208 ql_read32(qdev, WQ_PAGE_TBL_LO));
209 printk(KERN_ERR PFX "WQ_PAGE_TBL_HI = 0x%x.\n",
210 ql_read32(qdev, WQ_PAGE_TBL_HI));
211 printk(KERN_ERR PFX "CQ_PAGE_TBL_LO = 0x%x.\n",
212 ql_read32(qdev, CQ_PAGE_TBL_LO));
213 printk(KERN_ERR PFX "CQ_PAGE_TBL_HI = 0x%x.\n",
214 ql_read32(qdev, CQ_PAGE_TBL_HI));
215 printk(KERN_ERR PFX "COS_DFLT_CQ1 = 0x%x.\n",
216 ql_read32(qdev, COS_DFLT_CQ1));
217 printk(KERN_ERR PFX "COS_DFLT_CQ2 = 0x%x.\n",
218 ql_read32(qdev, COS_DFLT_CQ2));
219 printk(KERN_ERR PFX "SPLT_HDR = 0x%x.\n",
220 ql_read32(qdev, SPLT_HDR));
221 printk(KERN_ERR PFX "FC_PAUSE_THRES = 0x%x.\n",
222 ql_read32(qdev, FC_PAUSE_THRES));
223 printk(KERN_ERR PFX "NIC_PAUSE_THRES = 0x%x.\n",
224 ql_read32(qdev, NIC_PAUSE_THRES));
225 printk(KERN_ERR PFX "FC_ETHERTYPE = 0x%x.\n",
226 ql_read32(qdev, FC_ETHERTYPE));
227 printk(KERN_ERR PFX "FC_RCV_CFG = 0x%x.\n",
228 ql_read32(qdev, FC_RCV_CFG));
229 printk(KERN_ERR PFX "NIC_RCV_CFG = 0x%x.\n",
230 ql_read32(qdev, NIC_RCV_CFG));
231 printk(KERN_ERR PFX "FC_COS_TAGS = 0x%x.\n",
232 ql_read32(qdev, FC_COS_TAGS));
233 printk(KERN_ERR PFX "NIC_COS_TAGS = 0x%x.\n",
234 ql_read32(qdev, NIC_COS_TAGS));
235 printk(KERN_ERR PFX "MGMT_RCV_CFG = 0x%x.\n",
236 ql_read32(qdev, MGMT_RCV_CFG));
237 printk(KERN_ERR PFX "XG_SERDES_ADDR = 0x%x.\n",
238 ql_read32(qdev, XG_SERDES_ADDR));
239 printk(KERN_ERR PFX "XG_SERDES_DATA = 0x%x.\n",
240 ql_read32(qdev, XG_SERDES_DATA));
241 printk(KERN_ERR PFX "PRB_MX_ADDR = 0x%x.\n",
242 ql_read32(qdev, PRB_MX_ADDR));
243 printk(KERN_ERR PFX "PRB_MX_DATA = 0x%x.\n",
244 ql_read32(qdev, PRB_MX_DATA));
245 ql_dump_intr_states(qdev);
246 ql_dump_xgmac_control_regs(qdev);
247 ql_dump_ets_regs(qdev);
248 ql_dump_cam_entries(qdev);
249 ql_dump_routing_entries(qdev);
250}
251#endif
252
253#ifdef QL_STAT_DUMP
254void ql_dump_stat(struct ql_adapter *qdev)
255{
256 printk(KERN_ERR "%s: Enter.\n", __func__);
257 printk(KERN_ERR "tx_pkts = %ld\n",
258 (unsigned long)qdev->nic_stats.tx_pkts);
259 printk(KERN_ERR "tx_bytes = %ld\n",
260 (unsigned long)qdev->nic_stats.tx_bytes);
261 printk(KERN_ERR "tx_mcast_pkts = %ld.\n",
262 (unsigned long)qdev->nic_stats.tx_mcast_pkts);
263 printk(KERN_ERR "tx_bcast_pkts = %ld.\n",
264 (unsigned long)qdev->nic_stats.tx_bcast_pkts);
265 printk(KERN_ERR "tx_ucast_pkts = %ld.\n",
266 (unsigned long)qdev->nic_stats.tx_ucast_pkts);
267 printk(KERN_ERR "tx_ctl_pkts = %ld.\n",
268 (unsigned long)qdev->nic_stats.tx_ctl_pkts);
269 printk(KERN_ERR "tx_pause_pkts = %ld.\n",
270 (unsigned long)qdev->nic_stats.tx_pause_pkts);
271 printk(KERN_ERR "tx_64_pkt = %ld.\n",
272 (unsigned long)qdev->nic_stats.tx_64_pkt);
273 printk(KERN_ERR "tx_65_to_127_pkt = %ld.\n",
274 (unsigned long)qdev->nic_stats.tx_65_to_127_pkt);
275 printk(KERN_ERR "tx_128_to_255_pkt = %ld.\n",
276 (unsigned long)qdev->nic_stats.tx_128_to_255_pkt);
277 printk(KERN_ERR "tx_256_511_pkt = %ld.\n",
278 (unsigned long)qdev->nic_stats.tx_256_511_pkt);
279 printk(KERN_ERR "tx_512_to_1023_pkt = %ld.\n",
280 (unsigned long)qdev->nic_stats.tx_512_to_1023_pkt);
281 printk(KERN_ERR "tx_1024_to_1518_pkt = %ld.\n",
282 (unsigned long)qdev->nic_stats.tx_1024_to_1518_pkt);
283 printk(KERN_ERR "tx_1519_to_max_pkt = %ld.\n",
284 (unsigned long)qdev->nic_stats.tx_1519_to_max_pkt);
285 printk(KERN_ERR "tx_undersize_pkt = %ld.\n",
286 (unsigned long)qdev->nic_stats.tx_undersize_pkt);
287 printk(KERN_ERR "tx_oversize_pkt = %ld.\n",
288 (unsigned long)qdev->nic_stats.tx_oversize_pkt);
289 printk(KERN_ERR "rx_bytes = %ld.\n",
290 (unsigned long)qdev->nic_stats.rx_bytes);
291 printk(KERN_ERR "rx_bytes_ok = %ld.\n",
292 (unsigned long)qdev->nic_stats.rx_bytes_ok);
293 printk(KERN_ERR "rx_pkts = %ld.\n",
294 (unsigned long)qdev->nic_stats.rx_pkts);
295 printk(KERN_ERR "rx_pkts_ok = %ld.\n",
296 (unsigned long)qdev->nic_stats.rx_pkts_ok);
297 printk(KERN_ERR "rx_bcast_pkts = %ld.\n",
298 (unsigned long)qdev->nic_stats.rx_bcast_pkts);
299 printk(KERN_ERR "rx_mcast_pkts = %ld.\n",
300 (unsigned long)qdev->nic_stats.rx_mcast_pkts);
301 printk(KERN_ERR "rx_ucast_pkts = %ld.\n",
302 (unsigned long)qdev->nic_stats.rx_ucast_pkts);
303 printk(KERN_ERR "rx_undersize_pkts = %ld.\n",
304 (unsigned long)qdev->nic_stats.rx_undersize_pkts);
305 printk(KERN_ERR "rx_oversize_pkts = %ld.\n",
306 (unsigned long)qdev->nic_stats.rx_oversize_pkts);
307 printk(KERN_ERR "rx_jabber_pkts = %ld.\n",
308 (unsigned long)qdev->nic_stats.rx_jabber_pkts);
309 printk(KERN_ERR "rx_undersize_fcerr_pkts = %ld.\n",
310 (unsigned long)qdev->nic_stats.rx_undersize_fcerr_pkts);
311 printk(KERN_ERR "rx_drop_events = %ld.\n",
312 (unsigned long)qdev->nic_stats.rx_drop_events);
313 printk(KERN_ERR "rx_fcerr_pkts = %ld.\n",
314 (unsigned long)qdev->nic_stats.rx_fcerr_pkts);
315 printk(KERN_ERR "rx_align_err = %ld.\n",
316 (unsigned long)qdev->nic_stats.rx_align_err);
317 printk(KERN_ERR "rx_symbol_err = %ld.\n",
318 (unsigned long)qdev->nic_stats.rx_symbol_err);
319 printk(KERN_ERR "rx_mac_err = %ld.\n",
320 (unsigned long)qdev->nic_stats.rx_mac_err);
321 printk(KERN_ERR "rx_ctl_pkts = %ld.\n",
322 (unsigned long)qdev->nic_stats.rx_ctl_pkts);
323 printk(KERN_ERR "rx_pause_pkts = %ld.\n",
324 (unsigned long)qdev->nic_stats.rx_pause_pkts);
325 printk(KERN_ERR "rx_64_pkts = %ld.\n",
326 (unsigned long)qdev->nic_stats.rx_64_pkts);
327 printk(KERN_ERR "rx_65_to_127_pkts = %ld.\n",
328 (unsigned long)qdev->nic_stats.rx_65_to_127_pkts);
329 printk(KERN_ERR "rx_128_255_pkts = %ld.\n",
330 (unsigned long)qdev->nic_stats.rx_128_255_pkts);
331 printk(KERN_ERR "rx_256_511_pkts = %ld.\n",
332 (unsigned long)qdev->nic_stats.rx_256_511_pkts);
333 printk(KERN_ERR "rx_512_to_1023_pkts = %ld.\n",
334 (unsigned long)qdev->nic_stats.rx_512_to_1023_pkts);
335 printk(KERN_ERR "rx_1024_to_1518_pkts = %ld.\n",
336 (unsigned long)qdev->nic_stats.rx_1024_to_1518_pkts);
337 printk(KERN_ERR "rx_1519_to_max_pkts = %ld.\n",
338 (unsigned long)qdev->nic_stats.rx_1519_to_max_pkts);
339 printk(KERN_ERR "rx_len_err_pkts = %ld.\n",
340 (unsigned long)qdev->nic_stats.rx_len_err_pkts);
341}
342#endif
343
344#ifdef QL_DEV_DUMP
345void ql_dump_qdev(struct ql_adapter *qdev)
346{
347 int i;
348 printk(KERN_ERR PFX "qdev->flags = %lx.\n",
349 qdev->flags);
350 printk(KERN_ERR PFX "qdev->vlgrp = %p.\n",
351 qdev->vlgrp);
352 printk(KERN_ERR PFX "qdev->pdev = %p.\n",
353 qdev->pdev);
354 printk(KERN_ERR PFX "qdev->ndev = %p.\n",
355 qdev->ndev);
356 printk(KERN_ERR PFX "qdev->chip_rev_id = %d.\n",
357 qdev->chip_rev_id);
358 printk(KERN_ERR PFX "qdev->reg_base = %p.\n",
359 qdev->reg_base);
360 printk(KERN_ERR PFX "qdev->doorbell_area = %p.\n",
361 qdev->doorbell_area);
362 printk(KERN_ERR PFX "qdev->doorbell_area_size = %d.\n",
363 qdev->doorbell_area_size);
364 printk(KERN_ERR PFX "msg_enable = %x.\n",
365 qdev->msg_enable);
366 printk(KERN_ERR PFX "qdev->rx_ring_shadow_reg_area = %p.\n",
367 qdev->rx_ring_shadow_reg_area);
368 printk(KERN_ERR PFX "qdev->rx_ring_shadow_reg_dma = %p.\n",
369 (void *)qdev->rx_ring_shadow_reg_dma);
370 printk(KERN_ERR PFX "qdev->tx_ring_shadow_reg_area = %p.\n",
371 qdev->tx_ring_shadow_reg_area);
372 printk(KERN_ERR PFX "qdev->tx_ring_shadow_reg_dma = %p.\n",
373 (void *)qdev->tx_ring_shadow_reg_dma);
374 printk(KERN_ERR PFX "qdev->intr_count = %d.\n",
375 qdev->intr_count);
376 if (qdev->msi_x_entry)
377 for (i = 0; i < qdev->intr_count; i++) {
378 printk(KERN_ERR PFX
379 "msi_x_entry.[%d]vector = %d.\n", i,
380 qdev->msi_x_entry[i].vector);
381 printk(KERN_ERR PFX
382 "msi_x_entry.[%d]entry = %d.\n", i,
383 qdev->msi_x_entry[i].entry);
384 }
385 for (i = 0; i < qdev->intr_count; i++) {
386 printk(KERN_ERR PFX
387 "intr_context[%d].qdev = %p.\n", i,
388 qdev->intr_context[i].qdev);
389 printk(KERN_ERR PFX
390 "intr_context[%d].intr = %d.\n", i,
391 qdev->intr_context[i].intr);
392 printk(KERN_ERR PFX
393 "intr_context[%d].hooked = %d.\n", i,
394 qdev->intr_context[i].hooked);
395 printk(KERN_ERR PFX
396 "intr_context[%d].intr_en_mask = 0x%08x.\n", i,
397 qdev->intr_context[i].intr_en_mask);
398 printk(KERN_ERR PFX
399 "intr_context[%d].intr_dis_mask = 0x%08x.\n", i,
400 qdev->intr_context[i].intr_dis_mask);
401 printk(KERN_ERR PFX
402 "intr_context[%d].intr_read_mask = 0x%08x.\n", i,
403 qdev->intr_context[i].intr_read_mask);
404 }
405 printk(KERN_ERR PFX "qdev->tx_ring_count = %d.\n", qdev->tx_ring_count);
406 printk(KERN_ERR PFX "qdev->rx_ring_count = %d.\n", qdev->rx_ring_count);
407 printk(KERN_ERR PFX "qdev->ring_mem_size = %d.\n", qdev->ring_mem_size);
408 printk(KERN_ERR PFX "qdev->ring_mem = %p.\n", qdev->ring_mem);
409 printk(KERN_ERR PFX "qdev->intr_count = %d.\n", qdev->intr_count);
410 printk(KERN_ERR PFX "qdev->tx_ring = %p.\n",
411 qdev->tx_ring);
412 printk(KERN_ERR PFX "qdev->rss_ring_first_cq_id = %d.\n",
413 qdev->rss_ring_first_cq_id);
414 printk(KERN_ERR PFX "qdev->rss_ring_count = %d.\n",
415 qdev->rss_ring_count);
416 printk(KERN_ERR PFX "qdev->rx_ring = %p.\n", qdev->rx_ring);
417 printk(KERN_ERR PFX "qdev->default_rx_queue = %d.\n",
418 qdev->default_rx_queue);
419 printk(KERN_ERR PFX "qdev->xg_sem_mask = 0x%08x.\n",
420 qdev->xg_sem_mask);
421 printk(KERN_ERR PFX "qdev->port_link_up = 0x%08x.\n",
422 qdev->port_link_up);
423 printk(KERN_ERR PFX "qdev->port_init = 0x%08x.\n",
424 qdev->port_init);
425
426}
427#endif
428
429#ifdef QL_CB_DUMP
430void ql_dump_wqicb(struct wqicb *wqicb)
431{
432 printk(KERN_ERR PFX "Dumping wqicb stuff...\n");
433 printk(KERN_ERR PFX "wqicb->len = 0x%x.\n", le16_to_cpu(wqicb->len));
434 printk(KERN_ERR PFX "wqicb->flags = %x.\n", le16_to_cpu(wqicb->flags));
435 printk(KERN_ERR PFX "wqicb->cq_id_rss = %d.\n",
436 le16_to_cpu(wqicb->cq_id_rss));
437 printk(KERN_ERR PFX "wqicb->rid = 0x%x.\n", le16_to_cpu(wqicb->rid));
438 printk(KERN_ERR PFX "wqicb->wq_addr_lo = 0x%.08x.\n",
439 le32_to_cpu(wqicb->addr_lo));
440 printk(KERN_ERR PFX "wqicb->wq_addr_hi = 0x%.08x.\n",
441 le32_to_cpu(wqicb->addr_hi));
442 printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr_lo = 0x%.08x.\n",
443 le32_to_cpu(wqicb->cnsmr_idx_addr_lo));
444 printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr_hi = 0x%.08x.\n",
445 le32_to_cpu(wqicb->cnsmr_idx_addr_hi));
446}
447
448void ql_dump_tx_ring(struct tx_ring *tx_ring)
449{
450 if (tx_ring == NULL)
451 return;
452 printk(KERN_ERR PFX
453 "===================== Dumping tx_ring %d ===============.\n",
454 tx_ring->wq_id);
455 printk(KERN_ERR PFX "tx_ring->base = %p.\n", tx_ring->wq_base);
456 printk(KERN_ERR PFX "tx_ring->base_dma = 0x%llx.\n",
457 (u64) tx_ring->wq_base_dma);
458 printk(KERN_ERR PFX "tx_ring->cnsmr_idx_sh_reg = %p.\n",
459 tx_ring->cnsmr_idx_sh_reg);
460 printk(KERN_ERR PFX "tx_ring->cnsmr_idx_sh_reg_dma = 0x%llx.\n",
461 (u64) tx_ring->cnsmr_idx_sh_reg_dma);
462 printk(KERN_ERR PFX "tx_ring->size = %d.\n", tx_ring->wq_size);
463 printk(KERN_ERR PFX "tx_ring->len = %d.\n", tx_ring->wq_len);
464 printk(KERN_ERR PFX "tx_ring->prod_idx_db_reg = %p.\n",
465 tx_ring->prod_idx_db_reg);
466 printk(KERN_ERR PFX "tx_ring->valid_db_reg = %p.\n",
467 tx_ring->valid_db_reg);
468 printk(KERN_ERR PFX "tx_ring->prod_idx = %d.\n", tx_ring->prod_idx);
469 printk(KERN_ERR PFX "tx_ring->cq_id = %d.\n", tx_ring->cq_id);
470 printk(KERN_ERR PFX "tx_ring->wq_id = %d.\n", tx_ring->wq_id);
471 printk(KERN_ERR PFX "tx_ring->q = %p.\n", tx_ring->q);
472 printk(KERN_ERR PFX "tx_ring->tx_count = %d.\n",
473 atomic_read(&tx_ring->tx_count));
474}
475
476void ql_dump_ricb(struct ricb *ricb)
477{
478 int i;
479 printk(KERN_ERR PFX
480 "===================== Dumping ricb ===============.\n");
481 printk(KERN_ERR PFX "Dumping ricb stuff...\n");
482
483 printk(KERN_ERR PFX "ricb->base_cq = %d.\n", ricb->base_cq & 0x1f);
484 printk(KERN_ERR PFX "ricb->flags = %s%s%s%s%s%s%s%s%s.\n",
485 ricb->base_cq & RSS_L4K ? "RSS_L4K " : "",
486 ricb->flags & RSS_L6K ? "RSS_L6K " : "",
487 ricb->flags & RSS_LI ? "RSS_LI " : "",
488 ricb->flags & RSS_LB ? "RSS_LB " : "",
489 ricb->flags & RSS_LM ? "RSS_LM " : "",
490 ricb->flags & RSS_RI4 ? "RSS_RI4 " : "",
491 ricb->flags & RSS_RT4 ? "RSS_RT4 " : "",
492 ricb->flags & RSS_RI6 ? "RSS_RI6 " : "",
493 ricb->flags & RSS_RT6 ? "RSS_RT6 " : "");
494 printk(KERN_ERR PFX "ricb->mask = 0x%.04x.\n", le16_to_cpu(ricb->mask));
495 for (i = 0; i < 16; i++)
496 printk(KERN_ERR PFX "ricb->hash_cq_id[%d] = 0x%.08x.\n", i,
497 le32_to_cpu(ricb->hash_cq_id[i]));
498 for (i = 0; i < 10; i++)
499 printk(KERN_ERR PFX "ricb->ipv6_hash_key[%d] = 0x%.08x.\n", i,
500 le32_to_cpu(ricb->ipv6_hash_key[i]));
501 for (i = 0; i < 4; i++)
502 printk(KERN_ERR PFX "ricb->ipv4_hash_key[%d] = 0x%.08x.\n", i,
503 le32_to_cpu(ricb->ipv4_hash_key[i]));
504}
505
506void ql_dump_cqicb(struct cqicb *cqicb)
507{
508 printk(KERN_ERR PFX "Dumping cqicb stuff...\n");
509
510 printk(KERN_ERR PFX "cqicb->msix_vect = %d.\n", cqicb->msix_vect);
511 printk(KERN_ERR PFX "cqicb->flags = %x.\n", cqicb->flags);
512 printk(KERN_ERR PFX "cqicb->len = %d.\n", le16_to_cpu(cqicb->len));
513 printk(KERN_ERR PFX "cqicb->addr_lo = %x.\n",
514 le32_to_cpu(cqicb->addr_lo));
515 printk(KERN_ERR PFX "cqicb->addr_hi = %x.\n",
516 le32_to_cpu(cqicb->addr_hi));
517 printk(KERN_ERR PFX "cqicb->prod_idx_addr_lo = %x.\n",
518 le32_to_cpu(cqicb->prod_idx_addr_lo));
519 printk(KERN_ERR PFX "cqicb->prod_idx_addr_hi = %x.\n",
520 le32_to_cpu(cqicb->prod_idx_addr_hi));
521 printk(KERN_ERR PFX "cqicb->pkt_delay = 0x%.04x.\n",
522 le16_to_cpu(cqicb->pkt_delay));
523 printk(KERN_ERR PFX "cqicb->irq_delay = 0x%.04x.\n",
524 le16_to_cpu(cqicb->irq_delay));
525 printk(KERN_ERR PFX "cqicb->lbq_addr_lo = %x.\n",
526 le32_to_cpu(cqicb->lbq_addr_lo));
527 printk(KERN_ERR PFX "cqicb->lbq_addr_hi = %x.\n",
528 le32_to_cpu(cqicb->lbq_addr_hi));
529 printk(KERN_ERR PFX "cqicb->lbq_buf_size = 0x%.04x.\n",
530 le16_to_cpu(cqicb->lbq_buf_size));
531 printk(KERN_ERR PFX "cqicb->lbq_len = 0x%.04x.\n",
532 le16_to_cpu(cqicb->lbq_len));
533 printk(KERN_ERR PFX "cqicb->sbq_addr_lo = %x.\n",
534 le32_to_cpu(cqicb->sbq_addr_lo));
535 printk(KERN_ERR PFX "cqicb->sbq_addr_hi = %x.\n",
536 le32_to_cpu(cqicb->sbq_addr_hi));
537 printk(KERN_ERR PFX "cqicb->sbq_buf_size = 0x%.04x.\n",
538 le16_to_cpu(cqicb->sbq_buf_size));
539 printk(KERN_ERR PFX "cqicb->sbq_len = 0x%.04x.\n",
540 le16_to_cpu(cqicb->sbq_len));
541}
542
543void ql_dump_rx_ring(struct rx_ring *rx_ring)
544{
545 if (rx_ring == NULL)
546 return;
547 printk(KERN_ERR PFX
548 "===================== Dumping rx_ring %d ===============.\n",
549 rx_ring->cq_id);
550 printk(KERN_ERR PFX "Dumping rx_ring %d, type = %s%s%s.\n",
551 rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "",
552 rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "",
553 rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : "");
554 printk(KERN_ERR PFX "rx_ring->cqicb = %p.\n", &rx_ring->cqicb);
555 printk(KERN_ERR PFX "rx_ring->cq_base = %p.\n", rx_ring->cq_base);
556 printk(KERN_ERR PFX "rx_ring->cq_base_dma = %llx.\n",
557 (u64) rx_ring->cq_base_dma);
558 printk(KERN_ERR PFX "rx_ring->cq_size = %d.\n", rx_ring->cq_size);
559 printk(KERN_ERR PFX "rx_ring->cq_len = %d.\n", rx_ring->cq_len);
560 printk(KERN_ERR PFX
561 "rx_ring->prod_idx_sh_reg, addr = %p, value = %d.\n",
562 rx_ring->prod_idx_sh_reg,
563 rx_ring->prod_idx_sh_reg ? *(rx_ring->prod_idx_sh_reg) : 0);
564 printk(KERN_ERR PFX "rx_ring->prod_idx_sh_reg_dma = %llx.\n",
565 (u64) rx_ring->prod_idx_sh_reg_dma);
566 printk(KERN_ERR PFX "rx_ring->cnsmr_idx_db_reg = %p.\n",
567 rx_ring->cnsmr_idx_db_reg);
568 printk(KERN_ERR PFX "rx_ring->cnsmr_idx = %d.\n", rx_ring->cnsmr_idx);
569 printk(KERN_ERR PFX "rx_ring->curr_entry = %p.\n", rx_ring->curr_entry);
570 printk(KERN_ERR PFX "rx_ring->valid_db_reg = %p.\n",
571 rx_ring->valid_db_reg);
572
573 printk(KERN_ERR PFX "rx_ring->lbq_base = %p.\n", rx_ring->lbq_base);
574 printk(KERN_ERR PFX "rx_ring->lbq_base_dma = %llx.\n",
575 (u64) rx_ring->lbq_base_dma);
576 printk(KERN_ERR PFX "rx_ring->lbq_base_indirect = %p.\n",
577 rx_ring->lbq_base_indirect);
578 printk(KERN_ERR PFX "rx_ring->lbq_base_indirect_dma = %llx.\n",
579 (u64) rx_ring->lbq_base_indirect_dma);
580 printk(KERN_ERR PFX "rx_ring->lbq = %p.\n", rx_ring->lbq);
581 printk(KERN_ERR PFX "rx_ring->lbq_len = %d.\n", rx_ring->lbq_len);
582 printk(KERN_ERR PFX "rx_ring->lbq_size = %d.\n", rx_ring->lbq_size);
583 printk(KERN_ERR PFX "rx_ring->lbq_prod_idx_db_reg = %p.\n",
584 rx_ring->lbq_prod_idx_db_reg);
585 printk(KERN_ERR PFX "rx_ring->lbq_prod_idx = %d.\n",
586 rx_ring->lbq_prod_idx);
587 printk(KERN_ERR PFX "rx_ring->lbq_curr_idx = %d.\n",
588 rx_ring->lbq_curr_idx);
589 printk(KERN_ERR PFX "rx_ring->lbq_clean_idx = %d.\n",
590 rx_ring->lbq_clean_idx);
591 printk(KERN_ERR PFX "rx_ring->lbq_free_cnt = %d.\n",
592 rx_ring->lbq_free_cnt);
593 printk(KERN_ERR PFX "rx_ring->lbq_buf_size = %d.\n",
594 rx_ring->lbq_buf_size);
595
596 printk(KERN_ERR PFX "rx_ring->sbq_base = %p.\n", rx_ring->sbq_base);
597 printk(KERN_ERR PFX "rx_ring->sbq_base_dma = %llx.\n",
598 (u64) rx_ring->sbq_base_dma);
599 printk(KERN_ERR PFX "rx_ring->sbq_base_indirect = %p.\n",
600 rx_ring->sbq_base_indirect);
601 printk(KERN_ERR PFX "rx_ring->sbq_base_indirect_dma = %llx.\n",
602 (u64) rx_ring->sbq_base_indirect_dma);
603 printk(KERN_ERR PFX "rx_ring->sbq = %p.\n", rx_ring->sbq);
604 printk(KERN_ERR PFX "rx_ring->sbq_len = %d.\n", rx_ring->sbq_len);
605 printk(KERN_ERR PFX "rx_ring->sbq_size = %d.\n", rx_ring->sbq_size);
606 printk(KERN_ERR PFX "rx_ring->sbq_prod_idx_db_reg addr = %p.\n",
607 rx_ring->sbq_prod_idx_db_reg);
608 printk(KERN_ERR PFX "rx_ring->sbq_prod_idx = %d.\n",
609 rx_ring->sbq_prod_idx);
610 printk(KERN_ERR PFX "rx_ring->sbq_curr_idx = %d.\n",
611 rx_ring->sbq_curr_idx);
612 printk(KERN_ERR PFX "rx_ring->sbq_clean_idx = %d.\n",
613 rx_ring->sbq_clean_idx);
614 printk(KERN_ERR PFX "rx_ring->sbq_free_cnt = %d.\n",
615 rx_ring->sbq_free_cnt);
616 printk(KERN_ERR PFX "rx_ring->sbq_buf_size = %d.\n",
617 rx_ring->sbq_buf_size);
618 printk(KERN_ERR PFX "rx_ring->cq_id = %d.\n", rx_ring->cq_id);
619 printk(KERN_ERR PFX "rx_ring->irq = %d.\n", rx_ring->irq);
620 printk(KERN_ERR PFX "rx_ring->cpu = %d.\n", rx_ring->cpu);
621 printk(KERN_ERR PFX "rx_ring->qdev = %p.\n", rx_ring->qdev);
622}
623
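/* Read a control block back from the chip into a scratch buffer and
 * pretty-print it. The CFG_DRQ/DCQ/DR bit passed in selects which type of
 * block is dumped (ql_write_cfg() maps these bits to a device-to-host
 * transfer), and the matching dump helper is chosen in the switch below.
 */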
624void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
625{
626 void *ptr;
627
628 printk(KERN_ERR PFX "%s: Enter.\n", __func__);
629
630 ptr = kmalloc(size, GFP_ATOMIC);
631 if (ptr == NULL) {
632 printk(KERN_ERR PFX "%s: Couldn't allocate a buffer.\n",
633 __func__);
634 return;
635 }
636
637 if (ql_write_cfg(qdev, ptr, size, bit, q_id)) {
638		printk(KERN_ERR "%s: Failed to dump control block!\n",
639 __func__);
640 goto fail_it;
641 }
642 switch (bit) {
643 case CFG_DRQ:
644 ql_dump_wqicb((struct wqicb *)ptr);
645 break;
646 case CFG_DCQ:
647 ql_dump_cqicb((struct cqicb *)ptr);
648 break;
649 case CFG_DR:
650 ql_dump_ricb((struct ricb *)ptr);
651 break;
652 default:
653 printk(KERN_ERR PFX "%s: Invalid bit value = %x.\n",
654 __func__, bit);
655 break;
656 }
657fail_it:
658 kfree(ptr);
659}
660#endif
661
662#ifdef QL_OB_DUMP
663void ql_dump_tx_desc(struct tx_buf_desc *tbd)
664{
665	printk(KERN_ERR PFX "tbd->addr = 0x%llx\n",
666	       le64_to_cpu((u64) tbd->addr));
667	printk(KERN_ERR PFX "tbd->len = %d\n",
668	       le32_to_cpu(tbd->len) & TX_DESC_LEN_MASK);
669	printk(KERN_ERR PFX "tbd->flags = %s %s\n",
670	       le32_to_cpu(tbd->len) & TX_DESC_C ? "C" : ".",
671	       le32_to_cpu(tbd->len) & TX_DESC_E ? "E" : ".");
672	tbd++;
673	printk(KERN_ERR PFX "tbd->addr = 0x%llx\n",
674	       le64_to_cpu((u64) tbd->addr));
675	printk(KERN_ERR PFX "tbd->len = %d\n",
676	       le32_to_cpu(tbd->len) & TX_DESC_LEN_MASK);
677	printk(KERN_ERR PFX "tbd->flags = %s %s\n",
678	       le32_to_cpu(tbd->len) & TX_DESC_C ? "C" : ".",
679	       le32_to_cpu(tbd->len) & TX_DESC_E ? "E" : ".");
680	tbd++;
681	printk(KERN_ERR PFX "tbd->addr = 0x%llx\n",
682	       le64_to_cpu((u64) tbd->addr));
683	printk(KERN_ERR PFX "tbd->len = %d\n",
684	       le32_to_cpu(tbd->len) & TX_DESC_LEN_MASK);
685	printk(KERN_ERR PFX "tbd->flags = %s %s\n",
686	       le32_to_cpu(tbd->len) & TX_DESC_C ? "C" : ".",
687	       le32_to_cpu(tbd->len) & TX_DESC_E ? "E" : ".");
688
689}
690
691void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
692{
693 struct ob_mac_tso_iocb_req *ob_mac_tso_iocb =
694 (struct ob_mac_tso_iocb_req *)ob_mac_iocb;
695 struct tx_buf_desc *tbd;
696 u16 frame_len;
697
698 printk(KERN_ERR PFX "%s\n", __func__);
699 printk(KERN_ERR PFX "opcode = %s\n",
700 (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO");
701 printk(KERN_ERR PFX "flags1 = %s %s %s %s %s\n",
702 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "",
703 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "",
704 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "",
705 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "",
706 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : "");
707 printk(KERN_ERR PFX "flags2 = %s %s %s\n",
708 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
709 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
710 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
711 printk(KERN_ERR PFX "flags3 = %s %s %s \n",
712 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
713 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
714 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
715 printk(KERN_ERR PFX "tid = %x\n", ob_mac_iocb->tid);
716 printk(KERN_ERR PFX "txq_idx = %d\n", ob_mac_iocb->txq_idx);
717 printk(KERN_ERR PFX "vlan_tci = %x\n", ob_mac_tso_iocb->vlan_tci);
718 if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) {
719 printk(KERN_ERR PFX "frame_len = %d\n",
720 le32_to_cpu(ob_mac_tso_iocb->frame_len));
721 printk(KERN_ERR PFX "mss = %d\n",
722 le16_to_cpu(ob_mac_tso_iocb->mss));
723 printk(KERN_ERR PFX "prot_hdr_len = %d\n",
724 le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len));
725 printk(KERN_ERR PFX "hdr_offset = 0x%.04x\n",
726 le16_to_cpu(ob_mac_tso_iocb->net_trans_offset));
727 frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len);
728 } else {
729 printk(KERN_ERR PFX "frame_len = %d\n",
730 le16_to_cpu(ob_mac_iocb->frame_len));
731 frame_len = le16_to_cpu(ob_mac_iocb->frame_len);
732 }
733 tbd = &ob_mac_iocb->tbd[0];
734 ql_dump_tx_desc(tbd);
735}
736
737void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
738{
739 printk(KERN_ERR PFX "%s\n", __func__);
740 printk(KERN_ERR PFX "opcode = %d\n", ob_mac_rsp->opcode);
741 printk(KERN_ERR PFX "flags = %s %s %s %s %s %s %s\n",
742 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".",
743 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".",
744 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".",
745 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".",
746 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".",
747 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".",
748 ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : ".");
749 printk(KERN_ERR PFX "tid = %x\n", ob_mac_rsp->tid);
750}
751#endif
752
753#ifdef QL_IB_DUMP
754void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
755{
756 printk(KERN_ERR PFX "%s\n", __func__);
757 printk(KERN_ERR PFX "opcode = 0x%x\n", ib_mac_rsp->opcode);
758 printk(KERN_ERR PFX "flags1 = %s%s%s%s%s%s\n",
759 ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "",
760 ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "",
761 ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "",
762 ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "",
763 ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "",
764 ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : "");
765
766 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK)
767 printk(KERN_ERR PFX "%s%s%s Multicast.\n",
768 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
769 IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
770 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
771 IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
772 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
773 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
774
775 printk(KERN_ERR PFX "flags2 = %s%s%s%s%s\n",
776 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "",
777 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "",
778 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "",
779 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "",
780 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : "");
781
782 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK)
783 printk(KERN_ERR PFX "%s%s%s%s%s error.\n",
784 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
785 IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "",
786 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
787 IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "",
788 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
789 IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "",
790 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
791 IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "",
792 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
793 IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : "");
794
795 printk(KERN_ERR PFX "flags3 = %s%s.\n",
796 ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "",
797 ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : "");
798
799 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
800 printk(KERN_ERR PFX "RSS flags = %s%s%s%s.\n",
801 ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
802 IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "",
803 ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
804 IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "",
805 ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
806 IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "",
807 ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
808 IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : "");
809
810 printk(KERN_ERR PFX "data_len = %d\n",
811 le32_to_cpu(ib_mac_rsp->data_len));
812 printk(KERN_ERR PFX "data_addr_hi = 0x%x\n",
813 le32_to_cpu(ib_mac_rsp->data_addr_hi));
814 printk(KERN_ERR PFX "data_addr_lo = 0x%x\n",
815 le32_to_cpu(ib_mac_rsp->data_addr_lo));
816 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
817 printk(KERN_ERR PFX "rss = %x\n",
818 le32_to_cpu(ib_mac_rsp->rss));
819 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)
820 printk(KERN_ERR PFX "vlan_id = %x\n",
821 le16_to_cpu(ib_mac_rsp->vlan_id));
822
823 printk(KERN_ERR PFX "flags4 = %s%s%s.\n",
824 le32_to_cpu(ib_mac_rsp->
825 flags4) & IB_MAC_IOCB_RSP_HV ? "HV " : "",
826 le32_to_cpu(ib_mac_rsp->
827 flags4) & IB_MAC_IOCB_RSP_HS ? "HS " : "",
828 le32_to_cpu(ib_mac_rsp->
829 flags4) & IB_MAC_IOCB_RSP_HL ? "HL " : "");
830
831 if (le32_to_cpu(ib_mac_rsp->flags4) & IB_MAC_IOCB_RSP_HV) {
832 printk(KERN_ERR PFX "hdr length = %d.\n",
833 le32_to_cpu(ib_mac_rsp->hdr_len));
834 printk(KERN_ERR PFX "hdr addr_hi = 0x%x.\n",
835 le32_to_cpu(ib_mac_rsp->hdr_addr_hi));
836 printk(KERN_ERR PFX "hdr addr_lo = 0x%x.\n",
837 le32_to_cpu(ib_mac_rsp->hdr_addr_lo));
838 }
839}
840#endif
841
842#ifdef QL_ALL_DUMP
843void ql_dump_all(struct ql_adapter *qdev)
844{
845 int i;
846
847 QL_DUMP_REGS(qdev);
848 QL_DUMP_QDEV(qdev);
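	/* Note: the casts below rely on the wqicb/cqicb being embedded at the
	 * start of struct tx_ring/struct rx_ring, so a ring pointer doubles
	 * as a pointer to its init control block.
	 */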
849 for (i = 0; i < qdev->tx_ring_count; i++) {
850 QL_DUMP_TX_RING(&qdev->tx_ring[i]);
851 QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]);
852 }
853 for (i = 0; i < qdev->rx_ring_count; i++) {
854 QL_DUMP_RX_RING(&qdev->rx_ring[i]);
855 QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]);
856 }
857}
858#endif
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
new file mode 100644
index 000000000000..6457f8c4fdaa
--- /dev/null
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -0,0 +1,415 @@
1#include <linux/kernel.h>
2#include <linux/init.h>
3#include <linux/types.h>
4#include <linux/module.h>
5#include <linux/list.h>
6#include <linux/pci.h>
7#include <linux/dma-mapping.h>
8#include <linux/pagemap.h>
9#include <linux/sched.h>
10#include <linux/slab.h>
11#include <linux/dmapool.h>
12#include <linux/mempool.h>
13#include <linux/spinlock.h>
14#include <linux/kthread.h>
15#include <linux/interrupt.h>
16#include <linux/errno.h>
17#include <linux/ioport.h>
18#include <linux/in.h>
19#include <linux/ip.h>
20#include <linux/ipv6.h>
21#include <net/ipv6.h>
22#include <linux/tcp.h>
23#include <linux/udp.h>
24#include <linux/if_arp.h>
25#include <linux/if_ether.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/ethtool.h>
29#include <linux/skbuff.h>
30#include <linux/rtnetlink.h>
31#include <linux/if_vlan.h>
32#include <linux/init.h>
33#include <linux/delay.h>
34#include <linux/mm.h>
35#include <linux/vmalloc.h>
36
37#include <linux/version.h>
38
39#include "qlge.h"
40
41static int ql_update_ring_coalescing(struct ql_adapter *qdev)
42{
43 int i, status = 0;
44 struct rx_ring *rx_ring;
45 struct cqicb *cqicb;
46
47 if (!netif_running(qdev->ndev))
48 return status;
49
50 spin_lock(&qdev->hw_lock);
51 /* Skip the default queue, and update the outbound handler
52 * queues if they changed.
53 */
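	/* rx_ring[1] serves as a representative here: all outbound completion
	 * rings are programmed with the same tx coalescing values, so
	 * checking one is enough to tell whether they changed.
	 */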
54 cqicb = (struct cqicb *)&qdev->rx_ring[1];
55 if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs ||
56 le16_to_cpu(cqicb->pkt_delay) != qdev->tx_max_coalesced_frames) {
57		for (i = 1; i < qdev->rss_ring_first_cq_id; i++) {
58 rx_ring = &qdev->rx_ring[i];
59 cqicb = (struct cqicb *)rx_ring;
60			cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
61			cqicb->pkt_delay =
62			    cpu_to_le16(qdev->tx_max_coalesced_frames);
63 cqicb->flags = FLAGS_LI;
64			status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
65 CFG_LCQ, rx_ring->cq_id);
66 if (status) {
67 QPRINTK(qdev, IFUP, ERR,
68 "Failed to load CQICB.\n");
69 goto exit;
70 }
71 }
72 }
73
74 /* Update the inbound (RSS) handler queues if they changed. */
75 cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_first_cq_id];
76 if (le16_to_cpu(cqicb->irq_delay) != qdev->rx_coalesce_usecs ||
77 le16_to_cpu(cqicb->pkt_delay) != qdev->rx_max_coalesced_frames) {
78 for (i = qdev->rss_ring_first_cq_id;
79		     i < qdev->rss_ring_first_cq_id + qdev->rss_ring_count;
80 i++) {
81 rx_ring = &qdev->rx_ring[i];
82 cqicb = (struct cqicb *)rx_ring;
83			cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
84			cqicb->pkt_delay =
85			    cpu_to_le16(qdev->rx_max_coalesced_frames);
86 cqicb->flags = FLAGS_LI;
87			status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
88 CFG_LCQ, rx_ring->cq_id);
89 if (status) {
90 QPRINTK(qdev, IFUP, ERR,
91 "Failed to load CQICB.\n");
92 goto exit;
93 }
94 }
95 }
96exit:
97 spin_unlock(&qdev->hw_lock);
98 return status;
99}
100
101void ql_update_stats(struct ql_adapter *qdev)
102{
103 u32 i;
104 u64 data;
105 u64 *iter = &qdev->nic_stats.tx_pkts;
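	/* iter walks struct nic_stats field by field; the register loops
	 * below depend on those fields being laid out in the same order as
	 * the XGMAC statistics registers they are read from.
	 */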
106
107 spin_lock(&qdev->stats_lock);
108 if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
109 QPRINTK(qdev, DRV, ERR,
110 "Couldn't get xgmac sem.\n");
111 goto quit;
112 }
113 /*
114 * Get TX statistics.
115 */
116 for (i = 0x200; i < 0x280; i += 8) {
117 if (ql_read_xgmac_reg64(qdev, i, &data)) {
118 QPRINTK(qdev, DRV, ERR,
119 "Error reading status register 0x%.04x.\n", i);
120 goto end;
121 } else
122 *iter = data;
123 iter++;
124 }
125
126 /*
127 * Get RX statistics.
128 */
129 for (i = 0x300; i < 0x3d0; i += 8) {
130 if (ql_read_xgmac_reg64(qdev, i, &data)) {
131 QPRINTK(qdev, DRV, ERR,
132 "Error reading status register 0x%.04x.\n", i);
133 goto end;
134 } else
135 *iter = data;
136 iter++;
137 }
138
139end:
140 ql_sem_unlock(qdev, qdev->xg_sem_mask);
141quit:
142 spin_unlock(&qdev->stats_lock);
143
144 QL_DUMP_STAT(qdev);
145
146 return;
147}
148
149static char ql_stats_str_arr[][ETH_GSTRING_LEN] = {
150 {"tx_pkts"},
151 {"tx_bytes"},
152 {"tx_mcast_pkts"},
153 {"tx_bcast_pkts"},
154 {"tx_ucast_pkts"},
155 {"tx_ctl_pkts"},
156 {"tx_pause_pkts"},
157 {"tx_64_pkts"},
158 {"tx_65_to_127_pkts"},
159 {"tx_128_to_255_pkts"},
160 {"tx_256_511_pkts"},
161 {"tx_512_to_1023_pkts"},
162 {"tx_1024_to_1518_pkts"},
163 {"tx_1519_to_max_pkts"},
164 {"tx_undersize_pkts"},
165 {"tx_oversize_pkts"},
166 {"rx_bytes"},
167 {"rx_bytes_ok"},
168 {"rx_pkts"},
169 {"rx_pkts_ok"},
170 {"rx_bcast_pkts"},
171 {"rx_mcast_pkts"},
172 {"rx_ucast_pkts"},
173 {"rx_undersize_pkts"},
174 {"rx_oversize_pkts"},
175 {"rx_jabber_pkts"},
176 {"rx_undersize_fcerr_pkts"},
177 {"rx_drop_events"},
178 {"rx_fcerr_pkts"},
179 {"rx_align_err"},
180 {"rx_symbol_err"},
181 {"rx_mac_err"},
182 {"rx_ctl_pkts"},
183 {"rx_pause_pkts"},
184 {"rx_64_pkts"},
185 {"rx_65_to_127_pkts"},
186 {"rx_128_255_pkts"},
187 {"rx_256_511_pkts"},
188 {"rx_512_to_1023_pkts"},
189 {"rx_1024_to_1518_pkts"},
190 {"rx_1519_to_max_pkts"},
191 {"rx_len_err_pkts"},
192};
193
194static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
195{
196 switch (stringset) {
197 case ETH_SS_STATS:
198 memcpy(buf, ql_stats_str_arr, sizeof(ql_stats_str_arr));
199 break;
200 }
201}
202
203static int ql_get_sset_count(struct net_device *dev, int sset)
204{
205 switch (sset) {
206 case ETH_SS_STATS:
207 return ARRAY_SIZE(ql_stats_str_arr);
208 default:
209 return -EOPNOTSUPP;
210 }
211}
212
213static void
214ql_get_ethtool_stats(struct net_device *ndev,
215 struct ethtool_stats *stats, u64 *data)
216{
217 struct ql_adapter *qdev = netdev_priv(ndev);
218 struct nic_stats *s = &qdev->nic_stats;
219
220 ql_update_stats(qdev);
221
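	/* The order of the copies below must match ql_stats_str_arr so
	 * ethtool pairs each counter with the right name.
	 */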
222 *data++ = s->tx_pkts;
223 *data++ = s->tx_bytes;
224 *data++ = s->tx_mcast_pkts;
225 *data++ = s->tx_bcast_pkts;
226 *data++ = s->tx_ucast_pkts;
227 *data++ = s->tx_ctl_pkts;
228 *data++ = s->tx_pause_pkts;
229 *data++ = s->tx_64_pkt;
230 *data++ = s->tx_65_to_127_pkt;
231 *data++ = s->tx_128_to_255_pkt;
232 *data++ = s->tx_256_511_pkt;
233 *data++ = s->tx_512_to_1023_pkt;
234 *data++ = s->tx_1024_to_1518_pkt;
235 *data++ = s->tx_1519_to_max_pkt;
236 *data++ = s->tx_undersize_pkt;
237 *data++ = s->tx_oversize_pkt;
238 *data++ = s->rx_bytes;
239 *data++ = s->rx_bytes_ok;
240 *data++ = s->rx_pkts;
241 *data++ = s->rx_pkts_ok;
242 *data++ = s->rx_bcast_pkts;
243 *data++ = s->rx_mcast_pkts;
244 *data++ = s->rx_ucast_pkts;
245 *data++ = s->rx_undersize_pkts;
246 *data++ = s->rx_oversize_pkts;
247 *data++ = s->rx_jabber_pkts;
248 *data++ = s->rx_undersize_fcerr_pkts;
249 *data++ = s->rx_drop_events;
250 *data++ = s->rx_fcerr_pkts;
251 *data++ = s->rx_align_err;
252 *data++ = s->rx_symbol_err;
253 *data++ = s->rx_mac_err;
254 *data++ = s->rx_ctl_pkts;
255 *data++ = s->rx_pause_pkts;
256 *data++ = s->rx_64_pkts;
257 *data++ = s->rx_65_to_127_pkts;
258 *data++ = s->rx_128_255_pkts;
259 *data++ = s->rx_256_511_pkts;
260 *data++ = s->rx_512_to_1023_pkts;
261 *data++ = s->rx_1024_to_1518_pkts;
262 *data++ = s->rx_1519_to_max_pkts;
263 *data++ = s->rx_len_err_pkts;
264}
265
266static int ql_get_settings(struct net_device *ndev,
267 struct ethtool_cmd *ecmd)
268{
269 struct ql_adapter *qdev = netdev_priv(ndev);
270
271 ecmd->supported = SUPPORTED_10000baseT_Full;
272 ecmd->advertising = ADVERTISED_10000baseT_Full;
273 ecmd->autoneg = AUTONEG_ENABLE;
274 ecmd->transceiver = XCVR_EXTERNAL;
275 if ((qdev->link_status & LINK_TYPE_MASK) == LINK_TYPE_10GBASET) {
276 ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
277 ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg);
278 ecmd->port = PORT_TP;
279 } else {
280 ecmd->supported |= SUPPORTED_FIBRE;
281 ecmd->advertising |= ADVERTISED_FIBRE;
282 ecmd->port = PORT_FIBRE;
283 }
284
285 ecmd->speed = SPEED_10000;
286 ecmd->duplex = DUPLEX_FULL;
287
288 return 0;
289}
290
291static void ql_get_drvinfo(struct net_device *ndev,
292 struct ethtool_drvinfo *drvinfo)
293{
294 struct ql_adapter *qdev = netdev_priv(ndev);
295 strncpy(drvinfo->driver, qlge_driver_name, 32);
296 strncpy(drvinfo->version, qlge_driver_version, 32);
297 strncpy(drvinfo->fw_version, "N/A", 32);
298 strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
299 drvinfo->n_stats = 0;
300 drvinfo->testinfo_len = 0;
301 drvinfo->regdump_len = 0;
302 drvinfo->eedump_len = 0;
303}
304
305static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
306{
307 struct ql_adapter *qdev = netdev_priv(dev);
308
309 c->rx_coalesce_usecs = qdev->rx_coalesce_usecs;
310 c->tx_coalesce_usecs = qdev->tx_coalesce_usecs;
311
312 /* This chip coalesces as follows:
313 * If a packet arrives, hold off interrupts until
314	 * cqicb->irq_delay expires, but if no other packets arrive don't
315	 * wait longer than cqicb->pkt_delay. But ethtool doesn't use a
316	 * timer to coalesce on a frame basis, so we have to take ethtool's
317	 * max_coalesced_frames value and convert it to a delay in microseconds.
318	 * We do this by assuming a nominal throughput of 1,000,000 frames per
319	 * second (at 1024 bytes per frame), i.e. one frame per usec, so the
320	 * mapping is a simple one-to-one ratio.
321 */
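	/* Per the one-to-one ratio above, a max_coalesced_frames value of N
	 * ends up programmed into the CQICB as an N-microsecond pkt_delay.
	 */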
322 c->rx_max_coalesced_frames = qdev->rx_max_coalesced_frames;
323 c->tx_max_coalesced_frames = qdev->tx_max_coalesced_frames;
324
325 return 0;
326}
327
328static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c)
329{
330 struct ql_adapter *qdev = netdev_priv(ndev);
331
332 /* Validate user parameters. */
333 if (c->rx_coalesce_usecs > qdev->rx_ring_size / 2)
334 return -EINVAL;
335 /* Don't wait more than 10 usec. */
336 if (c->rx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
337 return -EINVAL;
338 if (c->tx_coalesce_usecs > qdev->tx_ring_size / 2)
339 return -EINVAL;
340 if (c->tx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
341 return -EINVAL;
342
343 /* Verify a change took place before updating the hardware. */
344 if (qdev->rx_coalesce_usecs == c->rx_coalesce_usecs &&
345 qdev->tx_coalesce_usecs == c->tx_coalesce_usecs &&
346 qdev->rx_max_coalesced_frames == c->rx_max_coalesced_frames &&
347 qdev->tx_max_coalesced_frames == c->tx_max_coalesced_frames)
348 return 0;
349
350 qdev->rx_coalesce_usecs = c->rx_coalesce_usecs;
351 qdev->tx_coalesce_usecs = c->tx_coalesce_usecs;
352 qdev->rx_max_coalesced_frames = c->rx_max_coalesced_frames;
353 qdev->tx_max_coalesced_frames = c->tx_max_coalesced_frames;
354
355 return ql_update_ring_coalescing(qdev);
356}
357
358static u32 ql_get_rx_csum(struct net_device *netdev)
359{
360 struct ql_adapter *qdev = netdev_priv(netdev);
361 return qdev->rx_csum;
362}
363
364static int ql_set_rx_csum(struct net_device *netdev, uint32_t data)
365{
366 struct ql_adapter *qdev = netdev_priv(netdev);
367 qdev->rx_csum = data;
368 return 0;
369}
370
371static int ql_set_tso(struct net_device *ndev, uint32_t data)
372{
373
374 if (data) {
375 ndev->features |= NETIF_F_TSO;
376 ndev->features |= NETIF_F_TSO6;
377 } else {
378 ndev->features &= ~NETIF_F_TSO;
379 ndev->features &= ~NETIF_F_TSO6;
380 }
381 return 0;
382}
383
384static u32 ql_get_msglevel(struct net_device *ndev)
385{
386 struct ql_adapter *qdev = netdev_priv(ndev);
387 return qdev->msg_enable;
388}
389
390static void ql_set_msglevel(struct net_device *ndev, u32 value)
391{
392 struct ql_adapter *qdev = netdev_priv(ndev);
393 qdev->msg_enable = value;
394}
395
396const struct ethtool_ops qlge_ethtool_ops = {
397 .get_settings = ql_get_settings,
398 .get_drvinfo = ql_get_drvinfo,
399 .get_msglevel = ql_get_msglevel,
400 .set_msglevel = ql_set_msglevel,
401 .get_link = ethtool_op_get_link,
402 .get_rx_csum = ql_get_rx_csum,
403 .set_rx_csum = ql_set_rx_csum,
404 .get_tx_csum = ethtool_op_get_tx_csum,
405 .get_sg = ethtool_op_get_sg,
406 .set_sg = ethtool_op_set_sg,
407 .get_tso = ethtool_op_get_tso,
408 .set_tso = ql_set_tso,
409 .get_coalesce = ql_get_coalesce,
410 .set_coalesce = ql_set_coalesce,
411 .get_sset_count = ql_get_sset_count,
412 .get_strings = ql_get_strings,
413 .get_ethtool_stats = ql_get_ethtool_stats,
414};
415
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
new file mode 100644
index 000000000000..ad878e2b9ded
--- /dev/null
+++ b/drivers/net/qlge/qlge_main.c
@@ -0,0 +1,3954 @@
1/*
2 * QLogic qlge NIC HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 * See LICENSE.qlge for copyright and licensing details.
5 * Author: Linux qlge network device driver by
6 * Ron Mercer <ron.mercer@qlogic.com>
7 */
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <linux/types.h>
11#include <linux/module.h>
12#include <linux/list.h>
13#include <linux/pci.h>
14#include <linux/dma-mapping.h>
15#include <linux/pagemap.h>
16#include <linux/sched.h>
17#include <linux/slab.h>
18#include <linux/dmapool.h>
19#include <linux/mempool.h>
20#include <linux/spinlock.h>
21#include <linux/kthread.h>
22#include <linux/interrupt.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/in.h>
26#include <linux/ip.h>
27#include <linux/ipv6.h>
28#include <net/ipv6.h>
29#include <linux/tcp.h>
30#include <linux/udp.h>
31#include <linux/if_arp.h>
32#include <linux/if_ether.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/ethtool.h>
36#include <linux/skbuff.h>
37#include <linux/rtnetlink.h>
38#include <linux/if_vlan.h>
39#include <linux/init.h>
40#include <linux/delay.h>
41#include <linux/mm.h>
42#include <linux/vmalloc.h>
43
44#include "qlge.h"
45
46char qlge_driver_name[] = DRV_NAME;
47const char qlge_driver_version[] = DRV_VERSION;
48
49MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
50MODULE_DESCRIPTION(DRV_STRING " ");
51MODULE_LICENSE("GPL");
52MODULE_VERSION(DRV_VERSION);
53
54static const u32 default_msg =
55 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
56/* NETIF_MSG_TIMER | */
57 NETIF_MSG_IFDOWN |
58 NETIF_MSG_IFUP |
59 NETIF_MSG_RX_ERR |
60 NETIF_MSG_TX_ERR |
61 NETIF_MSG_TX_QUEUED |
62 NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS |
63/* NETIF_MSG_PKTDATA | */
64 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
65
66static int debug = 0x00007fff; /* defaults above */
67module_param(debug, int, 0);
68MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
69
70#define MSIX_IRQ 0
71#define MSI_IRQ 1
72#define LEG_IRQ 2
73static int irq_type = MSIX_IRQ;
74module_param(irq_type, int, 0);
75MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
76
77static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
78 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID)},
79 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID1)},
80 /* required last entry */
81 {0,}
82};
83
84MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
85
86/* This hardware semaphore causes exclusive access to
87 * resources shared between the NIC driver, MPI firmware,
88 * FCOE firmware and the FC driver.
89 */
90static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
91{
92 u32 sem_bits = 0;
93
94 switch (sem_mask) {
95 case SEM_XGMAC0_MASK:
96 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
97 break;
98 case SEM_XGMAC1_MASK:
99 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
100 break;
101 case SEM_ICB_MASK:
102 sem_bits = SEM_SET << SEM_ICB_SHIFT;
103 break;
104 case SEM_MAC_ADDR_MASK:
105 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
106 break;
107 case SEM_FLASH_MASK:
108 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
109 break;
110 case SEM_PROBE_MASK:
111 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
112 break;
113 case SEM_RT_IDX_MASK:
114 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
115 break;
116 case SEM_PROC_REG_MASK:
117 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
118 break;
119 default:
120 QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!.\n");
121 return -EINVAL;
122 }
123
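	/* Write the "set" pattern together with its mask and read SEM back:
	 * if our bits stuck we own the resource and return 0, otherwise
	 * another function holds it and we return nonzero.
	 */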
124 ql_write32(qdev, SEM, sem_bits | sem_mask);
125 return !(ql_read32(qdev, SEM) & sem_bits);
126}
127
128int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
129{
130 unsigned int seconds = 3;
131 do {
132 if (!ql_sem_trylock(qdev, sem_mask))
133 return 0;
134 ssleep(1);
135 } while (--seconds);
136 return -ETIMEDOUT;
137}
138
139void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
140{
141 ql_write32(qdev, SEM, sem_mask);
142 ql_read32(qdev, SEM); /* flush */
143}
144
145/* This function waits for a specific bit to come ready
146 * in a given register. It is used mostly by the initialization
147 * process, but is also called from process-context paths such as
148 * netdev->set_multi, netdev->set_mac_address and netdev->vlan_rx_add_vid.
149 */
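/* The register is polled up to UDELAY_COUNT times, UDELAY_DELAY
 * microseconds apart, before giving up with -ETIMEDOUT.
 */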
150int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
151{
152 u32 temp;
153 int count = UDELAY_COUNT;
154
155 while (count) {
156 temp = ql_read32(qdev, reg);
157
158 /* check for errors */
159 if (temp & err_bit) {
160 QPRINTK(qdev, PROBE, ALERT,
161 "register 0x%.08x access error, value = 0x%.08x!.\n",
162 reg, temp);
163 return -EIO;
164 } else if (temp & bit)
165 return 0;
166 udelay(UDELAY_DELAY);
167 count--;
168 }
169 QPRINTK(qdev, PROBE, ALERT,
170 "Timed out waiting for reg %x to come ready.\n", reg);
171 return -ETIMEDOUT;
172}
173
174/* The CFG register is used to download TX and RX control blocks
175 * to the chip. This function waits for an operation to complete.
176 */
177static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
178{
179 int count = UDELAY_COUNT;
180 u32 temp;
181
182 while (count) {
183 temp = ql_read32(qdev, CFG);
184 if (temp & CFG_LE)
185 return -EIO;
186 if (!(temp & bit))
187 return 0;
188 udelay(UDELAY_DELAY);
189 count--;
190 }
191 return -ETIMEDOUT;
192}
193
194
195/* Used to issue init control blocks to hw. Maps control block,
196 * sets address, triggers download, waits for completion.
197 */
198int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
199 u16 q_id)
200{
201 u64 map;
202 int status = 0;
203 int direction;
204 u32 mask;
205 u32 value;
206
207 direction =
208 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
209 PCI_DMA_FROMDEVICE;
210
211 map = pci_map_single(qdev->pdev, ptr, size, direction);
212 if (pci_dma_mapping_error(qdev->pdev, map)) {
213 QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
214 return -ENOMEM;
215 }
216
217 status = ql_wait_cfg(qdev, bit);
218 if (status) {
219 QPRINTK(qdev, IFUP, ERR,
220 "Timed out waiting for CFG to come ready.\n");
221 goto exit;
222 }
223
224 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
225 if (status)
226 goto exit;
227 ql_write32(qdev, ICB_L, (u32) map);
228 ql_write32(qdev, ICB_H, (u32) (map >> 32));
229 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
230
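	/* CFG appears to follow the masked-write convention used elsewhere in
	 * this driver (e.g. INTR_EN): the upper 16 bits select which of the
	 * lower bits the write may modify, so only our bit and the queue id
	 * field are touched here.
	 */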
231 mask = CFG_Q_MASK | (bit << 16);
232 value = bit | (q_id << CFG_Q_SHIFT);
233 ql_write32(qdev, CFG, (mask | value));
234
235 /*
236 * Wait for the bit to clear after signaling hw.
237 */
238 status = ql_wait_cfg(qdev, bit);
239exit:
240 pci_unmap_single(qdev->pdev, map, size, direction);
241 return status;
242}
243
244/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
245int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
246 u32 *value)
247{
248 u32 offset = 0;
249 int status;
250
251 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
252 if (status)
253 return status;
254 switch (type) {
255 case MAC_ADDR_TYPE_MULTI_MAC:
256 case MAC_ADDR_TYPE_CAM_MAC:
257 {
258 status =
259 ql_wait_reg_rdy(qdev,
260 MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
261 if (status)
262 goto exit;
263 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
264 (index << MAC_ADDR_IDX_SHIFT) | /* index */
265 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
266 status =
267 ql_wait_reg_rdy(qdev,
268 MAC_ADDR_IDX, MAC_ADDR_MR, MAC_ADDR_E);
269 if (status)
270 goto exit;
271 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
272 status =
273 ql_wait_reg_rdy(qdev,
274 MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
275 if (status)
276 goto exit;
277 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
278 (index << MAC_ADDR_IDX_SHIFT) | /* index */
279 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
280 status =
281 ql_wait_reg_rdy(qdev,
282 MAC_ADDR_IDX, MAC_ADDR_MR, MAC_ADDR_E);
283 if (status)
284 goto exit;
285 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
286 if (type == MAC_ADDR_TYPE_CAM_MAC) {
287 status =
288 ql_wait_reg_rdy(qdev,
289 MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
290 if (status)
291 goto exit;
292 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
293 (index << MAC_ADDR_IDX_SHIFT) | /* index */
294 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
295 status =
296 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
297 MAC_ADDR_MR, MAC_ADDR_E);
298 if (status)
299 goto exit;
300 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
301 }
302 break;
303 }
304 case MAC_ADDR_TYPE_VLAN:
305 case MAC_ADDR_TYPE_MULTI_FLTR:
306 default:
307 QPRINTK(qdev, IFUP, CRIT,
308 "Address type %d not yet supported.\n", type);
309 status = -EPERM;
310 }
311exit:
312 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
313 return status;
314}
315
316/* Set up a MAC, multicast or VLAN address for the
317 * inbound frame matching.
318 */
319static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
320 u16 index)
321{
322 u32 offset = 0;
323 int status = 0;
324
325 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
326 if (status)
327 return status;
328 switch (type) {
329 case MAC_ADDR_TYPE_MULTI_MAC:
330 case MAC_ADDR_TYPE_CAM_MAC:
331 {
332 u32 cam_output;
333 u32 upper = (addr[0] << 8) | addr[1];
334 u32 lower =
335 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
336 (addr[5]);
337
338 QPRINTK(qdev, IFUP, INFO,
339 "Adding %s address %02x:%02x:%02x:%02x:%02x:%02x"
340 " at index %d in the CAM.\n",
341 ((type ==
342 MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
343 "UNICAST"), addr[0], addr[1], addr[2], addr[3],
344 addr[4], addr[5], index);
345
346 status =
347 ql_wait_reg_rdy(qdev,
348 MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
349 if (status)
350 goto exit;
351 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
352 (index << MAC_ADDR_IDX_SHIFT) | /* index */
353 type); /* type */
354 ql_write32(qdev, MAC_ADDR_DATA, lower);
355 status =
356 ql_wait_reg_rdy(qdev,
357 MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
358 if (status)
359 goto exit;
360 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
361 (index << MAC_ADDR_IDX_SHIFT) | /* index */
362 type); /* type */
363 ql_write32(qdev, MAC_ADDR_DATA, upper);
364 status =
365 ql_wait_reg_rdy(qdev,
366 MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
367 if (status)
368 goto exit;
369 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
370 (index << MAC_ADDR_IDX_SHIFT) | /* index */
371 type); /* type */
372 /* This field should also include the queue id
373 and possibly the function id. Right now we hardcode
374 the route field to NIC core.
375 */
376 if (type == MAC_ADDR_TYPE_CAM_MAC) {
377 cam_output = (CAM_OUT_ROUTE_NIC |
378 (qdev->
379 func << CAM_OUT_FUNC_SHIFT) |
380 (qdev->
381 rss_ring_first_cq_id <<
382 CAM_OUT_CQ_ID_SHIFT));
383 if (qdev->vlgrp)
384 cam_output |= CAM_OUT_RV;
385 /* route to NIC core */
386 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
387 }
388 break;
389 }
390 case MAC_ADDR_TYPE_VLAN:
391 {
392 u32 enable_bit = *((u32 *) &addr[0]);
393 /* For VLAN, the addr actually holds a bit that
394 * either enables or disables the vlan id we are
395 * addressing. It's either MAC_ADDR_E on or off.
396 * That's bit-27 we're talking about.
397 */
398 QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
399 (enable_bit ? "Adding" : "Removing"),
400 index, (enable_bit ? "to" : "from"));
401
402 status =
403 ql_wait_reg_rdy(qdev,
404 MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
405 if (status)
406 goto exit;
407 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
408 (index << MAC_ADDR_IDX_SHIFT) | /* index */
409 type | /* type */
410 enable_bit); /* enable/disable */
411 break;
412 }
413 case MAC_ADDR_TYPE_MULTI_FLTR:
414 default:
415 QPRINTK(qdev, IFUP, CRIT,
416 "Address type %d not yet supported.\n", type);
417 status = -EPERM;
418 }
419exit:
420 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
421 return status;
422}
423
424/* Get a specific frame routing value from the CAM.
425 * Used for debug and reg dump.
426 */
427int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
428{
429 int status = 0;
430
431 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
432 if (status)
433 goto exit;
434
435 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, RT_IDX_E);
436 if (status)
437 goto exit;
438
439 ql_write32(qdev, RT_IDX,
440 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
441 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, RT_IDX_E);
442 if (status)
443 goto exit;
444 *value = ql_read32(qdev, RT_DATA);
445exit:
446 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
447 return status;
448}
449
450/* The NIC function for this chip has 16 routing indexes. Each one can be used
451 * to route different frame types to various inbound queues. We send broadcast/
452 * multicast/error frames to the default queue for slow handling,
453 * and CAM hit/RSS frames to the fast handling queues.
454 */
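/* Programming a slot is a three-step sequence: wait for RT_IDX to report
 * ready, write the slot index/type (plus the enable bit) to RT_IDX, then
 * write the frame-match mask to RT_DATA (or zero to clear the slot).
 */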
455static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
456 int enable)
457{
458 int status;
459 u32 value = 0;
460
461 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
462 if (status)
463 return status;
464
465 QPRINTK(qdev, IFUP, DEBUG,
466 "%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
467 (enable ? "Adding" : "Removing"),
468 ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
469 ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
470 ((index ==
471 RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
472 ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
473 ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
474 ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
475 ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
476 ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
477 ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
478 ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
479 ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
480 ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
481 ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
482 ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
483 ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
484 ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
485 (enable ? "to" : "from"));
486
487 switch (mask) {
488 case RT_IDX_CAM_HIT:
489 {
490 value = RT_IDX_DST_CAM_Q | /* dest */
491 RT_IDX_TYPE_NICQ | /* type */
492 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
493 break;
494 }
495 case RT_IDX_VALID: /* Promiscuous Mode frames. */
496 {
497 value = RT_IDX_DST_DFLT_Q | /* dest */
498 RT_IDX_TYPE_NICQ | /* type */
499 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
500 break;
501 }
502 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
503 {
504 value = RT_IDX_DST_DFLT_Q | /* dest */
505 RT_IDX_TYPE_NICQ | /* type */
506 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
507 break;
508 }
509 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
510 {
511 value = RT_IDX_DST_DFLT_Q | /* dest */
512 RT_IDX_TYPE_NICQ | /* type */
513 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
514 break;
515 }
516 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
517 {
518 value = RT_IDX_DST_CAM_Q | /* dest */
519 RT_IDX_TYPE_NICQ | /* type */
520 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
521 break;
522 }
523 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
524 {
525 value = RT_IDX_DST_CAM_Q | /* dest */
526 RT_IDX_TYPE_NICQ | /* type */
527 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
528 break;
529 }
530 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
531 {
532 value = RT_IDX_DST_RSS | /* dest */
533 RT_IDX_TYPE_NICQ | /* type */
534 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
535 break;
536 }
537 case 0: /* Clear the E-bit on an entry. */
538 {
539 value = RT_IDX_DST_DFLT_Q | /* dest */
540 RT_IDX_TYPE_NICQ | /* type */
541 (index << RT_IDX_IDX_SHIFT);/* index */
542 break;
543 }
544 default:
545 QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
546 mask);
547 status = -EPERM;
548 goto exit;
549 }
550
551 if (value) {
552 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
553 if (status)
554 goto exit;
555 value |= (enable ? RT_IDX_E : 0);
556 ql_write32(qdev, RT_IDX, value);
557 ql_write32(qdev, RT_DATA, enable ? mask : 0);
558 }
559exit:
560 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
561 return status;
562}
563
564static void ql_enable_interrupts(struct ql_adapter *qdev)
565{
566 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
567}
568
569static void ql_disable_interrupts(struct ql_adapter *qdev)
570{
571 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
572}
573
574/* If we're running with multiple MSI-X vectors then we enable on the fly.
575 * Otherwise, we may have multiple outstanding workers and don't want to
576 * enable until the last one finishes. In this case, the irq_cnt gets
577 * incremented every time we queue a worker and decremented every time
578 * a worker finishes. Once it hits zero we enable the interrupt.
579 */
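/* This counting scheme pairs with ql_disable_completion_interrupt(), which
 * bumps irq_cnt each time it masks the interrupt; the enable path below only
 * re-enables once every outstanding disable has been balanced (see also the
 * precharge in ql_enable_all_completion_interrupts()).
 */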
580void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
581{
582 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags)))
583 ql_write32(qdev, INTR_EN,
584 qdev->intr_context[intr].intr_en_mask);
585 else {
586 if (qdev->legacy_check)
587 spin_lock(&qdev->legacy_lock);
588 if (atomic_dec_and_test(&qdev->intr_context[intr].irq_cnt)) {
589 QPRINTK(qdev, INTR, ERR, "Enabling interrupt %d.\n",
590 intr);
591 ql_write32(qdev, INTR_EN,
592 qdev->intr_context[intr].intr_en_mask);
593 } else {
594 QPRINTK(qdev, INTR, ERR,
595 "Skip enable, other queue(s) are active.\n");
596 }
597 if (qdev->legacy_check)
598 spin_unlock(&qdev->legacy_lock);
599 }
600}
601
602static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
603{
604 u32 var = 0;
605
606 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags)))
607 goto exit;
608 else if (!atomic_read(&qdev->intr_context[intr].irq_cnt)) {
609 ql_write32(qdev, INTR_EN,
610 qdev->intr_context[intr].intr_dis_mask);
611 var = ql_read32(qdev, STS);
612 }
613 atomic_inc(&qdev->intr_context[intr].irq_cnt);
614exit:
615 return var;
616}
617
618static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
619{
620 int i;
621 for (i = 0; i < qdev->intr_count; i++) {
622		/* The enable call does an atomic_dec_and_test
623 * and enables only if the result is zero.
624 * So we precharge it here.
625 */
626 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
627 ql_enable_completion_interrupt(qdev, i);
628 }
629
630}
631
632int ql_read_flash_word(struct ql_adapter *qdev, int offset, u32 *data)
633{
634 int status = 0;
635 /* wait for reg to come ready */
636 status = ql_wait_reg_rdy(qdev,
637 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
638 if (status)
639 goto exit;
640 /* set up for reg read */
641 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
642 /* wait for reg to come ready */
643 status = ql_wait_reg_rdy(qdev,
644 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
645 if (status)
646 goto exit;
647 /* get the data */
648 *data = ql_read32(qdev, FLASH_DATA);
649exit:
650 return status;
651}
652
653static int ql_get_flash_params(struct ql_adapter *qdev)
654{
655 int i;
656 int status;
657 u32 *p = (u32 *)&qdev->flash;
658
659 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
660 return -ETIMEDOUT;
661
662 for (i = 0; i < sizeof(qdev->flash) / sizeof(u32); i++, p++) {
663 status = ql_read_flash_word(qdev, i, p);
664 if (status) {
665 QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
666 goto exit;
667 }
668
669 }
670exit:
671 ql_sem_unlock(qdev, SEM_FLASH_MASK);
672 return status;
673}
674
675/* xgmac registers are located behind the xgmac_addr and xgmac_data
676 * register pair. Each read/write requires us to wait for the ready
677 * bit before reading/writing the data.
678 */
679static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
680{
681 int status;
682 /* wait for reg to come ready */
683 status = ql_wait_reg_rdy(qdev,
684 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
685 if (status)
686 return status;
687 /* write the data to the data reg */
688 ql_write32(qdev, XGMAC_DATA, data);
689 /* trigger the write */
690 ql_write32(qdev, XGMAC_ADDR, reg);
691 return status;
692}
693
694/* xgmac registers are located behind the xgmac_addr and xgmac_data
695 * register pair. Each read/write requires us to wait for the ready
696 * bit before reading/writing the data.
697 */
698int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
699{
700 int status = 0;
701 /* wait for reg to come ready */
702 status = ql_wait_reg_rdy(qdev,
703 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
704 if (status)
705 goto exit;
706 /* set up for reg read */
707 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
708 /* wait for reg to come ready */
709 status = ql_wait_reg_rdy(qdev,
710 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
711 if (status)
712 goto exit;
713 /* get the data */
714 *data = ql_read32(qdev, XGMAC_DATA);
715exit:
716 return status;
717}
718
719/* This is used for reading the 64-bit statistics regs. */
720int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
721{
722 int status = 0;
723 u32 hi = 0;
724 u32 lo = 0;
725
726 status = ql_read_xgmac_reg(qdev, reg, &lo);
727 if (status)
728 goto exit;
729
730 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
731 if (status)
732 goto exit;
733
734 *data = (u64) lo | ((u64) hi << 32);
735
736exit:
737 return status;
738}
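
As an illustration of the addr/data indirection above, the sketch below shows how a caller might sample one 64-bit XGMAC statistics counter. The 0x200 offset is a placeholder invented for the example, and the semaphore handling simply mirrors what ql_port_initialize() does around the XGMAC_ADDR/XGMAC_DATA pair; the driver does not actually define this function.

/* Illustrative only -- the register offset below is a placeholder. */
static int ql_sample_xgmac_stat(struct ql_adapter *qdev, u64 *counter)
{
	int status;

	/* The XGMAC block is shared between functions, so take the
	 * same semaphore ql_port_initialize() uses before touching
	 * the XGMAC_ADDR/XGMAC_DATA register pair.
	 */
	if (ql_sem_spinlock(qdev, qdev->xg_sem_mask))
		return -ETIMEDOUT;

	status = ql_read_xgmac_reg64(qdev, 0x200 /* placeholder */, counter);

	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}
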
739
740/* Take the MAC Core out of reset.
741 * Enable statistics counting.
742 * Take the transmitter/receiver out of reset.
743 * This functionality may be done in the MPI firmware at a
744 * later date.
745 */
746static int ql_port_initialize(struct ql_adapter *qdev)
747{
748 int status = 0;
749 u32 data;
750
751 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
752 /* Another function has the semaphore, so
753 * wait for the port init bit to come ready.
754 */
755 QPRINTK(qdev, LINK, INFO,
756 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
757 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
758 if (status) {
759 QPRINTK(qdev, LINK, CRIT,
760 "Port initialize timed out.\n");
761 }
762 return status;
763 }
764
765	QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!\n");
766 /* Set the core reset. */
767 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
768 if (status)
769 goto end;
770 data |= GLOBAL_CFG_RESET;
771 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
772 if (status)
773 goto end;
774
775 /* Clear the core reset and turn on jumbo for receiver. */
776 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
777 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
778 data |= GLOBAL_CFG_TX_STAT_EN;
779 data |= GLOBAL_CFG_RX_STAT_EN;
780 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
781 if (status)
782 goto end;
783
784	/* Enable transmitter, and clear its reset. */
785 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
786 if (status)
787 goto end;
788 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
789 data |= TX_CFG_EN; /* Enable the transmitter. */
790 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
791 if (status)
792 goto end;
793
794	/* Enable receiver and clear its reset. */
795 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
796 if (status)
797 goto end;
798 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
799 data |= RX_CFG_EN; /* Enable the receiver. */
800 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
801 if (status)
802 goto end;
803
804 /* Turn on jumbo. */
805 status =
806 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
807 if (status)
808 goto end;
809 status =
810 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
811 if (status)
812 goto end;
813
814 /* Signal to the world that the port is enabled. */
815 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
816end:
817 ql_sem_unlock(qdev, qdev->xg_sem_mask);
818 return status;
819}
820
821/* Get the next large buffer. */
822struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
823{
824 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
825 rx_ring->lbq_curr_idx++;
826 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
827 rx_ring->lbq_curr_idx = 0;
828 rx_ring->lbq_free_cnt++;
829 return lbq_desc;
830}
831
832/* Get the next small buffer. */
833struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
834{
835 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
836 rx_ring->sbq_curr_idx++;
837 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
838 rx_ring->sbq_curr_idx = 0;
839 rx_ring->sbq_free_cnt++;
840 return sbq_desc;
841}
842
843/* Update an rx ring index. */
844static void ql_update_cq(struct rx_ring *rx_ring)
845{
846 rx_ring->cnsmr_idx++;
847 rx_ring->curr_entry++;
848 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
849 rx_ring->cnsmr_idx = 0;
850 rx_ring->curr_entry = rx_ring->cq_base;
851 }
852}
853
854static void ql_write_cq_idx(struct rx_ring *rx_ring)
855{
856 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
857}
858
859/* Process (refill) a large buffer queue. */
860static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
861{
862 int clean_idx = rx_ring->lbq_clean_idx;
863 struct bq_desc *lbq_desc;
864 struct bq_element *bq;
865 u64 map;
866 int i;
867
868 while (rx_ring->lbq_free_cnt > 16) {
869 for (i = 0; i < 16; i++) {
870 QPRINTK(qdev, RX_STATUS, DEBUG,
871 "lbq: try cleaning clean_idx = %d.\n",
872 clean_idx);
873 lbq_desc = &rx_ring->lbq[clean_idx];
874 bq = lbq_desc->bq;
875 if (lbq_desc->p.lbq_page == NULL) {
876 QPRINTK(qdev, RX_STATUS, DEBUG,
877 "lbq: getting new page for index %d.\n",
878 lbq_desc->index);
879 lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
880 if (lbq_desc->p.lbq_page == NULL) {
881 QPRINTK(qdev, RX_STATUS, ERR,
882 "Couldn't get a page.\n");
883 return;
884 }
885 map = pci_map_page(qdev->pdev,
886 lbq_desc->p.lbq_page,
887 0, PAGE_SIZE,
888 PCI_DMA_FROMDEVICE);
889 if (pci_dma_mapping_error(qdev->pdev, map)) {
890 QPRINTK(qdev, RX_STATUS, ERR,
891 "PCI mapping failed.\n");
892 return;
893 }
894 pci_unmap_addr_set(lbq_desc, mapaddr, map);
895 pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
896 bq->addr_lo = /*lbq_desc->addr_lo = */
897 cpu_to_le32(map);
898 bq->addr_hi = /*lbq_desc->addr_hi = */
899 cpu_to_le32(map >> 32);
900 }
901 clean_idx++;
902 if (clean_idx == rx_ring->lbq_len)
903 clean_idx = 0;
904 }
905
906 rx_ring->lbq_clean_idx = clean_idx;
907 rx_ring->lbq_prod_idx += 16;
908 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
909 rx_ring->lbq_prod_idx = 0;
910 QPRINTK(qdev, RX_STATUS, DEBUG,
911 "lbq: updating prod idx = %d.\n",
912 rx_ring->lbq_prod_idx);
913 ql_write_db_reg(rx_ring->lbq_prod_idx,
914 rx_ring->lbq_prod_idx_db_reg);
915 rx_ring->lbq_free_cnt -= 16;
916 }
917}
918
919/* Process (refill) a small buffer queue. */
920static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
921{
922 int clean_idx = rx_ring->sbq_clean_idx;
923 struct bq_desc *sbq_desc;
924 struct bq_element *bq;
925 u64 map;
926 int i;
927
928 while (rx_ring->sbq_free_cnt > 16) {
929 for (i = 0; i < 16; i++) {
930 sbq_desc = &rx_ring->sbq[clean_idx];
931 QPRINTK(qdev, RX_STATUS, DEBUG,
932 "sbq: try cleaning clean_idx = %d.\n",
933 clean_idx);
934 bq = sbq_desc->bq;
935 if (sbq_desc->p.skb == NULL) {
936 QPRINTK(qdev, RX_STATUS, DEBUG,
937 "sbq: getting new skb for index %d.\n",
938 sbq_desc->index);
939 sbq_desc->p.skb =
940 netdev_alloc_skb(qdev->ndev,
941 rx_ring->sbq_buf_size);
942 if (sbq_desc->p.skb == NULL) {
943 QPRINTK(qdev, PROBE, ERR,
944 "Couldn't get an skb.\n");
945 rx_ring->sbq_clean_idx = clean_idx;
946 return;
947 }
948 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
949 map = pci_map_single(qdev->pdev,
950 sbq_desc->p.skb->data,
951 rx_ring->sbq_buf_size /
952 2, PCI_DMA_FROMDEVICE);
953 pci_unmap_addr_set(sbq_desc, mapaddr, map);
954 pci_unmap_len_set(sbq_desc, maplen,
955 rx_ring->sbq_buf_size / 2);
956 bq->addr_lo = cpu_to_le32(map);
957 bq->addr_hi = cpu_to_le32(map >> 32);
958 }
959
960 clean_idx++;
961 if (clean_idx == rx_ring->sbq_len)
962 clean_idx = 0;
963 }
964 rx_ring->sbq_clean_idx = clean_idx;
965 rx_ring->sbq_prod_idx += 16;
966 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
967 rx_ring->sbq_prod_idx = 0;
968 QPRINTK(qdev, RX_STATUS, DEBUG,
969 "sbq: updating prod idx = %d.\n",
970 rx_ring->sbq_prod_idx);
971 ql_write_db_reg(rx_ring->sbq_prod_idx,
972 rx_ring->sbq_prod_idx_db_reg);
973
974 rx_ring->sbq_free_cnt -= 16;
975 }
976}
977
978static void ql_update_buffer_queues(struct ql_adapter *qdev,
979 struct rx_ring *rx_ring)
980{
981 ql_update_sbq(qdev, rx_ring);
982 ql_update_lbq(qdev, rx_ring);
983}
984
985/* Unmaps tx buffers. Can be called from send() if a pci mapping
986 * fails at some stage, or from the interrupt when a tx completes.
987 */
988static void ql_unmap_send(struct ql_adapter *qdev,
989 struct tx_ring_desc *tx_ring_desc, int mapped)
990{
991 int i;
992 for (i = 0; i < mapped; i++) {
993 if (i == 0 || (i == 7 && mapped > 7)) {
994 /*
995 * Unmap the skb->data area, or the
996 * external sglist (AKA the Outbound
997 * Address List (OAL)).
998			 * If it's the zeroth element, then it's
999			 * the skb->data area. If it's the 7th
1000			 * element and there are more than 6 frags,
1001			 * then it's an OAL.
1002 */
1003 if (i == 7) {
1004 QPRINTK(qdev, TX_DONE, DEBUG,
1005 "unmapping OAL area.\n");
1006 }
1007 pci_unmap_single(qdev->pdev,
1008 pci_unmap_addr(&tx_ring_desc->map[i],
1009 mapaddr),
1010 pci_unmap_len(&tx_ring_desc->map[i],
1011 maplen),
1012 PCI_DMA_TODEVICE);
1013 } else {
1014 QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
1015 i);
1016 pci_unmap_page(qdev->pdev,
1017 pci_unmap_addr(&tx_ring_desc->map[i],
1018 mapaddr),
1019 pci_unmap_len(&tx_ring_desc->map[i],
1020 maplen), PCI_DMA_TODEVICE);
1021 }
1022 }
1023
1024}
1025
1026/* Map the buffers for this transmit. This will return
1027 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1028 */
1029static int ql_map_send(struct ql_adapter *qdev,
1030 struct ob_mac_iocb_req *mac_iocb_ptr,
1031 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1032{
1033 int len = skb_headlen(skb);
1034 dma_addr_t map;
1035 int frag_idx, err, map_idx = 0;
1036 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1037 int frag_cnt = skb_shinfo(skb)->nr_frags;
1038
1039 if (frag_cnt) {
1040 QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
1041 }
1042 /*
1043 * Map the skb buffer first.
1044 */
1045 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1046
1047 err = pci_dma_mapping_error(qdev->pdev, map);
1048 if (err) {
1049 QPRINTK(qdev, TX_QUEUED, ERR,
1050 "PCI mapping failed with error: %d\n", err);
1051
1052 return NETDEV_TX_BUSY;
1053 }
1054
1055 tbd->len = cpu_to_le32(len);
1056 tbd->addr = cpu_to_le64(map);
1057 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1058 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1059 map_idx++;
1060
1061 /*
1062 * This loop fills the remainder of the 8 address descriptors
1063 * in the IOCB. If there are more than 7 fragments, then the
1064 * eighth address desc will point to an external list (OAL).
1065 * When this happens, the remainder of the frags will be stored
1066 * in this list.
1067 */
1068 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1069 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1070 tbd++;
1071 if (frag_idx == 6 && frag_cnt > 7) {
1072 /* Let's tack on an sglist.
1073 * Our control block will now
1074 * look like this:
1075 * iocb->seg[0] = skb->data
1076 * iocb->seg[1] = frag[0]
1077 * iocb->seg[2] = frag[1]
1078 * iocb->seg[3] = frag[2]
1079 * iocb->seg[4] = frag[3]
1080 * iocb->seg[5] = frag[4]
1081 * iocb->seg[6] = frag[5]
1082 * iocb->seg[7] = ptr to OAL (external sglist)
1083 * oal->seg[0] = frag[6]
1084 * oal->seg[1] = frag[7]
1085 * oal->seg[2] = frag[8]
1086 * oal->seg[3] = frag[9]
1087 * oal->seg[4] = frag[10]
1088 * etc...
1089 */
1090 /* Tack on the OAL in the eighth segment of IOCB. */
1091 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1092 sizeof(struct oal),
1093 PCI_DMA_TODEVICE);
1094 err = pci_dma_mapping_error(qdev->pdev, map);
1095 if (err) {
1096 QPRINTK(qdev, TX_QUEUED, ERR,
1097 "PCI mapping outbound address list with error: %d\n",
1098 err);
1099 goto map_error;
1100 }
1101
1102 tbd->addr = cpu_to_le64(map);
1103 /*
1104			 * The length is the number of fragments
1105			 * that remain to be mapped times the size
1106			 * of one sglist (OAL) entry.
1107 */
1108 tbd->len =
1109 cpu_to_le32((sizeof(struct tx_buf_desc) *
1110 (frag_cnt - frag_idx)) | TX_DESC_C);
1111 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1112 map);
1113 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1114 sizeof(struct oal));
1115 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1116 map_idx++;
1117 }
1118
1119 map =
1120 pci_map_page(qdev->pdev, frag->page,
1121 frag->page_offset, frag->size,
1122 PCI_DMA_TODEVICE);
1123
1124 err = pci_dma_mapping_error(qdev->pdev, map);
1125 if (err) {
1126 QPRINTK(qdev, TX_QUEUED, ERR,
1127 "PCI mapping frags failed with error: %d.\n",
1128 err);
1129 goto map_error;
1130 }
1131
1132 tbd->addr = cpu_to_le64(map);
1133 tbd->len = cpu_to_le32(frag->size);
1134 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1135 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1136 frag->size);
1137
1138 }
1139 /* Save the number of segments we've mapped. */
1140 tx_ring_desc->map_cnt = map_idx;
1141 /* Terminate the last segment. */
1142 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1143 return NETDEV_TX_OK;
1144
1145map_error:
1146 /*
1147	 * map_idx counts the entries that were mapped successfully,
1148	 * starting with the skb->data area, so passing it to
1149	 * ql_unmap_send() unmaps exactly the segments that made it
1150	 * this far before the failure.
1151 */
1152 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1153 return NETDEV_TX_BUSY;
1154}
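
To make the OAL length encoding above concrete, here is a worked example, written as a comment rather than driver code, for an skb with 10 fragments:

/*
 * Worked example for ql_map_send() with frag_cnt = 10:
 *
 *   - skb->data fills tbd slot 0, frags 0..5 fill slots 1..6.
 *   - At frag_idx == 6 the eighth slot (index 7) is pointed at the OAL:
 *
 *	remaining = frag_cnt - frag_idx = 10 - 6 = 4
 *	tbd->len  = (sizeof(struct tx_buf_desc) * 4) | TX_DESC_C
 *
 *   - Frags 6..9 then land in oal->seg[0..3], and the final entry gets
 *     TX_DESC_E set by the terminating statement above.
 */
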
1155
1156void ql_realign_skb(struct sk_buff *skb, int len)
1157{
1158 void *temp_addr = skb->data;
1159
1160 /* Undo the skb_reserve(skb,32) we did before
1161 * giving to hardware, and realign data on
1162 * a 2-byte boundary.
1163 */
1164 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1165 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1166 skb_copy_to_linear_data(skb, temp_addr,
1167 (unsigned int)len);
1168}
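
A short arithmetic sketch of what the realignment above buys, assuming NET_IP_ALIGN is 2 (its usual value) and a word-aligned buffer from netdev_alloc_skb():

/*
 * The small-buffer skb was given skb_reserve(skb, QLGE_SB_PAD) = 32
 * bytes of headroom before being posted to the chip, so received data
 * starts 32 bytes into the buffer.  Pulling skb->data back by
 * QLGE_SB_PAD - NET_IP_ALIGN = 32 - 2 = 30 bytes and copying the frame
 * down leaves 2 bytes of headroom: the 14-byte Ethernet header then
 * ends at offset 16, so the IP header that follows it is 4-byte
 * aligned.
 */
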
1169
1170/*
1171 * This function builds an skb for the given inbound
1172 * completion. It will be rewritten for readability in the near
1173 * future, but for now it works well.
1174 */
1175static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1176 struct rx_ring *rx_ring,
1177 struct ib_mac_iocb_rsp *ib_mac_rsp)
1178{
1179 struct bq_desc *lbq_desc;
1180 struct bq_desc *sbq_desc;
1181 struct sk_buff *skb = NULL;
1182 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1183 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1184
1185 /*
1186 * Handle the header buffer if present.
1187 */
1188 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1189 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1190 QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
1191 /*
1192 * Headers fit nicely into a small buffer.
1193 */
1194 sbq_desc = ql_get_curr_sbuf(rx_ring);
1195 pci_unmap_single(qdev->pdev,
1196 pci_unmap_addr(sbq_desc, mapaddr),
1197 pci_unmap_len(sbq_desc, maplen),
1198 PCI_DMA_FROMDEVICE);
1199 skb = sbq_desc->p.skb;
1200 ql_realign_skb(skb, hdr_len);
1201 skb_put(skb, hdr_len);
1202 sbq_desc->p.skb = NULL;
1203 }
1204
1205 /*
1206 * Handle the data buffer(s).
1207 */
1208 if (unlikely(!length)) { /* Is there data too? */
1209 QPRINTK(qdev, RX_STATUS, DEBUG,
1210 "No Data buffer in this packet.\n");
1211 return skb;
1212 }
1213
1214 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1215 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1216 QPRINTK(qdev, RX_STATUS, DEBUG,
1217 "Headers in small, data of %d bytes in small, combine them.\n", length);
1218 /*
1219 * Data is less than small buffer size so it's
1220 * stuffed in a small buffer.
1221 * For this case we append the data
1222 * from the "data" small buffer to the "header" small
1223 * buffer.
1224 */
1225 sbq_desc = ql_get_curr_sbuf(rx_ring);
1226 pci_dma_sync_single_for_cpu(qdev->pdev,
1227 pci_unmap_addr
1228 (sbq_desc, mapaddr),
1229 pci_unmap_len
1230 (sbq_desc, maplen),
1231 PCI_DMA_FROMDEVICE);
1232 memcpy(skb_put(skb, length),
1233 sbq_desc->p.skb->data, length);
1234 pci_dma_sync_single_for_device(qdev->pdev,
1235 pci_unmap_addr
1236 (sbq_desc,
1237 mapaddr),
1238 pci_unmap_len
1239 (sbq_desc,
1240 maplen),
1241 PCI_DMA_FROMDEVICE);
1242 } else {
1243 QPRINTK(qdev, RX_STATUS, DEBUG,
1244 "%d bytes in a single small buffer.\n", length);
1245 sbq_desc = ql_get_curr_sbuf(rx_ring);
1246 skb = sbq_desc->p.skb;
1247 ql_realign_skb(skb, length);
1248 skb_put(skb, length);
1249 pci_unmap_single(qdev->pdev,
1250 pci_unmap_addr(sbq_desc,
1251 mapaddr),
1252 pci_unmap_len(sbq_desc,
1253 maplen),
1254 PCI_DMA_FROMDEVICE);
1255 sbq_desc->p.skb = NULL;
1256 }
1257 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1258 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1259 QPRINTK(qdev, RX_STATUS, DEBUG,
1260 "Header in small, %d bytes in large. Chain large to small!\n", length);
1261 /*
1262 * The data is in a single large buffer. We
1263 * chain it to the header buffer's skb and let
1264 * it rip.
1265 */
1266 lbq_desc = ql_get_curr_lbuf(rx_ring);
1267 pci_unmap_page(qdev->pdev,
1268 pci_unmap_addr(lbq_desc,
1269 mapaddr),
1270 pci_unmap_len(lbq_desc, maplen),
1271 PCI_DMA_FROMDEVICE);
1272 QPRINTK(qdev, RX_STATUS, DEBUG,
1273 "Chaining page to skb.\n");
1274 skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
1275 0, length);
1276 skb->len += length;
1277 skb->data_len += length;
1278 skb->truesize += length;
1279 lbq_desc->p.lbq_page = NULL;
1280 } else {
1281 /*
1282 * The headers and data are in a single large buffer. We
1283			 * chain it to a new skb and let it go. This can happen with
1284 * jumbo mtu on a non-TCP/UDP frame.
1285 */
1286 lbq_desc = ql_get_curr_lbuf(rx_ring);
1287 skb = netdev_alloc_skb(qdev->ndev, length);
1288 if (skb == NULL) {
1289 QPRINTK(qdev, PROBE, DEBUG,
1290 "No skb available, drop the packet.\n");
1291 return NULL;
1292 }
1293 skb_reserve(skb, NET_IP_ALIGN);
1294 QPRINTK(qdev, RX_STATUS, DEBUG,
1295 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
1296 skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
1297 0, length);
1298 skb->len += length;
1299 skb->data_len += length;
1300 skb->truesize += length;
1301 length -= length;
1302 lbq_desc->p.lbq_page = NULL;
1303 __pskb_pull_tail(skb,
1304 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1305 VLAN_ETH_HLEN : ETH_HLEN);
1306 }
1307 } else {
1308 /*
1309 * The data is in a chain of large buffers
1310 * pointed to by a small buffer. We loop
1311		 * through and chain them to our small header
1312 * buffer's skb.
1313 * frags: There are 18 max frags and our small
1314 * buffer will hold 32 of them. The thing is,
1315 * we'll use 3 max for our 9000 byte jumbo
1316 * frames. If the MTU goes up we could
1317 * eventually be in trouble.
1318 */
1319 int size, offset, i = 0;
1320 struct bq_element *bq, bq_array[8];
1321 sbq_desc = ql_get_curr_sbuf(rx_ring);
1322 pci_unmap_single(qdev->pdev,
1323 pci_unmap_addr(sbq_desc, mapaddr),
1324 pci_unmap_len(sbq_desc, maplen),
1325 PCI_DMA_FROMDEVICE);
1326 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1327 /*
1328			 * This is a non-TCP/UDP IP frame, so
1329 * the headers aren't split into a small
1330 * buffer. We have to use the small buffer
1331 * that contains our sg list as our skb to
1332 * send upstairs. Copy the sg list here to
1333 * a local buffer and use it to find the
1334 * pages to chain.
1335 */
1336 QPRINTK(qdev, RX_STATUS, DEBUG,
1337 "%d bytes of headers & data in chain of large.\n", length);
1338 skb = sbq_desc->p.skb;
1339 bq = &bq_array[0];
1340 memcpy(bq, skb->data, sizeof(bq_array));
1341 sbq_desc->p.skb = NULL;
1342 skb_reserve(skb, NET_IP_ALIGN);
1343 } else {
1344 QPRINTK(qdev, RX_STATUS, DEBUG,
1345 "Headers in small, %d bytes of data in chain of large.\n", length);
1346 bq = (struct bq_element *)sbq_desc->p.skb->data;
1347 }
1348 while (length > 0) {
1349 lbq_desc = ql_get_curr_lbuf(rx_ring);
1350 if ((bq->addr_lo & ~BQ_MASK) != lbq_desc->bq->addr_lo) {
1351 QPRINTK(qdev, RX_STATUS, ERR,
1352 "Panic!!! bad large buffer address, expected 0x%.08x, got 0x%.08x.\n",
1353 lbq_desc->bq->addr_lo, bq->addr_lo);
1354 return NULL;
1355 }
1356 pci_unmap_page(qdev->pdev,
1357 pci_unmap_addr(lbq_desc,
1358 mapaddr),
1359 pci_unmap_len(lbq_desc,
1360 maplen),
1361 PCI_DMA_FROMDEVICE);
1362 size = (length < PAGE_SIZE) ? length : PAGE_SIZE;
1363 offset = 0;
1364
1365 QPRINTK(qdev, RX_STATUS, DEBUG,
1366 "Adding page %d to skb for %d bytes.\n",
1367 i, size);
1368 skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page,
1369 offset, size);
1370 skb->len += size;
1371 skb->data_len += size;
1372 skb->truesize += size;
1373 length -= size;
1374 lbq_desc->p.lbq_page = NULL;
1375 bq++;
1376 i++;
1377 }
1378 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1379 VLAN_ETH_HLEN : ETH_HLEN);
1380 }
1381 return skb;
1382}
1383
1384/* Process an inbound completion from an rx ring. */
1385static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1386 struct rx_ring *rx_ring,
1387 struct ib_mac_iocb_rsp *ib_mac_rsp)
1388{
1389 struct net_device *ndev = qdev->ndev;
1390 struct sk_buff *skb = NULL;
1391
1392 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1393
1394 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1395 if (unlikely(!skb)) {
1396 QPRINTK(qdev, RX_STATUS, DEBUG,
1397 "No skb available, drop packet.\n");
1398 return;
1399 }
1400
1401 prefetch(skb->data);
1402 skb->dev = ndev;
1403 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1404 QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
1405 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1406 IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
1407 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1408 IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
1409 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1410 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1411 }
1412 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1413 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
1414 }
1415 if (ib_mac_rsp->flags1 & (IB_MAC_IOCB_RSP_IE | IB_MAC_IOCB_RSP_TE)) {
1416 QPRINTK(qdev, RX_STATUS, ERR,
1417 "Bad checksum for this %s packet.\n",
1418 ((ib_mac_rsp->
1419 flags2 & IB_MAC_IOCB_RSP_T) ? "TCP" : "UDP"));
1420 skb->ip_summed = CHECKSUM_NONE;
1421 } else if (qdev->rx_csum &&
1422 ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ||
1423 ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1424 !(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU)))) {
1425 QPRINTK(qdev, RX_STATUS, DEBUG, "RX checksum done!\n");
1426 skb->ip_summed = CHECKSUM_UNNECESSARY;
1427 }
1428 qdev->stats.rx_packets++;
1429 qdev->stats.rx_bytes += skb->len;
1430 skb->protocol = eth_type_trans(skb, ndev);
1431 if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) {
1432 QPRINTK(qdev, RX_STATUS, DEBUG,
1433 "Passing a VLAN packet upstream.\n");
1434 vlan_hwaccel_rx(skb, qdev->vlgrp,
1435 le16_to_cpu(ib_mac_rsp->vlan_id));
1436 } else {
1437 QPRINTK(qdev, RX_STATUS, DEBUG,
1438 "Passing a normal packet upstream.\n");
1439 netif_rx(skb);
1440 }
1441 ndev->last_rx = jiffies;
1442}
1443
1444/* Process an outbound completion from an rx ring. */
1445static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
1446 struct ob_mac_iocb_rsp *mac_rsp)
1447{
1448 struct tx_ring *tx_ring;
1449 struct tx_ring_desc *tx_ring_desc;
1450
1451 QL_DUMP_OB_MAC_RSP(mac_rsp);
1452 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
1453 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
1454 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
1455	qdev->stats.tx_bytes += tx_ring_desc->skb->len;
1456 qdev->stats.tx_packets++;
1457 dev_kfree_skb(tx_ring_desc->skb);
1458 tx_ring_desc->skb = NULL;
1459
1460 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
1461 OB_MAC_IOCB_RSP_S |
1462 OB_MAC_IOCB_RSP_L |
1463 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
1464 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
1465 QPRINTK(qdev, TX_DONE, WARNING,
1466 "Total descriptor length did not match transfer length.\n");
1467 }
1468 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
1469 QPRINTK(qdev, TX_DONE, WARNING,
1470 "Frame too short to be legal, not sent.\n");
1471 }
1472 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
1473 QPRINTK(qdev, TX_DONE, WARNING,
1474 "Frame too long, but sent anyway.\n");
1475 }
1476 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
1477 QPRINTK(qdev, TX_DONE, WARNING,
1478 "PCI backplane error. Frame not sent.\n");
1479 }
1480 }
1481 atomic_inc(&tx_ring->tx_count);
1482}
1483
1484/* Fire up a handler to reset the MPI processor. */
1485void ql_queue_fw_error(struct ql_adapter *qdev)
1486{
1487 netif_stop_queue(qdev->ndev);
1488 netif_carrier_off(qdev->ndev);
1489 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
1490}
1491
1492void ql_queue_asic_error(struct ql_adapter *qdev)
1493{
1494 netif_stop_queue(qdev->ndev);
1495 netif_carrier_off(qdev->ndev);
1496 ql_disable_interrupts(qdev);
1497 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
1498}
1499
1500static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
1501 struct ib_ae_iocb_rsp *ib_ae_rsp)
1502{
1503 switch (ib_ae_rsp->event) {
1504 case MGMT_ERR_EVENT:
1505 QPRINTK(qdev, RX_ERR, ERR,
1506 "Management Processor Fatal Error.\n");
1507 ql_queue_fw_error(qdev);
1508 return;
1509
1510 case CAM_LOOKUP_ERR_EVENT:
1511 QPRINTK(qdev, LINK, ERR,
1512 "Multiple CAM hits lookup occurred.\n");
1513 QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
1514 ql_queue_asic_error(qdev);
1515 return;
1516
1517 case SOFT_ECC_ERROR_EVENT:
1518 QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
1519 ql_queue_asic_error(qdev);
1520 break;
1521
1522 case PCI_ERR_ANON_BUF_RD:
1523 QPRINTK(qdev, RX_ERR, ERR,
1524 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
1525 ib_ae_rsp->q_id);
1526 ql_queue_asic_error(qdev);
1527 break;
1528
1529 default:
1530 QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
1531 ib_ae_rsp->event);
1532 ql_queue_asic_error(qdev);
1533 break;
1534 }
1535}
1536
1537static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
1538{
1539 struct ql_adapter *qdev = rx_ring->qdev;
1540 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1541 struct ob_mac_iocb_rsp *net_rsp = NULL;
1542 int count = 0;
1543
1544 /* While there are entries in the completion queue. */
1545 while (prod != rx_ring->cnsmr_idx) {
1546
1547 QPRINTK(qdev, RX_STATUS, DEBUG,
1548			"cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
1549 prod, rx_ring->cnsmr_idx);
1550
1551 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
1552 rmb();
1553 switch (net_rsp->opcode) {
1554
1555 case OPCODE_OB_MAC_TSO_IOCB:
1556 case OPCODE_OB_MAC_IOCB:
1557 ql_process_mac_tx_intr(qdev, net_rsp);
1558 break;
1559 default:
1560 QPRINTK(qdev, RX_STATUS, DEBUG,
1561 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1562 net_rsp->opcode);
1563 }
1564 count++;
1565 ql_update_cq(rx_ring);
1566 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1567 }
1568 ql_write_cq_idx(rx_ring);
1569 if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) {
1570 struct tx_ring *tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
1571 if (atomic_read(&tx_ring->queue_stopped) &&
1572 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
1573 /*
1574 * The queue got stopped because the tx_ring was full.
1575 * Wake it up, because it's now at least 25% empty.
1576 */
1577 netif_wake_queue(qdev->ndev);
1578 }
1579
1580 return count;
1581}
1582
1583static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
1584{
1585 struct ql_adapter *qdev = rx_ring->qdev;
1586 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1587 struct ql_net_rsp_iocb *net_rsp;
1588 int count = 0;
1589
1590 /* While there are entries in the completion queue. */
1591 while (prod != rx_ring->cnsmr_idx) {
1592
1593 QPRINTK(qdev, RX_STATUS, DEBUG,
1594			"cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
1595 prod, rx_ring->cnsmr_idx);
1596
1597 net_rsp = rx_ring->curr_entry;
1598 rmb();
1599 switch (net_rsp->opcode) {
1600 case OPCODE_IB_MAC_IOCB:
1601 ql_process_mac_rx_intr(qdev, rx_ring,
1602 (struct ib_mac_iocb_rsp *)
1603 net_rsp);
1604 break;
1605
1606 case OPCODE_IB_AE_IOCB:
1607 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
1608 net_rsp);
1609 break;
1610 default:
1611 {
1612 QPRINTK(qdev, RX_STATUS, DEBUG,
1613 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1614 net_rsp->opcode);
1615 }
1616 }
1617 count++;
1618 ql_update_cq(rx_ring);
1619 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1620 if (count == budget)
1621 break;
1622 }
1623 ql_update_buffer_queues(qdev, rx_ring);
1624 ql_write_cq_idx(rx_ring);
1625 return count;
1626}
1627
1628static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
1629{
1630 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
1631 struct ql_adapter *qdev = rx_ring->qdev;
1632 int work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
1633
1634 QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
1635 rx_ring->cq_id);
1636
1637 if (work_done < budget) {
1638 __netif_rx_complete(qdev->ndev, napi);
1639 ql_enable_completion_interrupt(qdev, rx_ring->irq);
1640 }
1641 return work_done;
1642}
1643
1644static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
1645{
1646 struct ql_adapter *qdev = netdev_priv(ndev);
1647
1648 qdev->vlgrp = grp;
1649 if (grp) {
1650 QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
1651 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
1652 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
1653 } else {
1654 QPRINTK(qdev, IFUP, DEBUG,
1655 "Turning off VLAN in NIC_RCV_CFG.\n");
1656 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
1657 }
1658}
1659
1660static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
1661{
1662 struct ql_adapter *qdev = netdev_priv(ndev);
1663 u32 enable_bit = MAC_ADDR_E;
1664
1665 spin_lock(&qdev->hw_lock);
1666 if (ql_set_mac_addr_reg
1667 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1668 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
1669 }
1670 spin_unlock(&qdev->hw_lock);
1671}
1672
1673static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
1674{
1675 struct ql_adapter *qdev = netdev_priv(ndev);
1676 u32 enable_bit = 0;
1677
1678 spin_lock(&qdev->hw_lock);
1679 if (ql_set_mac_addr_reg
1680 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1681 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
1682 }
1683 spin_unlock(&qdev->hw_lock);
1684
1685}
1686
1687/* Worker thread to process a given rx_ring that is dedicated
1688 * to outbound completions.
1689 */
1690static void ql_tx_clean(struct work_struct *work)
1691{
1692 struct rx_ring *rx_ring =
1693 container_of(work, struct rx_ring, rx_work.work);
1694 ql_clean_outbound_rx_ring(rx_ring);
1695 ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
1696
1697}
1698
1699/* Worker thread to process a given rx_ring that is dedicated
1700 * to inbound completions.
1701 */
1702static void ql_rx_clean(struct work_struct *work)
1703{
1704 struct rx_ring *rx_ring =
1705 container_of(work, struct rx_ring, rx_work.work);
1706 ql_clean_inbound_rx_ring(rx_ring, 64);
1707 ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
1708}
1709
1710/* MSI-X Multiple Vector Interrupt Handler for outbound completions. */
1711static irqreturn_t qlge_msix_tx_isr(int irq, void *dev_id)
1712{
1713 struct rx_ring *rx_ring = dev_id;
1714 queue_delayed_work_on(rx_ring->cpu, rx_ring->qdev->q_workqueue,
1715 &rx_ring->rx_work, 0);
1716 return IRQ_HANDLED;
1717}
1718
1719/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
1720static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
1721{
1722 struct rx_ring *rx_ring = dev_id;
1723 struct ql_adapter *qdev = rx_ring->qdev;
1724 netif_rx_schedule(qdev->ndev, &rx_ring->napi);
1725 return IRQ_HANDLED;
1726}
1727
1728/* We check here to see if we're already handling a legacy
1729 * interrupt. If we are, then it must belong to another
1730 * chip with which we're sharing the interrupt line.
1731 */
1732int ql_legacy_check(struct ql_adapter *qdev)
1733{
1734 int err;
1735 spin_lock(&qdev->legacy_lock);
1736 err = atomic_read(&qdev->intr_context[0].irq_cnt);
1737 spin_unlock(&qdev->legacy_lock);
1738 return err;
1739}
1740
1741/* This handles a fatal error, MPI activity, and the default
1742 * rx_ring in an MSI-X multiple vector environment.
1743 * In an MSI/Legacy environment it also processes the rest of
1744 * the rx_rings.
1745 */
1746static irqreturn_t qlge_isr(int irq, void *dev_id)
1747{
1748 struct rx_ring *rx_ring = dev_id;
1749 struct ql_adapter *qdev = rx_ring->qdev;
1750 struct intr_context *intr_context = &qdev->intr_context[0];
1751 u32 var;
1752 int i;
1753 int work_done = 0;
1754
1755 if (qdev->legacy_check && qdev->legacy_check(qdev)) {
1756 QPRINTK(qdev, INTR, INFO, "Already busy, not our interrupt.\n");
1757 return IRQ_NONE; /* Not our interrupt */
1758 }
1759
1760 var = ql_read32(qdev, STS);
1761
1762 /*
1763 * Check for fatal error.
1764 */
1765 if (var & STS_FE) {
1766 ql_queue_asic_error(qdev);
1767 QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
1768 var = ql_read32(qdev, ERR_STS);
1769 QPRINTK(qdev, INTR, ERR,
1770 "Resetting chip. Error Status Register = 0x%x\n", var);
1771 return IRQ_HANDLED;
1772 }
1773
1774 /*
1775 * Check MPI processor activity.
1776 */
1777 if (var & STS_PI) {
1778 /*
1779 * We've got an async event or mailbox completion.
1780 * Handle it and clear the source of the interrupt.
1781 */
1782 QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
1783 ql_disable_completion_interrupt(qdev, intr_context->intr);
1784 queue_delayed_work_on(smp_processor_id(), qdev->workqueue,
1785 &qdev->mpi_work, 0);
1786 work_done++;
1787 }
1788
1789 /*
1790 * Check the default queue and wake handler if active.
1791 */
1792 rx_ring = &qdev->rx_ring[0];
1793 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) {
1794 QPRINTK(qdev, INTR, INFO, "Waking handler for rx_ring[0].\n");
1795 ql_disable_completion_interrupt(qdev, intr_context->intr);
1796 queue_delayed_work_on(smp_processor_id(), qdev->q_workqueue,
1797 &rx_ring->rx_work, 0);
1798 work_done++;
1799 }
1800
1801 if (!test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
1802 /*
1803 * Start the DPC for each active queue.
1804 */
1805 for (i = 1; i < qdev->rx_ring_count; i++) {
1806 rx_ring = &qdev->rx_ring[i];
1807 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
1808 rx_ring->cnsmr_idx) {
1809 QPRINTK(qdev, INTR, INFO,
1810 "Waking handler for rx_ring[%d].\n", i);
1811 ql_disable_completion_interrupt(qdev,
1812 intr_context->
1813 intr);
1814 if (i < qdev->rss_ring_first_cq_id)
1815 queue_delayed_work_on(rx_ring->cpu,
1816 qdev->q_workqueue,
1817 &rx_ring->rx_work,
1818 0);
1819 else
1820 netif_rx_schedule(qdev->ndev,
1821 &rx_ring->napi);
1822 work_done++;
1823 }
1824 }
1825 }
1826 return work_done ? IRQ_HANDLED : IRQ_NONE;
1827}
1828
1829static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
1830{
1831
1832 if (skb_is_gso(skb)) {
1833 int err;
1834 if (skb_header_cloned(skb)) {
1835 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1836 if (err)
1837 return err;
1838 }
1839
1840 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
1841 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
1842 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
1843 mac_iocb_ptr->total_hdrs_len =
1844 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
1845 mac_iocb_ptr->net_trans_offset =
1846 cpu_to_le16(skb_network_offset(skb) |
1847 skb_transport_offset(skb)
1848 << OB_MAC_TRANSPORT_HDR_SHIFT);
1849 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1850 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
1851 if (likely(skb->protocol == htons(ETH_P_IP))) {
1852 struct iphdr *iph = ip_hdr(skb);
1853 iph->check = 0;
1854 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
1855 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
1856 iph->daddr, 0,
1857 IPPROTO_TCP,
1858 0);
1859 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1860 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
1861 tcp_hdr(skb)->check =
1862 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1863 &ipv6_hdr(skb)->daddr,
1864 0, IPPROTO_TCP, 0);
1865 }
1866 return 1;
1867 }
1868 return 0;
1869}
1870
1871static void ql_hw_csum_setup(struct sk_buff *skb,
1872 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
1873{
1874 int len;
1875 struct iphdr *iph = ip_hdr(skb);
1876 u16 *check;
1877 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
1878 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
1879 mac_iocb_ptr->net_trans_offset =
1880 cpu_to_le16(skb_network_offset(skb) |
1881 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
1882
1883 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
1884 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
1885 if (likely(iph->protocol == IPPROTO_TCP)) {
1886 check = &(tcp_hdr(skb)->check);
1887 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
1888 mac_iocb_ptr->total_hdrs_len =
1889 cpu_to_le16(skb_transport_offset(skb) +
1890 (tcp_hdr(skb)->doff << 2));
1891 } else {
1892 check = &(udp_hdr(skb)->check);
1893 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
1894 mac_iocb_ptr->total_hdrs_len =
1895 cpu_to_le16(skb_transport_offset(skb) +
1896 sizeof(struct udphdr));
1897 }
1898 *check = ~csum_tcpudp_magic(iph->saddr,
1899 iph->daddr, len, iph->protocol, 0);
1900}
1901
1902static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
1903{
1904 struct tx_ring_desc *tx_ring_desc;
1905 struct ob_mac_iocb_req *mac_iocb_ptr;
1906 struct ql_adapter *qdev = netdev_priv(ndev);
1907 int tso;
1908 struct tx_ring *tx_ring;
1909 u32 tx_ring_idx = (u32) QL_TXQ_IDX(qdev, skb);
1910
1911 tx_ring = &qdev->tx_ring[tx_ring_idx];
1912
1913 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
1914 QPRINTK(qdev, TX_QUEUED, INFO,
1915			"%s: shutting down tx queue %d due to lack of resources.\n",
1916 __func__, tx_ring_idx);
1917 netif_stop_queue(ndev);
1918 atomic_inc(&tx_ring->queue_stopped);
1919 return NETDEV_TX_BUSY;
1920 }
1921 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
1922 mac_iocb_ptr = tx_ring_desc->queue_entry;
1923	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
1924 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != NETDEV_TX_OK) {
1925 QPRINTK(qdev, TX_QUEUED, ERR, "Could not map the segments.\n");
1926 return NETDEV_TX_BUSY;
1927 }
1928
1929 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
1930 mac_iocb_ptr->tid = tx_ring_desc->index;
1931	/* Store the tx queue index for this IO in the IOCB so that
1932	 * when we get the completion we can use it to establish the context.
1933 */
1934 mac_iocb_ptr->txq_idx = tx_ring_idx;
1935 tx_ring_desc->skb = skb;
1936
1937 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
1938
1939 if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
1940 QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
1941 vlan_tx_tag_get(skb));
1942 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
1943 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
1944 }
1945 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
1946 if (tso < 0) {
1947 dev_kfree_skb_any(skb);
1948 return NETDEV_TX_OK;
1949 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
1950 ql_hw_csum_setup(skb,
1951 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
1952 }
1953 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
1954 tx_ring->prod_idx++;
1955 if (tx_ring->prod_idx == tx_ring->wq_len)
1956 tx_ring->prod_idx = 0;
1957 wmb();
1958
1959 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
1960 ndev->trans_start = jiffies;
1961 QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
1962 tx_ring->prod_idx, skb->len);
1963
1964 atomic_dec(&tx_ring->tx_count);
1965 return NETDEV_TX_OK;
1966}
1967
1968static void ql_free_shadow_space(struct ql_adapter *qdev)
1969{
1970 if (qdev->rx_ring_shadow_reg_area) {
1971 pci_free_consistent(qdev->pdev,
1972 PAGE_SIZE,
1973 qdev->rx_ring_shadow_reg_area,
1974 qdev->rx_ring_shadow_reg_dma);
1975 qdev->rx_ring_shadow_reg_area = NULL;
1976 }
1977 if (qdev->tx_ring_shadow_reg_area) {
1978 pci_free_consistent(qdev->pdev,
1979 PAGE_SIZE,
1980 qdev->tx_ring_shadow_reg_area,
1981 qdev->tx_ring_shadow_reg_dma);
1982 qdev->tx_ring_shadow_reg_area = NULL;
1983 }
1984}
1985
1986static int ql_alloc_shadow_space(struct ql_adapter *qdev)
1987{
1988 qdev->rx_ring_shadow_reg_area =
1989 pci_alloc_consistent(qdev->pdev,
1990 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
1991 if (qdev->rx_ring_shadow_reg_area == NULL) {
1992 QPRINTK(qdev, IFUP, ERR,
1993 "Allocation of RX shadow space failed.\n");
1994 return -ENOMEM;
1995 }
1996 qdev->tx_ring_shadow_reg_area =
1997 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
1998 &qdev->tx_ring_shadow_reg_dma);
1999 if (qdev->tx_ring_shadow_reg_area == NULL) {
2000 QPRINTK(qdev, IFUP, ERR,
2001 "Allocation of TX shadow space failed.\n");
2002 goto err_wqp_sh_area;
2003 }
2004 return 0;
2005
2006err_wqp_sh_area:
2007 pci_free_consistent(qdev->pdev,
2008 PAGE_SIZE,
2009 qdev->rx_ring_shadow_reg_area,
2010 qdev->rx_ring_shadow_reg_dma);
2011 return -ENOMEM;
2012}
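
The page allocated above is carved up later by ql_start_rx_ring() and ql_start_tx_ring(); the sketch below summarizes that layout as read from those functions (the fourth u64 per rx ring appears to be unused padding, which is an inference, not documented):

/*
 * Sketch of how the two PAGE_SIZE shadow areas are used:
 *
 *   rx_ring_shadow_reg_area:
 *	cq_id * 4 * sizeof(u64) + 0  -> completion producer index shadow
 *	cq_id * 4 * sizeof(u64) + 8  -> lbq base-address indirect entry
 *	cq_id * 4 * sizeof(u64) + 16 -> sbq base-address indirect entry
 *	cq_id * 4 * sizeof(u64) + 24 -> (apparently unused)
 *
 *   tx_ring_shadow_reg_area:
 *	wq_id * sizeof(u64)          -> tx consumer index shadow
 *
 * With 4 KB pages that leaves room for up to 128 rx rings (32 bytes
 * each) and 512 tx rings (8 bytes each).
 */
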
2013
2014static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2015{
2016 struct tx_ring_desc *tx_ring_desc;
2017 int i;
2018 struct ob_mac_iocb_req *mac_iocb_ptr;
2019
2020 mac_iocb_ptr = tx_ring->wq_base;
2021 tx_ring_desc = tx_ring->q;
2022 for (i = 0; i < tx_ring->wq_len; i++) {
2023 tx_ring_desc->index = i;
2024 tx_ring_desc->skb = NULL;
2025 tx_ring_desc->queue_entry = mac_iocb_ptr;
2026 mac_iocb_ptr++;
2027 tx_ring_desc++;
2028 }
2029 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2030 atomic_set(&tx_ring->queue_stopped, 0);
2031}
2032
2033static void ql_free_tx_resources(struct ql_adapter *qdev,
2034 struct tx_ring *tx_ring)
2035{
2036 if (tx_ring->wq_base) {
2037 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2038 tx_ring->wq_base, tx_ring->wq_base_dma);
2039 tx_ring->wq_base = NULL;
2040 }
2041 kfree(tx_ring->q);
2042 tx_ring->q = NULL;
2043}
2044
2045static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2046 struct tx_ring *tx_ring)
2047{
2048 tx_ring->wq_base =
2049 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2050 &tx_ring->wq_base_dma);
2051
2052 if ((tx_ring->wq_base == NULL)
2053 || tx_ring->wq_base_dma & (tx_ring->wq_size - 1)) {
2054 QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
2055 return -ENOMEM;
2056 }
2057 tx_ring->q =
2058 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2059 if (tx_ring->q == NULL)
2060 goto err;
2061
2062 return 0;
2063err:
2064 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2065 tx_ring->wq_base, tx_ring->wq_base_dma);
2066 return -ENOMEM;
2067}
2068
2069void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2070{
2071 int i;
2072 struct bq_desc *lbq_desc;
2073
2074 for (i = 0; i < rx_ring->lbq_len; i++) {
2075 lbq_desc = &rx_ring->lbq[i];
2076 if (lbq_desc->p.lbq_page) {
2077 pci_unmap_page(qdev->pdev,
2078 pci_unmap_addr(lbq_desc, mapaddr),
2079 pci_unmap_len(lbq_desc, maplen),
2080 PCI_DMA_FROMDEVICE);
2081
2082 put_page(lbq_desc->p.lbq_page);
2083 lbq_desc->p.lbq_page = NULL;
2084 }
2085 lbq_desc->bq->addr_lo = 0;
2086 lbq_desc->bq->addr_hi = 0;
2087 }
2088}
2089
2090/*
2091 * Allocate and map a page for each element of the lbq.
2092 */
2093static int ql_alloc_lbq_buffers(struct ql_adapter *qdev,
2094 struct rx_ring *rx_ring)
2095{
2096 int i;
2097 struct bq_desc *lbq_desc;
2098 u64 map;
2099 struct bq_element *bq = rx_ring->lbq_base;
2100
2101 for (i = 0; i < rx_ring->lbq_len; i++) {
2102 lbq_desc = &rx_ring->lbq[i];
2103		memset(lbq_desc, 0, sizeof(*lbq_desc));
2104 lbq_desc->bq = bq;
2105 lbq_desc->index = i;
2106 lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
2107 if (unlikely(!lbq_desc->p.lbq_page)) {
2108 QPRINTK(qdev, IFUP, ERR, "failed alloc_page().\n");
2109 goto mem_error;
2110 } else {
2111 map = pci_map_page(qdev->pdev,
2112 lbq_desc->p.lbq_page,
2113 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
2114 if (pci_dma_mapping_error(qdev->pdev, map)) {
2115 QPRINTK(qdev, IFUP, ERR,
2116 "PCI mapping failed.\n");
2117 goto mem_error;
2118 }
2119 pci_unmap_addr_set(lbq_desc, mapaddr, map);
2120 pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
2121 bq->addr_lo = cpu_to_le32(map);
2122 bq->addr_hi = cpu_to_le32(map >> 32);
2123 }
2124 bq++;
2125 }
2126 return 0;
2127mem_error:
2128 ql_free_lbq_buffers(qdev, rx_ring);
2129 return -ENOMEM;
2130}
2131
2132void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2133{
2134 int i;
2135 struct bq_desc *sbq_desc;
2136
2137 for (i = 0; i < rx_ring->sbq_len; i++) {
2138 sbq_desc = &rx_ring->sbq[i];
2139 if (sbq_desc == NULL) {
2140 QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
2141 return;
2142 }
2143 if (sbq_desc->p.skb) {
2144 pci_unmap_single(qdev->pdev,
2145 pci_unmap_addr(sbq_desc, mapaddr),
2146 pci_unmap_len(sbq_desc, maplen),
2147 PCI_DMA_FROMDEVICE);
2148 dev_kfree_skb(sbq_desc->p.skb);
2149 sbq_desc->p.skb = NULL;
2150 }
2151 if (sbq_desc->bq == NULL) {
2152 QPRINTK(qdev, IFUP, ERR, "sbq_desc->bq %d is NULL.\n",
2153 i);
2154 return;
2155 }
2156 sbq_desc->bq->addr_lo = 0;
2157 sbq_desc->bq->addr_hi = 0;
2158 }
2159}
2160
2161/* Allocate and map an skb for each element of the sbq. */
2162static int ql_alloc_sbq_buffers(struct ql_adapter *qdev,
2163 struct rx_ring *rx_ring)
2164{
2165 int i;
2166 struct bq_desc *sbq_desc;
2167 struct sk_buff *skb;
2168 u64 map;
2169 struct bq_element *bq = rx_ring->sbq_base;
2170
2171 for (i = 0; i < rx_ring->sbq_len; i++) {
2172 sbq_desc = &rx_ring->sbq[i];
2173		memset(sbq_desc, 0, sizeof(*sbq_desc));
2174 sbq_desc->index = i;
2175 sbq_desc->bq = bq;
2176 skb = netdev_alloc_skb(qdev->ndev, rx_ring->sbq_buf_size);
2177 if (unlikely(!skb)) {
2178 /* Better luck next round */
2179 QPRINTK(qdev, IFUP, ERR,
2180 "small buff alloc failed for %d bytes at index %d.\n",
2181 rx_ring->sbq_buf_size, i);
2182 goto mem_err;
2183 }
2184 skb_reserve(skb, QLGE_SB_PAD);
2185 sbq_desc->p.skb = skb;
2186 /*
2187		 * Map only half the buffer, because the
2188 * other half may get some data copied to it
2189 * when the completion arrives.
2190 */
2191 map = pci_map_single(qdev->pdev,
2192 skb->data,
2193 rx_ring->sbq_buf_size / 2,
2194 PCI_DMA_FROMDEVICE);
2195 if (pci_dma_mapping_error(qdev->pdev, map)) {
2196 QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
2197 goto mem_err;
2198 }
2199 pci_unmap_addr_set(sbq_desc, mapaddr, map);
2200 pci_unmap_len_set(sbq_desc, maplen, rx_ring->sbq_buf_size / 2);
2201 bq->addr_lo = /*sbq_desc->addr_lo = */
2202 cpu_to_le32(map);
2203 bq->addr_hi = /*sbq_desc->addr_hi = */
2204 cpu_to_le32(map >> 32);
2205 bq++;
2206 }
2207 return 0;
2208mem_err:
2209 ql_free_sbq_buffers(qdev, rx_ring);
2210 return -ENOMEM;
2211}
2212
2213static void ql_free_rx_resources(struct ql_adapter *qdev,
2214 struct rx_ring *rx_ring)
2215{
2216 if (rx_ring->sbq_len)
2217 ql_free_sbq_buffers(qdev, rx_ring);
2218 if (rx_ring->lbq_len)
2219 ql_free_lbq_buffers(qdev, rx_ring);
2220
2221 /* Free the small buffer queue. */
2222 if (rx_ring->sbq_base) {
2223 pci_free_consistent(qdev->pdev,
2224 rx_ring->sbq_size,
2225 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2226 rx_ring->sbq_base = NULL;
2227 }
2228
2229 /* Free the small buffer queue control blocks. */
2230 kfree(rx_ring->sbq);
2231 rx_ring->sbq = NULL;
2232
2233 /* Free the large buffer queue. */
2234 if (rx_ring->lbq_base) {
2235 pci_free_consistent(qdev->pdev,
2236 rx_ring->lbq_size,
2237 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2238 rx_ring->lbq_base = NULL;
2239 }
2240
2241 /* Free the large buffer queue control blocks. */
2242 kfree(rx_ring->lbq);
2243 rx_ring->lbq = NULL;
2244
2245 /* Free the rx queue. */
2246 if (rx_ring->cq_base) {
2247 pci_free_consistent(qdev->pdev,
2248 rx_ring->cq_size,
2249 rx_ring->cq_base, rx_ring->cq_base_dma);
2250 rx_ring->cq_base = NULL;
2251 }
2252}
2253
2254/* Allocate queues and buffers for this completion queue based
2255 * on the values in the parameter structure. */
2256static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2257 struct rx_ring *rx_ring)
2258{
2259
2260 /*
2261 * Allocate the completion queue for this rx_ring.
2262 */
2263 rx_ring->cq_base =
2264 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2265 &rx_ring->cq_base_dma);
2266
2267 if (rx_ring->cq_base == NULL) {
2268 QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
2269 return -ENOMEM;
2270 }
2271
2272 if (rx_ring->sbq_len) {
2273 /*
2274 * Allocate small buffer queue.
2275 */
2276 rx_ring->sbq_base =
2277 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2278 &rx_ring->sbq_base_dma);
2279
2280 if (rx_ring->sbq_base == NULL) {
2281 QPRINTK(qdev, IFUP, ERR,
2282 "Small buffer queue allocation failed.\n");
2283 goto err_mem;
2284 }
2285
2286 /*
2287 * Allocate small buffer queue control blocks.
2288 */
2289 rx_ring->sbq =
2290 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2291 GFP_KERNEL);
2292 if (rx_ring->sbq == NULL) {
2293 QPRINTK(qdev, IFUP, ERR,
2294 "Small buffer queue control block allocation failed.\n");
2295 goto err_mem;
2296 }
2297
2298 if (ql_alloc_sbq_buffers(qdev, rx_ring)) {
2299 QPRINTK(qdev, IFUP, ERR,
2300 "Small buffer allocation failed.\n");
2301 goto err_mem;
2302 }
2303 }
2304
2305 if (rx_ring->lbq_len) {
2306 /*
2307 * Allocate large buffer queue.
2308 */
2309 rx_ring->lbq_base =
2310 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2311 &rx_ring->lbq_base_dma);
2312
2313 if (rx_ring->lbq_base == NULL) {
2314 QPRINTK(qdev, IFUP, ERR,
2315 "Large buffer queue allocation failed.\n");
2316 goto err_mem;
2317 }
2318 /*
2319 * Allocate large buffer queue control blocks.
2320 */
2321 rx_ring->lbq =
2322 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2323 GFP_KERNEL);
2324 if (rx_ring->lbq == NULL) {
2325 QPRINTK(qdev, IFUP, ERR,
2326 "Large buffer queue control block allocation failed.\n");
2327 goto err_mem;
2328 }
2329
2330 /*
2331 * Allocate the buffers.
2332 */
2333 if (ql_alloc_lbq_buffers(qdev, rx_ring)) {
2334 QPRINTK(qdev, IFUP, ERR,
2335 "Large buffer allocation failed.\n");
2336 goto err_mem;
2337 }
2338 }
2339
2340 return 0;
2341
2342err_mem:
2343 ql_free_rx_resources(qdev, rx_ring);
2344 return -ENOMEM;
2345}
2346
2347static void ql_tx_ring_clean(struct ql_adapter *qdev)
2348{
2349 struct tx_ring *tx_ring;
2350 struct tx_ring_desc *tx_ring_desc;
2351 int i, j;
2352
2353 /*
2354 * Loop through all queues and free
2355 * any resources.
2356 */
2357 for (j = 0; j < qdev->tx_ring_count; j++) {
2358 tx_ring = &qdev->tx_ring[j];
2359 for (i = 0; i < tx_ring->wq_len; i++) {
2360 tx_ring_desc = &tx_ring->q[i];
2361 if (tx_ring_desc && tx_ring_desc->skb) {
2362 QPRINTK(qdev, IFDOWN, ERR,
2363 "Freeing lost SKB %p, from queue %d, index %d.\n",
2364 tx_ring_desc->skb, j,
2365 tx_ring_desc->index);
2366 ql_unmap_send(qdev, tx_ring_desc,
2367 tx_ring_desc->map_cnt);
2368 dev_kfree_skb(tx_ring_desc->skb);
2369 tx_ring_desc->skb = NULL;
2370 }
2371 }
2372 }
2373}
2374
2375static void ql_free_ring_cb(struct ql_adapter *qdev)
2376{
2377 kfree(qdev->ring_mem);
2378}
2379
2380static int ql_alloc_ring_cb(struct ql_adapter *qdev)
2381{
2382 /* Allocate space for tx/rx ring control blocks. */
2383 qdev->ring_mem_size =
2384 (qdev->tx_ring_count * sizeof(struct tx_ring)) +
2385 (qdev->rx_ring_count * sizeof(struct rx_ring));
2386 qdev->ring_mem = kmalloc(qdev->ring_mem_size, GFP_KERNEL);
2387 if (qdev->ring_mem == NULL) {
2388 return -ENOMEM;
2389 } else {
2390 qdev->rx_ring = qdev->ring_mem;
2391 qdev->tx_ring = qdev->ring_mem +
2392 (qdev->rx_ring_count * sizeof(struct rx_ring));
2393 }
2394 return 0;
2395}
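
The single block allocated above holds both ring arrays back to back; the sketch below illustrates the layout with made-up ring counts:

/*
 * Illustration of the ring_mem layout (not driver code).  With, say,
 * rx_ring_count = 4 and tx_ring_count = 2:
 *
 *	ring_mem
 *	|<---- 4 * sizeof(struct rx_ring) ---->|<- 2 * sizeof(struct tx_ring) ->|
 *	|            rx_ring[0..3]             |          tx_ring[0..1]         |
 *
 * qdev->rx_ring points at the start of the block and qdev->tx_ring at
 * the first byte past the rx_ring array, so both arrays are released
 * by the single kfree() in ql_free_ring_cb().
 */
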
2396
2397static void ql_free_mem_resources(struct ql_adapter *qdev)
2398{
2399 int i;
2400
2401 for (i = 0; i < qdev->tx_ring_count; i++)
2402 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2403 for (i = 0; i < qdev->rx_ring_count; i++)
2404 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2405 ql_free_shadow_space(qdev);
2406}
2407
2408static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2409{
2410 int i;
2411
2412 /* Allocate space for our shadow registers and such. */
2413 if (ql_alloc_shadow_space(qdev))
2414 return -ENOMEM;
2415
2416 for (i = 0; i < qdev->rx_ring_count; i++) {
2417 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2418 QPRINTK(qdev, IFUP, ERR,
2419 "RX resource allocation failed.\n");
2420 goto err_mem;
2421 }
2422 }
2423 /* Allocate tx queue resources */
2424 for (i = 0; i < qdev->tx_ring_count; i++) {
2425 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2426 QPRINTK(qdev, IFUP, ERR,
2427 "TX resource allocation failed.\n");
2428 goto err_mem;
2429 }
2430 }
2431 return 0;
2432
2433err_mem:
2434 ql_free_mem_resources(qdev);
2435 return -ENOMEM;
2436}
2437
2438/* Set up the rx ring control block and pass it to the chip.
2439 * The control block is defined as
2440 * "Completion Queue Initialization Control Block", or cqicb.
2441 */
2442static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2443{
2444 struct cqicb *cqicb = &rx_ring->cqicb;
2445 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
2446 (rx_ring->cq_id * sizeof(u64) * 4);
2447 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
2448 (rx_ring->cq_id * sizeof(u64) * 4);
2449 void __iomem *doorbell_area =
2450 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2451 int err = 0;
2452 u16 bq_len;
2453
2454 /* Set up the shadow registers for this ring. */
2455 rx_ring->prod_idx_sh_reg = shadow_reg;
2456 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
2457 shadow_reg += sizeof(u64);
2458 shadow_reg_dma += sizeof(u64);
2459 rx_ring->lbq_base_indirect = shadow_reg;
2460 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
2461 shadow_reg += sizeof(u64);
2462 shadow_reg_dma += sizeof(u64);
2463 rx_ring->sbq_base_indirect = shadow_reg;
2464 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
2465
2466 /* PCI doorbell mem area + 0x00 for consumer index register */
2467 rx_ring->cnsmr_idx_db_reg = (u32 *) doorbell_area;
2468 rx_ring->cnsmr_idx = 0;
2469 rx_ring->curr_entry = rx_ring->cq_base;
2470
2471 /* PCI doorbell mem area + 0x04 for valid register */
2472 rx_ring->valid_db_reg = doorbell_area + 0x04;
2473
2474 /* PCI doorbell mem area + 0x18 for large buffer consumer */
2475 rx_ring->lbq_prod_idx_db_reg = (u32 *) (doorbell_area + 0x18);
2476
2477 /* PCI doorbell mem area + 0x1c */
2478 rx_ring->sbq_prod_idx_db_reg = (u32 *) (doorbell_area + 0x1c);
2479
2480 memset((void *)cqicb, 0, sizeof(struct cqicb));
2481 cqicb->msix_vect = rx_ring->irq;
2482
2483 cqicb->len = cpu_to_le16(rx_ring->cq_len | LEN_V | LEN_CPP_CONT);
2484
2485 cqicb->addr_lo = cpu_to_le32(rx_ring->cq_base_dma);
2486 cqicb->addr_hi = cpu_to_le32((u64) rx_ring->cq_base_dma >> 32);
2487
2488 cqicb->prod_idx_addr_lo = cpu_to_le32(rx_ring->prod_idx_sh_reg_dma);
2489 cqicb->prod_idx_addr_hi =
2490 cpu_to_le32((u64) rx_ring->prod_idx_sh_reg_dma >> 32);
2491
2492 /*
2493 * Set up the control block load flags.
2494 */
2495 cqicb->flags = FLAGS_LC | /* Load queue base address */
2496 FLAGS_LV | /* Load MSI-X vector */
2497 FLAGS_LI; /* Load irq delay values */
2498 if (rx_ring->lbq_len) {
2499 cqicb->flags |= FLAGS_LL; /* Load lbq values */
2500 *((u64 *) rx_ring->lbq_base_indirect) = rx_ring->lbq_base_dma;
2501 cqicb->lbq_addr_lo =
2502 cpu_to_le32(rx_ring->lbq_base_indirect_dma);
2503 cqicb->lbq_addr_hi =
2504 cpu_to_le32((u64) rx_ring->lbq_base_indirect_dma >> 32);
2505 cqicb->lbq_buf_size = cpu_to_le32(rx_ring->lbq_buf_size);
2506 bq_len = (u16) rx_ring->lbq_len;
2507 cqicb->lbq_len = cpu_to_le16(bq_len);
2508 rx_ring->lbq_prod_idx = rx_ring->lbq_len - 16;
2509 rx_ring->lbq_curr_idx = 0;
2510 rx_ring->lbq_clean_idx = rx_ring->lbq_prod_idx;
2511 rx_ring->lbq_free_cnt = 16;
2512 }
2513 if (rx_ring->sbq_len) {
2514 cqicb->flags |= FLAGS_LS; /* Load sbq values */
2515 *((u64 *) rx_ring->sbq_base_indirect) = rx_ring->sbq_base_dma;
2516 cqicb->sbq_addr_lo =
2517 cpu_to_le32(rx_ring->sbq_base_indirect_dma);
2518 cqicb->sbq_addr_hi =
2519 cpu_to_le32((u64) rx_ring->sbq_base_indirect_dma >> 32);
2520 cqicb->sbq_buf_size =
2521 cpu_to_le16(((rx_ring->sbq_buf_size / 2) + 8) & 0xfffffff8);
2522 bq_len = (u16) rx_ring->sbq_len;
2523 cqicb->sbq_len = cpu_to_le16(bq_len);
2524 rx_ring->sbq_prod_idx = rx_ring->sbq_len - 16;
2525 rx_ring->sbq_curr_idx = 0;
2526 rx_ring->sbq_clean_idx = rx_ring->sbq_prod_idx;
2527 rx_ring->sbq_free_cnt = 16;
2528 }
2529 switch (rx_ring->type) {
2530 case TX_Q:
2531 /* If there's only one interrupt, then we use
2532 * worker threads to process the outbound
2533 * completion handling rx_rings. We do this so
2534 * they can be run on multiple CPUs. There is
2535 * room to refine this further: only defer to a
2536 * worker when more than some threshold of
2537 * outbound completions is pending on the queue
2538 * and more than one queue is active, i.e. when
2539 * the benefit outweighs the cost of a context
2540 * switch.
2541 * If there's more than one interrupt, then the
2542 * outbound completions are processed in the ISR.
2543 */
2544 if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
2545 INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
2546 else {
2547 /* With all debug warnings on we see a WARN_ON message
2548 * when we free the skb in the interrupt context.
2549 */
2550 INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
2551 }
2552 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
2553 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
2554 break;
2555 case DEFAULT_Q:
2556 INIT_DELAYED_WORK(&rx_ring->rx_work, ql_rx_clean);
2557 cqicb->irq_delay = 0;
2558 cqicb->pkt_delay = 0;
2559 break;
2560 case RX_Q:
2561 /* Inbound completion handling rx_rings run in
2562 * separate NAPI contexts.
2563 */
2564 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
2565 64);
2566 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
2567 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
2568 break;
2569 default:
2570 QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
2571 rx_ring->type);
2572 }
2573 QPRINTK(qdev, IFUP, INFO, "Initializing rx work queue.\n");
2574 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
2575 CFG_LCQ, rx_ring->cq_id);
2576 if (err) {
2577 QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
2578 return err;
2579 }
2580 QPRINTK(qdev, IFUP, INFO, "Successfully loaded CQICB.\n");
2581 /*
2582 * Advance the producer index for the buffer queues.
2583 */
2584 wmb();
2585 if (rx_ring->lbq_len)
2586 ql_write_db_reg(rx_ring->lbq_prod_idx,
2587 rx_ring->lbq_prod_idx_db_reg);
2588 if (rx_ring->sbq_len)
2589 ql_write_db_reg(rx_ring->sbq_prod_idx,
2590 rx_ring->sbq_prod_idx_db_reg);
2591 return err;
2592}
2593
2594static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2595{
2596 struct wqicb *wqicb = (struct wqicb *)tx_ring;
2597 void __iomem *doorbell_area =
2598 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
2599 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
2600 (tx_ring->wq_id * sizeof(u64));
2601 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
2602 (tx_ring->wq_id * sizeof(u64));
2603 int err = 0;
2604
2605 /*
2606 * Assign doorbell registers for this tx_ring.
2607 */
2608 /* TX PCI doorbell mem area for tx producer index */
2609 tx_ring->prod_idx_db_reg = (u32 *) doorbell_area;
2610 tx_ring->prod_idx = 0;
2611 /* TX PCI doorbell mem area + 0x04 */
2612 tx_ring->valid_db_reg = doorbell_area + 0x04;
2613
2614 /*
2615 * Assign shadow registers for this tx_ring.
2616 */
2617 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
2618 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
2619
2620 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
2621 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
2622 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
2623 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
2624 wqicb->rid = 0;
2625 wqicb->addr_lo = cpu_to_le32(tx_ring->wq_base_dma);
2626 wqicb->addr_hi = cpu_to_le32((u64) tx_ring->wq_base_dma >> 32);
2627
2628 wqicb->cnsmr_idx_addr_lo = cpu_to_le32(tx_ring->cnsmr_idx_sh_reg_dma);
2629 wqicb->cnsmr_idx_addr_hi =
2630 cpu_to_le32((u64) tx_ring->cnsmr_idx_sh_reg_dma >> 32);
2631
2632 ql_init_tx_ring(qdev, tx_ring);
2633
2634 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
2635 (u16) tx_ring->wq_id);
2636 if (err) {
2637 QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
2638 return err;
2639 }
2640 QPRINTK(qdev, IFUP, INFO, "Successfully loaded WQICB.\n");
2641 return err;
2642}
2643
2644static void ql_disable_msix(struct ql_adapter *qdev)
2645{
2646 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2647 pci_disable_msix(qdev->pdev);
2648 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
2649 kfree(qdev->msi_x_entry);
2650 qdev->msi_x_entry = NULL;
2651 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
2652 pci_disable_msi(qdev->pdev);
2653 clear_bit(QL_MSI_ENABLED, &qdev->flags);
2654 }
2655}
2656
2657static void ql_enable_msix(struct ql_adapter *qdev)
2658{
2659 int i;
2660
2661 qdev->intr_count = 1;
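 /* Assume a single vector until MSI-X allocation succeeds. */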
2662 /* Get the MSIX vectors. */
2663 if (irq_type == MSIX_IRQ) {
2664 /* Try to alloc space for the msix struct,
2665 * if it fails then go to MSI/legacy.
2666 */
2667 qdev->msi_x_entry = kcalloc(qdev->rx_ring_count,
2668 sizeof(struct msix_entry),
2669 GFP_KERNEL);
2670 if (!qdev->msi_x_entry) {
2671 irq_type = MSI_IRQ;
2672 goto msi;
2673 }
2674
2675 for (i = 0; i < qdev->rx_ring_count; i++)
2676 qdev->msi_x_entry[i].entry = i;
2677
2678 if (!pci_enable_msix
2679 (qdev->pdev, qdev->msi_x_entry, qdev->rx_ring_count)) {
2680 set_bit(QL_MSIX_ENABLED, &qdev->flags);
2681 qdev->intr_count = qdev->rx_ring_count;
2682 QPRINTK(qdev, IFUP, INFO,
2683 "MSI-X Enabled, got %d vectors.\n",
2684 qdev->intr_count);
2685 return;
2686 } else {
2687 kfree(qdev->msi_x_entry);
2688 qdev->msi_x_entry = NULL;
2689 QPRINTK(qdev, IFUP, WARNING,
2690 "MSI-X Enable failed, trying MSI.\n");
2691 irq_type = MSI_IRQ;
2692 }
2693 }
2694msi:
2695 if (irq_type == MSI_IRQ) {
2696 if (!pci_enable_msi(qdev->pdev)) {
2697 set_bit(QL_MSI_ENABLED, &qdev->flags);
2698 QPRINTK(qdev, IFUP, INFO,
2699 "Running with MSI interrupts.\n");
2700 return;
2701 }
2702 }
2703 irq_type = LEG_IRQ;
2704 spin_lock_init(&qdev->legacy_lock);
2705 qdev->legacy_check = ql_legacy_check;
2706 QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
2707}
2708
2709/*
2710 * Here we build the intr_context structures based on
2711 * our rx_ring count and intr vector count.
2712 * The intr_context structure is used to hook each vector
2713 * to possibly different handlers.
2714 */
2715static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
2716{
2717 int i = 0;
2718 struct intr_context *intr_context = &qdev->intr_context[0];
2719
2720 ql_enable_msix(qdev);
2721
2722 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2723 /* Each rx_ring has its
2724 * own intr_context since we have separate
2725 * vectors for each queue.
2726 * This is only true when MSI-X is enabled.
2727 */
2728 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2729 qdev->rx_ring[i].irq = i;
2730 intr_context->intr = i;
2731 intr_context->qdev = qdev;
2732 /*
2733 * We set up each vector's enable/disable/read bits so
2734 * there are no bit/mask calculations in the critical path.
2735 */
2736 intr_context->intr_en_mask =
2737 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2738 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
2739 | i;
2740 intr_context->intr_dis_mask =
2741 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2742 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
2743 INTR_EN_IHD | i;
2744 intr_context->intr_read_mask =
2745 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2746 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
2747 i;
2748
2749 if (i == 0) {
2750 /*
2751 * Default queue handles bcast/mcast plus
2752 * async events. Needs buffers.
2753 */
2754 intr_context->handler = qlge_isr;
2755 sprintf(intr_context->name, "%s-default-queue",
2756 qdev->ndev->name);
2757 } else if (i < qdev->rss_ring_first_cq_id) {
2758 /*
2759 * Outbound queue is for outbound completions only.
2760 */
2761 intr_context->handler = qlge_msix_tx_isr;
2762 sprintf(intr_context->name, "%s-txq-%d",
2763 qdev->ndev->name, i);
2764 } else {
2765 /*
2766 * Inbound queues handle unicast frames only.
2767 */
2768 intr_context->handler = qlge_msix_rx_isr;
2769 sprintf(intr_context->name, "%s-rxq-%d",
2770 qdev->ndev->name, i);
2771 }
2772 }
2773 } else {
2774 /*
2775 * All rx_rings use the same intr_context since
2776 * there is only one vector.
2777 */
2778 intr_context->intr = 0;
2779 intr_context->qdev = qdev;
2780 /*
2781 * We set up the vector's enable/disable/read bits so
2782 * there are no bit/mask calculations in the critical path.
2783 */
2784 intr_context->intr_en_mask =
2785 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
2786 intr_context->intr_dis_mask =
2787 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2788 INTR_EN_TYPE_DISABLE;
2789 intr_context->intr_read_mask =
2790 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
2791 /*
2792 * Single interrupt means one handler for all rings.
2793 */
2794 intr_context->handler = qlge_isr;
2795 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
2796 for (i = 0; i < qdev->rx_ring_count; i++)
2797 qdev->rx_ring[i].irq = 0;
2798 }
2799}
2800
2801static void ql_free_irq(struct ql_adapter *qdev)
2802{
2803 int i;
2804 struct intr_context *intr_context = &qdev->intr_context[0];
2805
2806 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2807 if (intr_context->hooked) {
2808 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2809 free_irq(qdev->msi_x_entry[i].vector,
2810 &qdev->rx_ring[i]);
2811 QPRINTK(qdev, IFDOWN, ERR,
2812 "freeing msix interrupt %d.\n", i);
2813 } else {
2814 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
2815 QPRINTK(qdev, IFDOWN, ERR,
2816 "freeing msi interrupt %d.\n", i);
2817 }
2818 }
2819 }
2820 ql_disable_msix(qdev);
2821}
2822
2823static int ql_request_irq(struct ql_adapter *qdev)
2824{
2825 int i;
2826 int status = 0;
2827 struct pci_dev *pdev = qdev->pdev;
2828 struct intr_context *intr_context = &qdev->intr_context[0];
2829
2830 ql_resolve_queues_to_irqs(qdev);
2831
2832 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2833 atomic_set(&intr_context->irq_cnt, 0);
2834 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2835 status = request_irq(qdev->msi_x_entry[i].vector,
2836 intr_context->handler,
2837 0,
2838 intr_context->name,
2839 &qdev->rx_ring[i]);
2840 if (status) {
2841 QPRINTK(qdev, IFUP, ERR,
2842 "Failed request for MSIX interrupt %d.\n",
2843 i);
2844 goto err_irq;
2845 } else {
2846 QPRINTK(qdev, IFUP, INFO,
2847 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
2848 i,
2849 qdev->rx_ring[i].type ==
2850 DEFAULT_Q ? "DEFAULT_Q" : "",
2851 qdev->rx_ring[i].type ==
2852 TX_Q ? "TX_Q" : "",
2853 qdev->rx_ring[i].type ==
2854 RX_Q ? "RX_Q" : "", intr_context->name);
2855 }
2856 } else {
2857 QPRINTK(qdev, IFUP, DEBUG,
2858 "trying msi or legacy interrupts.\n");
2859 QPRINTK(qdev, IFUP, DEBUG,
2860 "%s: irq = %d.\n", __func__, pdev->irq);
2861 QPRINTK(qdev, IFUP, DEBUG,
2862 "%s: context->name = %s.\n", __func__,
2863 intr_context->name);
2864 QPRINTK(qdev, IFUP, DEBUG,
2865 "%s: dev_id = 0x%p.\n", __func__,
2866 &qdev->rx_ring[0]);
2867 status = request_irq(pdev->irq, qlge_isr,
2868 test_bit(QL_MSI_ENABLED,
2869 &qdev->flags)
2870 ? 0 : IRQF_SHARED,
2871 intr_context->name,
2872 &qdev->rx_ring[0]);
2873 if (status)
2874 goto err_irq;
2875
2876 QPRINTK(qdev, IFUP, ERR,
2877 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
2878 i,
2879 qdev->rx_ring[0].type ==
2880 DEFAULT_Q ? "DEFAULT_Q" : "",
2881 qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
2882 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
2883 intr_context->name);
2884 }
2885 intr_context->hooked = 1;
2886 }
2887 return status;
2888err_irq:
2889 QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!/n");
2890 ql_free_irq(qdev);
2891 return status;
2892}
2893
2894static int ql_start_rss(struct ql_adapter *qdev)
2895{
2896 struct ricb *ricb = &qdev->ricb;
2897 int status = 0;
2898 int i;
2899 u8 *hash_id = (u8 *) ricb->hash_cq_id;
2900
2901 memset((void *)ricb, 0, sizeof(*ricb));
2902
2903 ricb->base_cq = qdev->rss_ring_first_cq_id | RSS_L4K;
2904 ricb->flags =
2905 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
2906 RSS_RT6);
2907 ricb->mask = cpu_to_le16(qdev->rss_ring_count - 1);
2908
2909 /*
2910 * Fill out the Indirection Table, alternating entries between the first two inbound (RSS) queues.
2911 */
2912 for (i = 0; i < 32; i++)
2913 hash_id[i] = i & 1;
2914
2915 /*
2916 * Random values for the IPv6 and IPv4 Hash Keys.
2917 */
2918 get_random_bytes((void *)&ricb->ipv6_hash_key[0], 40);
2919 get_random_bytes((void *)&ricb->ipv4_hash_key[0], 16);
2920
2921 QPRINTK(qdev, IFUP, INFO, "Initializing RSS.\n");
2922
2923 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
2924 if (status) {
2925 QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
2926 return status;
2927 }
2928 QPRINTK(qdev, IFUP, INFO, "Successfully loaded RICB.\n");
2929 return status;
2930}
2931
2932/* Initialize the frame-to-queue routing. */
2933static int ql_route_initialize(struct ql_adapter *qdev)
2934{
2935 int status = 0;
2936 int i;
2937
2938 /* Clear all the entries in the routing table. */
2939 for (i = 0; i < 16; i++) {
2940 status = ql_set_routing_reg(qdev, i, 0, 0);
2941 if (status) {
2942 QPRINTK(qdev, IFUP, ERR,
2943 "Failed to init routing register for CAM packets.\n");
2944 return status;
2945 }
2946 }
2947
2948 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
2949 if (status) {
2950 QPRINTK(qdev, IFUP, ERR,
2951 "Failed to init routing register for error packets.\n");
2952 return status;
2953 }
2954 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
2955 if (status) {
2956 QPRINTK(qdev, IFUP, ERR,
2957 "Failed to init routing register for broadcast packets.\n");
2958 return status;
2959 }
2960 /* If we have more than one inbound queue, then turn on RSS in the
2961 * routing block.
2962 */
2963 if (qdev->rss_ring_count > 1) {
2964 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
2965 RT_IDX_RSS_MATCH, 1);
2966 if (status) {
2967 QPRINTK(qdev, IFUP, ERR,
2968 "Failed to init routing register for MATCH RSS packets.\n");
2969 return status;
2970 }
2971 }
2972
2973 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
2974 RT_IDX_CAM_HIT, 1);
2975 if (status) {
2976 QPRINTK(qdev, IFUP, ERR,
2977 "Failed to init routing register for CAM packets.\n");
2978 return status;
2979 }
2980 return status;
2981}
2982
2983static int ql_adapter_initialize(struct ql_adapter *qdev)
2984{
2985 u32 value, mask;
2986 int i;
2987 int status = 0;
2988
2989 /*
2990 * Set up the System register to halt on errors.
2991 */
2992 value = SYS_EFE | SYS_FAE;
2993 mask = value << 16;
2994 ql_write32(qdev, SYS, mask | value);
2995
2996 /* Set the default queue. */
2997 value = NIC_RCV_CFG_DFQ;
2998 mask = NIC_RCV_CFG_DFQ_MASK;
2999 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3000
3001 /* Set the MPI interrupt to enabled. */
3002 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3003
3004 /* Enable the function, set pagesize, enable error checking. */
3005 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3006 FSC_EC | FSC_VM_PAGE_4K | FSC_SH;
3007
3008 /* Set/clear header splitting. */
3009 mask = FSC_VM_PAGESIZE_MASK |
3010 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3011 ql_write32(qdev, FSC, mask | value);
3012
3013 ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
3014 min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE));
3015
3016 /* Start up the rx queues. */
3017 for (i = 0; i < qdev->rx_ring_count; i++) {
3018 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3019 if (status) {
3020 QPRINTK(qdev, IFUP, ERR,
3021 "Failed to start rx ring[%d].\n", i);
3022 return status;
3023 }
3024 }
3025
3026 /* If there is more than one inbound completion queue
3027 * then download a RICB to configure RSS.
3028 */
3029 if (qdev->rss_ring_count > 1) {
3030 status = ql_start_rss(qdev);
3031 if (status) {
3032 QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
3033 return status;
3034 }
3035 }
3036
3037 /* Start up the tx queues. */
3038 for (i = 0; i < qdev->tx_ring_count; i++) {
3039 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3040 if (status) {
3041 QPRINTK(qdev, IFUP, ERR,
3042 "Failed to start tx ring[%d].\n", i);
3043 return status;
3044 }
3045 }
3046
3047 status = ql_port_initialize(qdev);
3048 if (status) {
3049 QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
3050 return status;
3051 }
3052
3053 status = ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->perm_addr,
3054 MAC_ADDR_TYPE_CAM_MAC, qdev->func);
3055 if (status) {
3056 QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
3057 return status;
3058 }
3059
3060 status = ql_route_initialize(qdev);
3061 if (status) {
3062 QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
3063 return status;
3064 }
3065
3066 /* Start NAPI for the RSS queues. */
3067 for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++) {
3068 QPRINTK(qdev, IFUP, INFO, "Enabling NAPI for rx_ring[%d].\n",
3069 i);
3070 napi_enable(&qdev->rx_ring[i].napi);
3071 }
3072
3073 return status;
3074}
3075
3076/* Issue soft reset to chip. */
3077static int ql_adapter_reset(struct ql_adapter *qdev)
3078{
3079 u32 value;
3080 int max_wait_time;
3081 int status = 0;
3082 int resetCnt = 0;
3083
3084#define MAX_RESET_CNT 1
3085issueReset:
3086 resetCnt++;
3087 QPRINTK(qdev, IFDOWN, DEBUG, "Issue soft reset to chip.\n");
3088 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3089 /* Wait for reset to complete. */
3090 max_wait_time = 3;
3091 QPRINTK(qdev, IFDOWN, DEBUG, "Wait %d seconds for reset to complete.\n",
3092 max_wait_time);
3093 do {
3094 value = ql_read32(qdev, RST_FO);
3095 if ((value & RST_FO_FR) == 0)
3096 break;
3097
3098 ssleep(1);
3099 } while ((--max_wait_time));
3100 if (value & RST_FO_FR) {
3101 QPRINTK(qdev, IFDOWN, ERR,
3102 "Stuck in SoftReset: FSC_SR:0x%08x\n", value);
3103 if (resetCnt < MAX_RESET_CNT)
3104 goto issueReset;
3105 }
3106 if (max_wait_time == 0) {
3107 status = -ETIMEDOUT;
3108 QPRINTK(qdev, IFDOWN, ERR,
3109 "ETIMEOUT!!! errored out of resetting the chip!\n");
3110 }
3111
3112 return status;
3113}
3114
3115static void ql_display_dev_info(struct net_device *ndev)
3116{
3117 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3118
3119 QPRINTK(qdev, PROBE, INFO,
3120 "Function #%d, NIC Roll %d, NIC Rev = %d, "
3121 "XG Roll = %d, XG Rev = %d.\n",
3122 qdev->func,
3123 qdev->chip_rev_id & 0x0000000f,
3124 qdev->chip_rev_id >> 4 & 0x0000000f,
3125 qdev->chip_rev_id >> 8 & 0x0000000f,
3126 qdev->chip_rev_id >> 12 & 0x0000000f);
3127 QPRINTK(qdev, PROBE, INFO,
3128 "MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
3129 ndev->dev_addr[0], ndev->dev_addr[1],
3130 ndev->dev_addr[2], ndev->dev_addr[3], ndev->dev_addr[4],
3131 ndev->dev_addr[5]);
3132}
3133
3134static int ql_adapter_down(struct ql_adapter *qdev)
3135{
3136 struct net_device *ndev = qdev->ndev;
3137 int i, status = 0;
3138 struct rx_ring *rx_ring;
3139
3140 netif_stop_queue(ndev);
3141 netif_carrier_off(ndev);
3142
3143 cancel_delayed_work_sync(&qdev->asic_reset_work);
3144 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3145 cancel_delayed_work_sync(&qdev->mpi_work);
3146
3147 /* The default queue at index 0 is always processed in
3148 * a workqueue.
3149 */
3150 cancel_delayed_work_sync(&qdev->rx_ring[0].rx_work);
3151
3152 /* The rest of the rx_rings are processed in
3153 * a workqueue only if it's a single interrupt
3154 * environment (MSI/Legacy).
3155 */
3156 for (i = 1; i < qdev->rx_ring_count; i++) {
3157 rx_ring = &qdev->rx_ring[i];
3158 /* Only the RSS rings use NAPI on multi irq
3159 * environment. Outbound completion processing
3160 * is done in interrupt context.
3161 */
3162 if (i >= qdev->rss_ring_first_cq_id) {
3163 napi_disable(&rx_ring->napi);
3164 } else {
3165 cancel_delayed_work_sync(&rx_ring->rx_work);
3166 }
3167 }
3168
3169 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3170
3171 ql_disable_interrupts(qdev);
3172
3173 ql_tx_ring_clean(qdev);
3174
3175 spin_lock(&qdev->hw_lock);
3176 status = ql_adapter_reset(qdev);
3177 if (status)
3178 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
3179 qdev->func);
3180 spin_unlock(&qdev->hw_lock);
3181 return status;
3182}
3183
3184static int ql_adapter_up(struct ql_adapter *qdev)
3185{
3186 int err = 0;
3187
3188 spin_lock(&qdev->hw_lock);
3189 err = ql_adapter_initialize(qdev);
3190 if (err) {
3191 QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
3192 spin_unlock(&qdev->hw_lock);
3193 goto err_init;
3194 }
3195 spin_unlock(&qdev->hw_lock);
3196 set_bit(QL_ADAPTER_UP, &qdev->flags);
3197 ql_enable_interrupts(qdev);
3198 ql_enable_all_completion_interrupts(qdev);
3199 if ((ql_read32(qdev, STS) & qdev->port_init)) {
3200 netif_carrier_on(qdev->ndev);
3201 netif_start_queue(qdev->ndev);
3202 }
3203
3204 return 0;
3205err_init:
3206 ql_adapter_reset(qdev);
3207 return err;
3208}
3209
3210static int ql_cycle_adapter(struct ql_adapter *qdev)
3211{
3212 int status;
3213
3214 status = ql_adapter_down(qdev);
3215 if (status)
3216 goto error;
3217
3218 status = ql_adapter_up(qdev);
3219 if (status)
3220 goto error;
3221
3222 return status;
3223error:
3224 QPRINTK(qdev, IFUP, ALERT,
3225 "Driver up/down cycle failed, closing device\n");
3226 rtnl_lock();
3227 dev_close(qdev->ndev);
3228 rtnl_unlock();
3229 return status;
3230}
3231
3232static void ql_release_adapter_resources(struct ql_adapter *qdev)
3233{
3234 ql_free_mem_resources(qdev);
3235 ql_free_irq(qdev);
3236}
3237
3238static int ql_get_adapter_resources(struct ql_adapter *qdev)
3239{
3240 int status = 0;
3241
3242 if (ql_alloc_mem_resources(qdev)) {
3243 QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
3244 return -ENOMEM;
3245 }
3246 status = ql_request_irq(qdev);
3247 if (status)
3248 goto err_irq;
3249 return status;
3250err_irq:
3251 ql_free_mem_resources(qdev);
3252 return status;
3253}
3254
3255static int qlge_close(struct net_device *ndev)
3256{
3257 struct ql_adapter *qdev = netdev_priv(ndev);
3258
3259 /*
3260 * Wait for device to recover from a reset.
3261 * (Rarely happens, but possible.)
3262 */
3263 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3264 msleep(1);
3265 ql_adapter_down(qdev);
3266 ql_release_adapter_resources(qdev);
3267 ql_free_ring_cb(qdev);
3268 return 0;
3269}
3270
3271static int ql_configure_rings(struct ql_adapter *qdev)
3272{
3273 int i;
3274 struct rx_ring *rx_ring;
3275 struct tx_ring *tx_ring;
3276 int cpu_cnt = num_online_cpus();
3277
3278 /*
3279 * For each online processor we allocate one
3280 * rx_ring for outbound completions, and one
3281 * rx_ring for inbound completions. Plus there is
3282 * always the one default queue. For a given CPU
3283 * count we therefore end up with:
3284 * rx_ring count =
3285 * one default queue +
3286 * (CPU count * outbound completion rx_rings) +
3287 * (CPU count * inbound (RSS) completion rx_rings)
3288 * To keep it simple we limit the total number of
3289 * queues to < 32, so we cap the CPU count at 8.
3290 * This limitation can be removed when requested.
3291 */
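 /* For example, with four online CPUs this works out to
  * 1 + 4 + 4 = 9 rx_rings: cq_id 0 is the default queue,
  * cq_ids 1-4 service tx completions, and cq_ids 5-8 are
  * the inbound (RSS) queues.
  */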
3292
3293 if (cpu_cnt > 8)
3294 cpu_cnt = 8;
3295
3296 /*
3297 * rx_ring[0] is always the default queue.
3298 */
3299 /* Allocate outbound completion ring for each CPU. */
3300 qdev->tx_ring_count = cpu_cnt;
3301 /* Allocate inbound completion (RSS) ring for each CPU. */
3302 qdev->rss_ring_count = cpu_cnt;
3303 /* cq_id for the first inbound ring handler. */
3304 qdev->rss_ring_first_cq_id = cpu_cnt + 1;
3305 /*
3306 * qdev->rx_ring_count:
3307 * Total number of rx_rings. This includes the one
3308 * default queue, a number of outbound completion
3309 * handler rx_rings, and the number of inbound
3310 * completion handler rx_rings.
3311 */
3312 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;
3313
3314 if (ql_alloc_ring_cb(qdev))
3315 return -ENOMEM;
3316
3317 for (i = 0; i < qdev->tx_ring_count; i++) {
3318 tx_ring = &qdev->tx_ring[i];
3319 memset((void *)tx_ring, 0, sizeof(*tx_ring));
3320 tx_ring->qdev = qdev;
3321 tx_ring->wq_id = i;
3322 tx_ring->wq_len = qdev->tx_ring_size;
3323 tx_ring->wq_size =
3324 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3325
3326 /*
3327 * The completion queue ID for the tx rings start
3328 * immediately after the default Q ID, which is zero.
3329 */
3330 tx_ring->cq_id = i + 1;
3331 }
3332
3333 for (i = 0; i < qdev->rx_ring_count; i++) {
3334 rx_ring = &qdev->rx_ring[i];
3335 memset((void *)rx_ring, 0, sizeof(*rx_ring));
3336 rx_ring->qdev = qdev;
3337 rx_ring->cq_id = i;
3338 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
3339 if (i == 0) { /* Default queue at index 0. */
3340 /*
3341 * Default queue handles bcast/mcast plus
3342 * async events. Needs buffers.
3343 */
3344 rx_ring->cq_len = qdev->rx_ring_size;
3345 rx_ring->cq_size =
3346 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3347 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3348 rx_ring->lbq_size =
3349 rx_ring->lbq_len * sizeof(struct bq_element);
3350 rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
3351 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3352 rx_ring->sbq_size =
3353 rx_ring->sbq_len * sizeof(struct bq_element);
3354 rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
3355 rx_ring->type = DEFAULT_Q;
3356 } else if (i < qdev->rss_ring_first_cq_id) {
3357 /*
3358 * Outbound queue handles outbound completions only.
3359 */
3360 /* outbound cq is same size as tx_ring it services. */
3361 rx_ring->cq_len = qdev->tx_ring_size;
3362 rx_ring->cq_size =
3363 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3364 rx_ring->lbq_len = 0;
3365 rx_ring->lbq_size = 0;
3366 rx_ring->lbq_buf_size = 0;
3367 rx_ring->sbq_len = 0;
3368 rx_ring->sbq_size = 0;
3369 rx_ring->sbq_buf_size = 0;
3370 rx_ring->type = TX_Q;
3371 } else { /* Inbound completions (RSS) queues */
3372 /*
3373 * Inbound queues handle unicast frames only.
3374 */
3375 rx_ring->cq_len = qdev->rx_ring_size;
3376 rx_ring->cq_size =
3377 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3378 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3379 rx_ring->lbq_size =
3380 rx_ring->lbq_len * sizeof(struct bq_element);
3381 rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
3382 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3383 rx_ring->sbq_size =
3384 rx_ring->sbq_len * sizeof(struct bq_element);
3385 rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
3386 rx_ring->type = RX_Q;
3387 }
3388 }
3389 return 0;
3390}
3391
3392static int qlge_open(struct net_device *ndev)
3393{
3394 int err = 0;
3395 struct ql_adapter *qdev = netdev_priv(ndev);
3396
3397 err = ql_configure_rings(qdev);
3398 if (err)
3399 return err;
3400
3401 err = ql_get_adapter_resources(qdev);
3402 if (err)
3403 goto error_up;
3404
3405 err = ql_adapter_up(qdev);
3406 if (err)
3407 goto error_up;
3408
3409 return err;
3410
3411error_up:
3412 ql_release_adapter_resources(qdev);
3413 ql_free_ring_cb(qdev);
3414 return err;
3415}
3416
3417static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
3418{
3419 struct ql_adapter *qdev = netdev_priv(ndev);
3420
3421 if (ndev->mtu == 1500 && new_mtu == 9000) {
3422 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
3423 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
3424 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
3425 } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
3426 (ndev->mtu == 9000 && new_mtu == 9000)) {
3427 return 0;
3428 } else
3429 return -EINVAL;
3430 ndev->mtu = new_mtu;
3431 return 0;
3432}
3433
3434static struct net_device_stats *qlge_get_stats(struct net_device
3435 *ndev)
3436{
3437 struct ql_adapter *qdev = netdev_priv(ndev);
3438 return &qdev->stats;
3439}
3440
3441static void qlge_set_multicast_list(struct net_device *ndev)
3442{
3443 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3444 struct dev_mc_list *mc_ptr;
3445 int i;
3446
3447 spin_lock(&qdev->hw_lock);
3448 /*
3449 * Set or clear promiscuous mode if a
3450 * transition is taking place.
3451 */
3452 if (ndev->flags & IFF_PROMISC) {
3453 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3454 if (ql_set_routing_reg
3455 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
3456 QPRINTK(qdev, HW, ERR,
3457 "Failed to set promiscous mode.\n");
3458 } else {
3459 set_bit(QL_PROMISCUOUS, &qdev->flags);
3460 }
3461 }
3462 } else {
3463 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3464 if (ql_set_routing_reg
3465 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
3466 QPRINTK(qdev, HW, ERR,
3467 "Failed to clear promiscous mode.\n");
3468 } else {
3469 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3470 }
3471 }
3472 }
3473
3474 /*
3475 * Set or clear all multicast mode if a
3476 * transition is taking place.
3477 */
3478 if ((ndev->flags & IFF_ALLMULTI) ||
3479 (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
3480 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
3481 if (ql_set_routing_reg
3482 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
3483 QPRINTK(qdev, HW, ERR,
3484 "Failed to set all-multi mode.\n");
3485 } else {
3486 set_bit(QL_ALLMULTI, &qdev->flags);
3487 }
3488 }
3489 } else {
3490 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
3491 if (ql_set_routing_reg
3492 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
3493 QPRINTK(qdev, HW, ERR,
3494 "Failed to clear all-multi mode.\n");
3495 } else {
3496 clear_bit(QL_ALLMULTI, &qdev->flags);
3497 }
3498 }
3499 }
3500
3501 if (ndev->mc_count) {
3502 for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
3503 i++, mc_ptr = mc_ptr->next)
3504 if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
3505 MAC_ADDR_TYPE_MULTI_MAC, i)) {
3506 QPRINTK(qdev, HW, ERR,
3507 "Failed to loadmulticast address.\n");
3508 goto exit;
3509 }
3510 if (ql_set_routing_reg
3511 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
3512 QPRINTK(qdev, HW, ERR,
3513 "Failed to set multicast match mode.\n");
3514 } else {
3515 set_bit(QL_ALLMULTI, &qdev->flags);
3516 }
3517 }
3518exit:
3519 spin_unlock(&qdev->hw_lock);
3520}
3521
3522static int qlge_set_mac_address(struct net_device *ndev, void *p)
3523{
3524 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3525 struct sockaddr *addr = p;
3526
3527 if (netif_running(ndev))
3528 return -EBUSY;
3529
3530 if (!is_valid_ether_addr(addr->sa_data))
3531 return -EADDRNOTAVAIL;
3532 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3533
3534 spin_lock(&qdev->hw_lock);
3535 if (ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
3536 MAC_ADDR_TYPE_CAM_MAC, qdev->func)) {/* Unicast */
3537 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
3538 return -1;
3539 }
3540 spin_unlock(&qdev->hw_lock);
3541
3542 return 0;
3543}
3544
3545static void qlge_tx_timeout(struct net_device *ndev)
3546{
3547 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3548 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
3549}
3550
3551static void ql_asic_reset_work(struct work_struct *work)
3552{
3553 struct ql_adapter *qdev =
3554 container_of(work, struct ql_adapter, asic_reset_work.work);
3555 ql_cycle_adapter(qdev);
3556}
3557
3558static void ql_get_board_info(struct ql_adapter *qdev)
3559{
3560 qdev->func =
3561 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
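 /* Function 1 uses the FUNC2 mailbox registers; function 0 uses FUNC0. */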
3562 if (qdev->func) {
3563 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
3564 qdev->port_link_up = STS_PL1;
3565 qdev->port_init = STS_PI1;
3566 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
3567 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
3568 } else {
3569 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
3570 qdev->port_link_up = STS_PL0;
3571 qdev->port_init = STS_PI0;
3572 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
3573 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
3574 }
3575 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
3576}
3577
3578static void ql_release_all(struct pci_dev *pdev)
3579{
3580 struct net_device *ndev = pci_get_drvdata(pdev);
3581 struct ql_adapter *qdev = netdev_priv(ndev);
3582
3583 if (qdev->workqueue) {
3584 destroy_workqueue(qdev->workqueue);
3585 qdev->workqueue = NULL;
3586 }
3587 if (qdev->q_workqueue) {
3588 destroy_workqueue(qdev->q_workqueue);
3589 qdev->q_workqueue = NULL;
3590 }
3591 if (qdev->reg_base)
3592 iounmap((void *)qdev->reg_base);
3593 if (qdev->doorbell_area)
3594 iounmap(qdev->doorbell_area);
3595 pci_release_regions(pdev);
3596 pci_set_drvdata(pdev, NULL);
3597}
3598
3599static int __devinit ql_init_device(struct pci_dev *pdev,
3600 struct net_device *ndev, int cards_found)
3601{
3602 struct ql_adapter *qdev = netdev_priv(ndev);
3603 int pos, err = 0;
3604 u16 val16;
3605
3606 memset((void *)qdev, 0, sizeof(*qdev));
3607 err = pci_enable_device(pdev);
3608 if (err) {
3609 dev_err(&pdev->dev, "PCI device enable failed.\n");
3610 return err;
3611 }
3612
3613 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3614 if (pos <= 0) {
3615 dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
3616 "aborting.\n");
3617 goto err_out;
3618 } else {
3619 pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
3620 val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
3621 val16 |= (PCI_EXP_DEVCTL_CERE |
3622 PCI_EXP_DEVCTL_NFERE |
3623 PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
3624 pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
3625 }
3626
3627 err = pci_request_regions(pdev, DRV_NAME);
3628 if (err) {
3629 dev_err(&pdev->dev, "PCI region request failed.\n");
3630 goto err_out;
3631 }
3632
3633 pci_set_master(pdev);
3634 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
3635 set_bit(QL_DMA64, &qdev->flags);
3636 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3637 } else {
3638 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3639 if (!err)
3640 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3641 }
3642
3643 if (err) {
3644 dev_err(&pdev->dev, "No usable DMA configuration.\n");
3645 goto err_out;
3646 }
3647
3648 pci_set_drvdata(pdev, ndev);
3649 qdev->reg_base =
3650 ioremap_nocache(pci_resource_start(pdev, 1),
3651 pci_resource_len(pdev, 1));
3652 if (!qdev->reg_base) {
3653 dev_err(&pdev->dev, "Register mapping failed.\n");
3654 err = -ENOMEM;
3655 goto err_out;
3656 }
3657
3658 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
3659 qdev->doorbell_area =
3660 ioremap_nocache(pci_resource_start(pdev, 3),
3661 pci_resource_len(pdev, 3));
3662 if (!qdev->doorbell_area) {
3663 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
3664 err = -ENOMEM;
3665 goto err_out;
3666 }
3667
3668 ql_get_board_info(qdev);
3669 qdev->ndev = ndev;
3670 qdev->pdev = pdev;
3671 qdev->msg_enable = netif_msg_init(debug, default_msg);
3672 spin_lock_init(&qdev->hw_lock);
3673 spin_lock_init(&qdev->stats_lock);
3674
3675 /* make sure the EEPROM is good */
3676 err = ql_get_flash_params(qdev);
3677 if (err) {
3678 dev_err(&pdev->dev, "Invalid FLASH.\n");
3679 goto err_out;
3680 }
3681
3682 if (!is_valid_ether_addr(qdev->flash.mac_addr)) {
 err = -EIO;
3683 goto err_out;
 }
3684
3685 memcpy(ndev->dev_addr, qdev->flash.mac_addr, ndev->addr_len);
3686 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
3687
3688 /* Set up the default ring sizes. */
3689 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
3690 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
3691
3692 /* Set up the coalescing parameters. */
3693 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
3694 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
3695 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
3696 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
3697
3698 /*
3699 * Set up the operating parameters.
3700 */
3701 qdev->rx_csum = 1;
3702
3703 qdev->q_workqueue = create_workqueue(ndev->name);
3704 qdev->workqueue = create_singlethread_workqueue(ndev->name);
3705 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
3706 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
3707 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
3708
3709 if (!cards_found) {
3710 dev_info(&pdev->dev, "%s\n", DRV_STRING);
3711 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
3712 DRV_NAME, DRV_VERSION);
3713 }
3714 return 0;
3715err_out:
3716 ql_release_all(pdev);
3717 pci_disable_device(pdev);
3718 return err;
3719}
3720
3721static int __devinit qlge_probe(struct pci_dev *pdev,
3722 const struct pci_device_id *pci_entry)
3723{
3724 struct net_device *ndev = NULL;
3725 struct ql_adapter *qdev = NULL;
3726 static int cards_found = 0;
3727 int err = 0;
3728
3729 ndev = alloc_etherdev(sizeof(struct ql_adapter));
3730 if (!ndev)
3731 return -ENOMEM;
3732
3733 err = ql_init_device(pdev, ndev, cards_found);
3734 if (err < 0) {
3735 free_netdev(ndev);
3736 return err;
3737 }
3738
3739 qdev = netdev_priv(ndev);
3740 SET_NETDEV_DEV(ndev, &pdev->dev);
3741 ndev->features = (0
3742 | NETIF_F_IP_CSUM
3743 | NETIF_F_SG
3744 | NETIF_F_TSO
3745 | NETIF_F_TSO6
3746 | NETIF_F_TSO_ECN
3747 | NETIF_F_HW_VLAN_TX
3748 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
3749
3750 if (test_bit(QL_DMA64, &qdev->flags))
3751 ndev->features |= NETIF_F_HIGHDMA;
3752
3753 /*
3754 * Set up net_device structure.
3755 */
3756 ndev->tx_queue_len = qdev->tx_ring_size;
3757 ndev->irq = pdev->irq;
3758 ndev->open = qlge_open;
3759 ndev->stop = qlge_close;
3760 ndev->hard_start_xmit = qlge_send;
3761 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
3762 ndev->change_mtu = qlge_change_mtu;
3763 ndev->get_stats = qlge_get_stats;
3764 ndev->set_multicast_list = qlge_set_multicast_list;
3765 ndev->set_mac_address = qlge_set_mac_address;
3766 ndev->tx_timeout = qlge_tx_timeout;
3767 ndev->watchdog_timeo = 10 * HZ;
3768 ndev->vlan_rx_register = ql_vlan_rx_register;
3769 ndev->vlan_rx_add_vid = ql_vlan_rx_add_vid;
3770 ndev->vlan_rx_kill_vid = ql_vlan_rx_kill_vid;
3771 err = register_netdev(ndev);
3772 if (err) {
3773 dev_err(&pdev->dev, "net device registration failed.\n");
3774 ql_release_all(pdev);
3775 pci_disable_device(pdev);
3776 return err;
3777 }
3778 netif_carrier_off(ndev);
3779 netif_stop_queue(ndev);
3780 ql_display_dev_info(ndev);
3781 cards_found++;
3782 return 0;
3783}
3784
3785static void __devexit qlge_remove(struct pci_dev *pdev)
3786{
3787 struct net_device *ndev = pci_get_drvdata(pdev);
3788 unregister_netdev(ndev);
3789 ql_release_all(pdev);
3790 pci_disable_device(pdev);
3791 free_netdev(ndev);
3792}
3793
3794/*
3795 * This callback is called by the PCI subsystem whenever
3796 * a PCI bus error is detected.
3797 */
3798static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
3799 enum pci_channel_state state)
3800{
3801 struct net_device *ndev = pci_get_drvdata(pdev);
3802 struct ql_adapter *qdev = netdev_priv(ndev);
3803
3804 if (netif_running(ndev))
3805 ql_adapter_down(qdev);
3806
3807 pci_disable_device(pdev);
3808
3809 /* Request a slot reset. */
3810 return PCI_ERS_RESULT_NEED_RESET;
3811}
3812
3813/*
3814 * This callback is called after the PCI bus has been reset.
3815 * Basically, this tries to restart the card from scratch.
3816 * This is a shortened version of the device probe/discovery code;
3817 * it resembles the first half of the () routine.
3818 */
3819static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
3820{
3821 struct net_device *ndev = pci_get_drvdata(pdev);
3822 struct ql_adapter *qdev = netdev_priv(ndev);
3823
3824 if (pci_enable_device(pdev)) {
3825 QPRINTK(qdev, IFUP, ERR,
3826 "Cannot re-enable PCI device after reset.\n");
3827 return PCI_ERS_RESULT_DISCONNECT;
3828 }
3829
3830 pci_set_master(pdev);
3831
3832 netif_carrier_off(ndev);
3833 netif_stop_queue(ndev);
3834 ql_adapter_reset(qdev);
3835
3836 /* Make sure the EEPROM is good */
3837 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
3838
3839 if (!is_valid_ether_addr(ndev->perm_addr)) {
3840 QPRINTK(qdev, IFUP, ERR, "After reset, invalid MAC address.\n");
3841 return PCI_ERS_RESULT_DISCONNECT;
3842 }
3843
3844 return PCI_ERS_RESULT_RECOVERED;
3845}
3846
3847static void qlge_io_resume(struct pci_dev *pdev)
3848{
3849 struct net_device *ndev = pci_get_drvdata(pdev);
3850 struct ql_adapter *qdev = netdev_priv(ndev);
3851
3852 pci_set_master(pdev);
3853
3854 if (netif_running(ndev)) {
3855 if (ql_adapter_up(qdev)) {
3856 QPRINTK(qdev, IFUP, ERR,
3857 "Device initialization failed after reset.\n");
3858 return;
3859 }
3860 }
3861
3862 netif_device_attach(ndev);
3863}
3864
3865static struct pci_error_handlers qlge_err_handler = {
3866 .error_detected = qlge_io_error_detected,
3867 .slot_reset = qlge_io_slot_reset,
3868 .resume = qlge_io_resume,
3869};
3870
3871static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
3872{
3873 struct net_device *ndev = pci_get_drvdata(pdev);
3874 struct ql_adapter *qdev = netdev_priv(ndev);
3875 int err;
3876
3877 netif_device_detach(ndev);
3878
3879 if (netif_running(ndev)) {
3880 err = ql_adapter_down(qdev);
3881 if (err)
3882 return err;
3883 }
3884
3885 err = pci_save_state(pdev);
3886 if (err)
3887 return err;
3888
3889 pci_disable_device(pdev);
3890
3891 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3892
3893 return 0;
3894}
3895
3896static int qlge_resume(struct pci_dev *pdev)
3897{
3898 struct net_device *ndev = pci_get_drvdata(pdev);
3899 struct ql_adapter *qdev = netdev_priv(ndev);
3900 int err;
3901
3902 pci_set_power_state(pdev, PCI_D0);
3903 pci_restore_state(pdev);
3904 err = pci_enable_device(pdev);
3905 if (err) {
3906 QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
3907 return err;
3908 }
3909 pci_set_master(pdev);
3910
3911 pci_enable_wake(pdev, PCI_D3hot, 0);
3912 pci_enable_wake(pdev, PCI_D3cold, 0);
3913
3914 if (netif_running(ndev)) {
3915 err = ql_adapter_up(qdev);
3916 if (err)
3917 return err;
3918 }
3919
3920 netif_device_attach(ndev);
3921
3922 return 0;
3923}
3924
3925static void qlge_shutdown(struct pci_dev *pdev)
3926{
3927 qlge_suspend(pdev, PMSG_SUSPEND);
3928}
3929
3930static struct pci_driver qlge_driver = {
3931 .name = DRV_NAME,
3932 .id_table = qlge_pci_tbl,
3933 .probe = qlge_probe,
3934 .remove = __devexit_p(qlge_remove),
3935#ifdef CONFIG_PM
3936 .suspend = qlge_suspend,
3937 .resume = qlge_resume,
3938#endif
3939 .shutdown = qlge_shutdown,
3940 .err_handler = &qlge_err_handler
3941};
3942
3943static int __init qlge_init_module(void)
3944{
3945 return pci_register_driver(&qlge_driver);
3946}
3947
3948static void __exit qlge_exit(void)
3949{
3950 pci_unregister_driver(&qlge_driver);
3951}
3952
3953module_init(qlge_init_module);
3954module_exit(qlge_exit);
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
new file mode 100644
index 000000000000..24fe344bcf1f
--- /dev/null
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -0,0 +1,150 @@
1#include "qlge.h"
2
3static int ql_read_mbox_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
4{
5 int status;
6 /* wait for reg to come ready */
7 status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
8 if (status)
9 goto exit;
10 /* set up for reg read */
11 ql_write32(qdev, PROC_ADDR, reg | PROC_ADDR_R);
12 /* wait for reg to come ready */
13 status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
14 if (status)
15 goto exit;
16 /* get the data */
17 *data = ql_read32(qdev, PROC_DATA);
18exit:
19 return status;
20}
21
22int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
23{
24 int i, status;
25
26 status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
27 if (status)
28 return -EBUSY;
29 for (i = 0; i < mbcp->out_count; i++) {
30 status =
31 ql_read_mbox_reg(qdev, qdev->mailbox_out + i,
32 &mbcp->mbox_out[i]);
33 if (status) {
34 QPRINTK(qdev, DRV, ERR, "Failed mailbox read.\n");
35 break;
36 }
37 }
38 ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
39 return status;
40}
41
42static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
43{
44 mbcp->out_count = 2;
45
46 if (ql_get_mb_sts(qdev, mbcp))
47 goto exit;
48
49 qdev->link_status = mbcp->mbox_out[1];
50 QPRINTK(qdev, DRV, ERR, "Link Up.\n");
51 QPRINTK(qdev, DRV, INFO, "Link Status = 0x%.08x.\n", mbcp->mbox_out[1]);
52 if (!netif_carrier_ok(qdev->ndev)) {
53 QPRINTK(qdev, LINK, INFO, "Link is Up.\n");
54 netif_carrier_on(qdev->ndev);
55 netif_wake_queue(qdev->ndev);
56 }
57exit:
58 /* Clear the MPI firmware status. */
59 ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
60}
61
62static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
63{
64 mbcp->out_count = 3;
65
66 if (ql_get_mb_sts(qdev, mbcp)) {
67 QPRINTK(qdev, DRV, ERR, "Firmware did not initialize!\n");
68 goto exit;
69 }
70
71 if (netif_carrier_ok(qdev->ndev)) {
72 QPRINTK(qdev, LINK, INFO, "Link is Down.\n");
73 netif_carrier_off(qdev->ndev);
74 netif_stop_queue(qdev->ndev);
75 }
76 QPRINTK(qdev, DRV, ERR, "Link Down.\n");
77 QPRINTK(qdev, DRV, ERR, "Link Status = 0x%.08x.\n", mbcp->mbox_out[1]);
78exit:
79 /* Clear the MPI firmware status. */
80 ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
81}
82
83static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp)
84{
85 mbcp->out_count = 2;
86
87 if (ql_get_mb_sts(qdev, mbcp)) {
88 QPRINTK(qdev, DRV, ERR, "Firmware did not initialize!\n");
89 goto exit;
90 }
91 QPRINTK(qdev, DRV, ERR, "Firmware initialized!\n");
92 QPRINTK(qdev, DRV, ERR, "Firmware status = 0x%.08x.\n",
93 mbcp->mbox_out[0]);
94 QPRINTK(qdev, DRV, ERR, "Firmware Revision = 0x%.08x.\n",
95 mbcp->mbox_out[1]);
96exit:
97 /* Clear the MPI firmware status. */
98 ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
99}
100
101void ql_mpi_work(struct work_struct *work)
102{
103 struct ql_adapter *qdev =
104 container_of(work, struct ql_adapter, mpi_work.work);
105 struct mbox_params mbc;
106 struct mbox_params *mbcp = &mbc;
107 mbcp->out_count = 1;
108
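 /* Drain every pending MPI event before re-enabling the
  * completion interrupt below.
  */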
109 while (ql_read32(qdev, STS) & STS_PI) {
110 if (ql_get_mb_sts(qdev, mbcp)) {
111 QPRINTK(qdev, DRV, ERR,
112 "Could not read MPI, resetting ASIC!\n");
113 ql_queue_asic_error(qdev);
114 }
115
116 switch (mbcp->mbox_out[0]) {
117 case AEN_LINK_UP:
118 ql_link_up(qdev, mbcp);
119 break;
120 case AEN_LINK_DOWN:
121 ql_link_down(qdev, mbcp);
122 break;
123 case AEN_FW_INIT_DONE:
124 ql_init_fw_done(qdev, mbcp);
125 break;
126 case MB_CMD_STS_GOOD:
127 break;
128 case AEN_FW_INIT_FAIL:
129 case AEN_SYS_ERR:
130 case MB_CMD_STS_ERR:
131 ql_queue_fw_error(qdev);
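 /* Fall through to clear the MPI firmware status. */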
132 default:
133 /* Clear the MPI firmware status. */
134 ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
135 break;
136 }
137 }
138 ql_enable_completion_interrupt(qdev, 0);
139}
140
141void ql_mpi_reset_work(struct work_struct *work)
142{
143 struct ql_adapter *qdev =
144 container_of(work, struct ql_adapter, mpi_reset_work.work);
145 QPRINTK(qdev, DRV, ERR,
146 "Enter, qdev = %p..\n", qdev);
147 ql_write32(qdev, CSR, CSR_CMD_SET_RST);
148 msleep(50);
149 ql_write32(qdev, CSR, CSR_CMD_CLR_RST);
150}