Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/Kconfig   |    9
-rw-r--r--  drivers/net/Makefile  |    1
-rw-r--r--  drivers/net/qla3xxx.c | 3537
-rw-r--r--  drivers/net/qla3xxx.h | 1194
4 files changed, 4741 insertions, 0 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 7c826db84604..3a0d80b28503 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2249,6 +2249,15 @@ config MV643XX_ETH_2
 	  This enables support for Port 2 of the Marvell MV643XX Gigabit
 	  Ethernet.
 
+config QLA3XXX
+	tristate "QLogic QLA3XXX Network Driver Support"
+	depends on PCI
+	help
+	  This driver supports QLogic ISP3XXX gigabit Ethernet cards.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called qla3xxx.
+
 endmenu
 
 #
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 038a8e02f643..5e91c3562ad2 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -106,6 +106,7 @@ obj-$(CONFIG_FORCEDETH) += forcedeth.o
 obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
 
 obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
+obj-$(CONFIG_QLA3XXX) += qla3xxx.o
 
 obj-$(CONFIG_PPP) += ppp_generic.o
 obj-$(CONFIG_PPP_ASYNC) += ppp_async.o
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
new file mode 100644
index 000000000000..c729aeeb4696
--- /dev/null
+++ b/drivers/net/qla3xxx.c
@@ -0,0 +1,3537 @@
1/*
2 * QLogic QLA3xxx NIC HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla3xxx for copyright and licensing details.
6 */
7
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <linux/types.h>
11#include <linux/module.h>
12#include <linux/list.h>
13#include <linux/pci.h>
14#include <linux/dma-mapping.h>
15#include <linux/sched.h>
16#include <linux/slab.h>
17#include <linux/dmapool.h>
18#include <linux/mempool.h>
19#include <linux/spinlock.h>
20#include <linux/kthread.h>
21#include <linux/interrupt.h>
22#include <linux/errno.h>
23#include <linux/ioport.h>
24#include <linux/ip.h>
25#include <linux/if_arp.h>
26#include <linux/if_ether.h>
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29#include <linux/ethtool.h>
30#include <linux/skbuff.h>
31#include <linux/rtnetlink.h>
32#include <linux/if_vlan.h>
33#include <linux/init.h>
34#include <linux/delay.h>
35#include <linux/mm.h>
36
37#include "qla3xxx.h"
38
39#define DRV_NAME "qla3xxx"
40#define DRV_STRING "QLogic ISP3XXX Network Driver"
41#define DRV_VERSION "v2.02.00-k36"
42#define PFX DRV_NAME " "
43
44static const char ql3xxx_driver_name[] = DRV_NAME;
45static const char ql3xxx_driver_version[] = DRV_VERSION;
46
47MODULE_AUTHOR("QLogic Corporation");
48MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
49MODULE_LICENSE("GPL");
50MODULE_VERSION(DRV_VERSION);
51
52static const u32 default_msg
53 = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
54 | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
55
56static int debug = -1; /* defaults above */
57module_param(debug, int, 0);
58MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
59
60static int msi;
61module_param(msi, int, 0);
62MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
63
64static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = {
65 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
66 /* required last entry */
67 {0,}
68};
69
70MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);
71
72/*
73 * Caller must take hw_lock.
74 */
75static int ql_sem_spinlock(struct ql3_adapter *qdev,
76 u32 sem_mask, u32 sem_bits)
77{
78 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
79 u32 value;
80 unsigned int seconds = 3;
81
82 do {
83 writel((sem_mask | sem_bits),
84 &port_regs->CommonRegs.semaphoreReg);
85 value = readl(&port_regs->CommonRegs.semaphoreReg);
86 if ((value & (sem_mask >> 16)) == sem_bits)
87 return 0;
88 ssleep(1);
89 } while(--seconds);
90 return -1;
91}
92
93static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
94{
95 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
96 writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
97 readl(&port_regs->CommonRegs.semaphoreReg);
98}
99
100static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
101{
102 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
103 u32 value;
104
105 writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
106 value = readl(&port_regs->CommonRegs.semaphoreReg);
107 return ((value & (sem_mask >> 16)) == sem_bits);
108}
109
110/*
111 * Caller holds hw_lock.
112 */
113static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
114{
115 int i = 0;
116
117 while (1) {
118 if (!ql_sem_lock(qdev,
119 QL_DRVR_SEM_MASK,
120 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
121 * 2) << 1)) {
122 if (i < 10) {
123 ssleep(1);
124 i++;
125 } else {
126 printk(KERN_ERR PFX "%s: Timed out waiting for "
127 "driver lock...\n",
128 qdev->ndev->name);
129 return 0;
130 }
131 } else {
132 printk(KERN_DEBUG PFX
133 "%s: driver lock acquired.\n",
134 qdev->ndev->name);
135 return 1;
136 }
137 }
138}
139
140static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
141{
142 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
143
144 writel(((ISP_CONTROL_NP_MASK << 16) | page),
145 &port_regs->CommonRegs.ispControlStatus);
146 readl(&port_regs->CommonRegs.ispControlStatus);
147 qdev->current_page = page;
148}
149
150static u32 ql_read_common_reg_l(struct ql3_adapter *qdev,
151 u32 __iomem * reg)
152{
153 u32 value;
154 unsigned long hw_flags;
155
156 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
157 value = readl(reg);
158 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
159
160 return value;
161}
162
163static u32 ql_read_common_reg(struct ql3_adapter *qdev,
164 u32 __iomem * reg)
165{
166 return readl(reg);
167}
168
169static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
170{
171 u32 value;
172 unsigned long hw_flags;
173
174 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
175
176 if (qdev->current_page != 0)
177 ql_set_register_page(qdev,0);
178 value = readl(reg);
179
180 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
181 return value;
182}
183
184static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
185{
186 if (qdev->current_page != 0)
187 ql_set_register_page(qdev,0);
188 return readl(reg);
189}
190
191static void ql_write_common_reg_l(struct ql3_adapter *qdev,
192 u32 * reg, u32 value)
193{
194 unsigned long hw_flags;
195
196 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
197 writel(value, (u32 *) reg);
198 readl(reg);
199 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
200 return;
201}
202
203static void ql_write_common_reg(struct ql3_adapter *qdev,
204 u32 * reg, u32 value)
205{
206 writel(value, (u32 *) reg);
207 readl(reg);
208 return;
209}
210
211static void ql_write_page0_reg(struct ql3_adapter *qdev,
212 u32 * reg, u32 value)
213{
214 if (qdev->current_page != 0)
215 ql_set_register_page(qdev,0);
216 writel(value, (u32 *) reg);
217 readl(reg);
218 return;
219}
220
221/*
222 * Caller holds hw_lock. Only called during init.
223 */
224static void ql_write_page1_reg(struct ql3_adapter *qdev,
225 u32 * reg, u32 value)
226{
227 if (qdev->current_page != 1)
228 ql_set_register_page(qdev,1);
229 writel(value, (u32 *) reg);
230 readl(reg);
231 return;
232}
233
234/*
235 * Caller holds hw_lock. Only called during init.
236 */
237static void ql_write_page2_reg(struct ql3_adapter *qdev,
238 u32 * reg, u32 value)
239{
240 if (qdev->current_page != 2)
241 ql_set_register_page(qdev,2);
242 writel(value, (u32 *) reg);
243 readl(reg);
244 return;
245}
246
247static void ql_disable_interrupts(struct ql3_adapter *qdev)
248{
249 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
250
251 ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
252 (ISP_IMR_ENABLE_INT << 16));
253
254}
255
256static void ql_enable_interrupts(struct ql3_adapter *qdev)
257{
258 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
259
260 ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
261 ((0xff << 16) | ISP_IMR_ENABLE_INT));
262
263}
264
265static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
266 struct ql_rcv_buf_cb *lrg_buf_cb)
267{
268 u64 map;
269 lrg_buf_cb->next = NULL;
270
271 if (qdev->lrg_buf_free_tail == NULL) { /* The list is empty */
272 qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
273 } else {
274 qdev->lrg_buf_free_tail->next = lrg_buf_cb;
275 qdev->lrg_buf_free_tail = lrg_buf_cb;
276 }
277
278 if (!lrg_buf_cb->skb) {
279 lrg_buf_cb->skb = dev_alloc_skb(qdev->lrg_buffer_len);
280 if (unlikely(!lrg_buf_cb->skb)) {
281 printk(KERN_ERR PFX "%s: failed dev_alloc_skb().\n",
282 qdev->ndev->name);
283 qdev->lrg_buf_skb_check++;
284 } else {
285 /*
286 * We save some space to copy the ethhdr from first
287 * buffer
288 */
289 skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
290 map = pci_map_single(qdev->pdev,
291 lrg_buf_cb->skb->data,
292 qdev->lrg_buffer_len -
293 QL_HEADER_SPACE,
294 PCI_DMA_FROMDEVICE);
295 lrg_buf_cb->buf_phy_addr_low =
296 cpu_to_le32(LS_64BITS(map));
297 lrg_buf_cb->buf_phy_addr_high =
298 cpu_to_le32(MS_64BITS(map));
299 pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
300 pci_unmap_len_set(lrg_buf_cb, maplen,
301 qdev->lrg_buffer_len -
302 QL_HEADER_SPACE);
303 }
304 }
305
306 qdev->lrg_buf_free_count++;
307}
308
309static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
310 *qdev)
311{
312 struct ql_rcv_buf_cb *lrg_buf_cb;
313
314 if ((lrg_buf_cb = qdev->lrg_buf_free_head) != NULL) {
315 if ((qdev->lrg_buf_free_head = lrg_buf_cb->next) == NULL)
316 qdev->lrg_buf_free_tail = NULL;
317 qdev->lrg_buf_free_count--;
318 }
319
320 return lrg_buf_cb;
321}
322
323static u32 addrBits = EEPROM_NO_ADDR_BITS;
324static u32 dataBits = EEPROM_NO_DATA_BITS;
325
326static void fm93c56a_deselect(struct ql3_adapter *qdev);
327static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
328 unsigned short *value);
329
330/*
331 * Caller holds hw_lock.
332 */
333static void fm93c56a_select(struct ql3_adapter *qdev)
334{
335 struct ql3xxx_port_registers __iomem *port_regs =
336 qdev->mem_map_registers;
337
338 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
339 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
340 ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
341 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
342 ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
343}
344
345/*
346 * Caller holds hw_lock.
347 */
348static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
349{
350 int i;
351 u32 mask;
352 u32 dataBit;
353 u32 previousBit;
354 struct ql3xxx_port_registers __iomem *port_regs =
355 qdev->mem_map_registers;
356
357 /* Clock in a zero, then do the start bit */
358 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
359 ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
360 AUBURN_EEPROM_DO_1);
361 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
362 ISP_NVRAM_MASK | qdev->
363 eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
364 AUBURN_EEPROM_CLK_RISE);
365 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
366 ISP_NVRAM_MASK | qdev->
367 eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
368 AUBURN_EEPROM_CLK_FALL);
369
370 mask = 1 << (FM93C56A_CMD_BITS - 1);
371 /* Force the previous data bit to be different */
372 previousBit = 0xffff;
373 for (i = 0; i < FM93C56A_CMD_BITS; i++) {
374 dataBit =
375 (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0;
376 if (previousBit != dataBit) {
377 /*
378 * If the bit changed, then change the DO state to
379 * match
380 */
381 ql_write_common_reg(qdev,
382 &port_regs->CommonRegs.
383 serialPortInterfaceReg,
384 ISP_NVRAM_MASK | qdev->
385 eeprom_cmd_data | dataBit);
386 previousBit = dataBit;
387 }
388 ql_write_common_reg(qdev,
389 &port_regs->CommonRegs.
390 serialPortInterfaceReg,
391 ISP_NVRAM_MASK | qdev->
392 eeprom_cmd_data | dataBit |
393 AUBURN_EEPROM_CLK_RISE);
394 ql_write_common_reg(qdev,
395 &port_regs->CommonRegs.
396 serialPortInterfaceReg,
397 ISP_NVRAM_MASK | qdev->
398 eeprom_cmd_data | dataBit |
399 AUBURN_EEPROM_CLK_FALL);
400 cmd = cmd << 1;
401 }
402
403 mask = 1 << (addrBits - 1);
404 /* Force the previous data bit to be different */
405 previousBit = 0xffff;
406 for (i = 0; i < addrBits; i++) {
407 dataBit =
408 (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 :
409 AUBURN_EEPROM_DO_0;
410 if (previousBit != dataBit) {
411 /*
412 * If the bit changed, then change the DO state to
413 * match
414 */
415 ql_write_common_reg(qdev,
416 &port_regs->CommonRegs.
417 serialPortInterfaceReg,
418 ISP_NVRAM_MASK | qdev->
419 eeprom_cmd_data | dataBit);
420 previousBit = dataBit;
421 }
422 ql_write_common_reg(qdev,
423 &port_regs->CommonRegs.
424 serialPortInterfaceReg,
425 ISP_NVRAM_MASK | qdev->
426 eeprom_cmd_data | dataBit |
427 AUBURN_EEPROM_CLK_RISE);
428 ql_write_common_reg(qdev,
429 &port_regs->CommonRegs.
430 serialPortInterfaceReg,
431 ISP_NVRAM_MASK | qdev->
432 eeprom_cmd_data | dataBit |
433 AUBURN_EEPROM_CLK_FALL);
434 eepromAddr = eepromAddr << 1;
435 }
436}
437
438/*
439 * Caller holds hw_lock.
440 */
441static void fm93c56a_deselect(struct ql3_adapter *qdev)
442{
443 struct ql3xxx_port_registers __iomem *port_regs =
444 qdev->mem_map_registers;
445 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
446 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
447 ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
448}
449
450/*
451 * Caller holds hw_lock.
452 */
453static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
454{
455 int i;
456 u32 data = 0;
457 u32 dataBit;
458 struct ql3xxx_port_registers __iomem *port_regs =
459 qdev->mem_map_registers;
460
461 /* Read the data bits */
462 /* The first bit is a dummy. Clock right over it. */
463 for (i = 0; i < dataBits; i++) {
464 ql_write_common_reg(qdev,
465 &port_regs->CommonRegs.
466 serialPortInterfaceReg,
467 ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
468 AUBURN_EEPROM_CLK_RISE);
469 ql_write_common_reg(qdev,
470 &port_regs->CommonRegs.
471 serialPortInterfaceReg,
472 ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
473 AUBURN_EEPROM_CLK_FALL);
474 dataBit =
475 (ql_read_common_reg
476 (qdev,
477 &port_regs->CommonRegs.
478 serialPortInterfaceReg) & AUBURN_EEPROM_DI_1) ? 1 : 0;
479 data = (data << 1) | dataBit;
480 }
481 *value = (u16) data;
482}
483
484/*
485 * Caller holds hw_lock.
486 */
487static void eeprom_readword(struct ql3_adapter *qdev,
488 u32 eepromAddr, unsigned short *value)
489{
490 fm93c56a_select(qdev);
491 fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
492 fm93c56a_datain(qdev, value);
493 fm93c56a_deselect(qdev);
494}
495
496static void ql_swap_mac_addr(u8 * macAddress)
497{
498#ifdef __BIG_ENDIAN
499 u8 temp;
500 temp = macAddress[0];
501 macAddress[0] = macAddress[1];
502 macAddress[1] = temp;
503 temp = macAddress[2];
504 macAddress[2] = macAddress[3];
505 macAddress[3] = temp;
506 temp = macAddress[4];
507 macAddress[4] = macAddress[5];
508 macAddress[5] = temp;
509#endif
510}
511
512static int ql_get_nvram_params(struct ql3_adapter *qdev)
513{
514 u16 *pEEPROMData;
515 u16 checksum = 0;
516 u32 index;
517 unsigned long hw_flags;
518
519 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
520
521 pEEPROMData = (u16 *) & qdev->nvram_data;
522 qdev->eeprom_cmd_data = 0;
523 if(ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
524 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
525 2) << 10)) {
526 printk(KERN_ERR PFX"%s: Failed ql_sem_spinlock().\n",
527 __func__);
528 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
529 return -1;
530 }
531
532 for (index = 0; index < EEPROM_SIZE; index++) {
533 eeprom_readword(qdev, index, pEEPROMData);
534 checksum += *pEEPROMData;
535 pEEPROMData++;
536 }
537 ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);
538
539 if (checksum != 0) {
540 printk(KERN_ERR PFX "%s: checksum should be zero, is %x!!\n",
541 qdev->ndev->name, checksum);
542 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
543 return -1;
544 }
545
546 /*
547 * We have a problem with endianness for the MAC addresses
548 * and the two 8-bit values version, and numPorts. We
549 * have to swap them on big endian systems.
550 */
551 ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn0.macAddress);
552 ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn1.macAddress);
553 ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn2.macAddress);
554 ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn3.macAddress);
555 pEEPROMData = (u16 *) & qdev->nvram_data.version;
556 *pEEPROMData = le16_to_cpu(*pEEPROMData);
557
558 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
559 return checksum;
560}
561
562static const u32 PHYAddr[2] = {
563 PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
564};
565
566static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
567{
568 struct ql3xxx_port_registers __iomem *port_regs =
569 qdev->mem_map_registers;
570 u32 temp;
571 int count = 1000;
572
573 while (count) {
574 temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
575 if (!(temp & MAC_MII_STATUS_BSY))
576 return 0;
577 udelay(10);
578 count--;
579 }
580 return -1;
581}
582
583static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
584{
585 struct ql3xxx_port_registers __iomem *port_regs =
586 qdev->mem_map_registers;
587 u32 scanControl;
588
589 if (qdev->numPorts > 1) {
590 /* Auto scan will cycle through multiple ports */
591 scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
592 } else {
593 scanControl = MAC_MII_CONTROL_SC;
594 }
595
596 /*
597 * Scan register 1 of PHY/PETBI,
598 * Set up to scan both devices
599 * The autoscan starts from the first register, completes
600 * the last one before rolling over to the first
601 */
602 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
603 PHYAddr[0] | MII_SCAN_REGISTER);
604
605 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
606 (scanControl) |
607 ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
608}
609
610static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
611{
612 u8 ret;
613 struct ql3xxx_port_registers __iomem *port_regs =
614 qdev->mem_map_registers;
615
616 /* See if scan mode is enabled before we turn it off */
617 if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
618 (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
619 /* Scan is enabled */
620 ret = 1;
621 } else {
622 /* Scan is disabled */
623 ret = 0;
624 }
625
626 /*
627 * When disabling scan mode you must first change the MII register
628 * address
629 */
630 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
631 PHYAddr[0] | MII_SCAN_REGISTER);
632
633 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
634 ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
635 MAC_MII_CONTROL_RC) << 16));
636
637 return ret;
638}
639
640static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
641 u16 regAddr, u16 value, u32 mac_index)
642{
643 struct ql3xxx_port_registers __iomem *port_regs =
644 qdev->mem_map_registers;
645 u8 scanWasEnabled;
646
647 scanWasEnabled = ql_mii_disable_scan_mode(qdev);
648
649 if (ql_wait_for_mii_ready(qdev)) {
650 if (netif_msg_link(qdev))
651 printk(KERN_WARNING PFX
652 "%s Timed out waiting for management port to "
653 "get free before issuing command.\n",
654 qdev->ndev->name);
655 return -1;
656 }
657
658 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
659 PHYAddr[mac_index] | regAddr);
660
661 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
662
663 /* Wait for write to complete 9/10/04 SJP */
664 if (ql_wait_for_mii_ready(qdev)) {
665 if (netif_msg_link(qdev))
666 printk(KERN_WARNING PFX
667 "%s: Timed out waiting for management port to"
668 "get free before issuing command.\n",
669 qdev->ndev->name);
670 return -1;
671 }
672
673 if (scanWasEnabled)
674 ql_mii_enable_scan_mode(qdev);
675
676 return 0;
677}
678
679static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
680 u16 * value, u32 mac_index)
681{
682 struct ql3xxx_port_registers __iomem *port_regs =
683 qdev->mem_map_registers;
684 u8 scanWasEnabled;
685 u32 temp;
686
687 scanWasEnabled = ql_mii_disable_scan_mode(qdev);
688
689 if (ql_wait_for_mii_ready(qdev)) {
690 if (netif_msg_link(qdev))
691 printk(KERN_WARNING PFX
692 "%s: Timed out waiting for management port to "
693 "get free before issuing command.\n",
694 qdev->ndev->name);
695 return -1;
696 }
697
698 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
699 PHYAddr[mac_index] | regAddr);
700
701 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
702 (MAC_MII_CONTROL_RC << 16));
703
704 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
705 (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);
706
707 /* Wait for the read to complete */
708 if (ql_wait_for_mii_ready(qdev)) {
709 if (netif_msg_link(qdev))
710 printk(KERN_WARNING PFX
711 "%s: Timed out waiting for management port to "
712 "get free after issuing command.\n",
713 qdev->ndev->name);
714 return -1;
715 }
716
717 temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
718 *value = (u16) temp;
719
720 if (scanWasEnabled)
721 ql_mii_enable_scan_mode(qdev);
722
723 return 0;
724}
725
726static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
727{
728 struct ql3xxx_port_registers __iomem *port_regs =
729 qdev->mem_map_registers;
730
731 ql_mii_disable_scan_mode(qdev);
732
733 if (ql_wait_for_mii_ready(qdev)) {
734 if (netif_msg_link(qdev))
735 printk(KERN_WARNING PFX
736 "%s: Timed out waiting for management port to "
737 "get free before issuing command.\n",
738 qdev->ndev->name);
739 return -1;
740 }
741
742 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
743 qdev->PHYAddr | regAddr);
744
745 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
746
747 /* Wait for write to complete. */
748 if (ql_wait_for_mii_ready(qdev)) {
749 if (netif_msg_link(qdev))
750 printk(KERN_WARNING PFX
751 "%s: Timed out waiting for management port to "
752 "get free before issuing command.\n",
753 qdev->ndev->name);
754 return -1;
755 }
756
757 ql_mii_enable_scan_mode(qdev);
758
759 return 0;
760}
761
762static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
763{
764 u32 temp;
765 struct ql3xxx_port_registers __iomem *port_regs =
766 qdev->mem_map_registers;
767
768 ql_mii_disable_scan_mode(qdev);
769
770 if (ql_wait_for_mii_ready(qdev)) {
771 if (netif_msg_link(qdev))
772 printk(KERN_WARNING PFX
773 "%s: Timed out waiting for management port to "
774 "get free before issuing command.\n",
775 qdev->ndev->name);
776 return -1;
777 }
778
779 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
780 qdev->PHYAddr | regAddr);
781
782 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
783 (MAC_MII_CONTROL_RC << 16));
784
785 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
786 (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);
787
788 /* Wait for the read to complete */
789 if (ql_wait_for_mii_ready(qdev)) {
790 if (netif_msg_link(qdev))
791 printk(KERN_WARNING PFX
792 "%s: Timed out waiting for management port to "
793 "get free before issuing command.\n",
794 qdev->ndev->name);
795 return -1;
796 }
797
798 temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
799 *value = (u16) temp;
800
801 ql_mii_enable_scan_mode(qdev);
802
803 return 0;
804}
805
806static void ql_petbi_reset(struct ql3_adapter *qdev)
807{
808 ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
809}
810
811static void ql_petbi_start_neg(struct ql3_adapter *qdev)
812{
813 u16 reg;
814
815 /* Enable Auto-negotiation sense */
816 ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
817 reg |= PETBI_TBI_AUTO_SENSE;
818 ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);
819
820 ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
821 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);
822
823 ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
824 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
825 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
826
827}
828
829static void ql_petbi_reset_ex(struct ql3_adapter *qdev, u32 mac_index)
830{
831 ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
832 mac_index);
833}
834
835static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev, u32 mac_index)
836{
837 u16 reg;
838
839 /* Enable Auto-negotiation sense */
840 ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg, mac_index);
841 reg |= PETBI_TBI_AUTO_SENSE;
842 ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg, mac_index);
843
844 ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
845 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX, mac_index);
846
847 ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
848 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
849 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
850 mac_index);
851}
852
853static void ql_petbi_init(struct ql3_adapter *qdev)
854{
855 ql_petbi_reset(qdev);
856 ql_petbi_start_neg(qdev);
857}
858
859static void ql_petbi_init_ex(struct ql3_adapter *qdev, u32 mac_index)
860{
861 ql_petbi_reset_ex(qdev, mac_index);
862 ql_petbi_start_neg_ex(qdev, mac_index);
863}
864
865static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
866{
867 u16 reg;
868
869 if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
870 return 0;
871
872 return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
873}
874
875static int ql_phy_get_speed(struct ql3_adapter *qdev)
876{
877 u16 reg;
878
879 if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
880 return 0;
881
882 reg = (((reg & 0x18) >> 3) & 3);
883
884 if (reg == 2)
885 return SPEED_1000;
886 else if (reg == 1)
887 return SPEED_100;
888 else if (reg == 0)
889 return SPEED_10;
890 else
891 return -1;
892}
893
894static int ql_is_full_dup(struct ql3_adapter *qdev)
895{
896 u16 reg;
897
898 if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
899 return 0;
900
901 return (reg & PHY_AUX_DUPLEX_STAT) != 0;
902}
903
904static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
905{
906 u16 reg;
907
908 if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
909 return 0;
910
911 return (reg & PHY_NEG_PAUSE) != 0;
912}
913
914/*
915 * Caller holds hw_lock.
916 */
917static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
918{
919 struct ql3xxx_port_registers __iomem *port_regs =
920 qdev->mem_map_registers;
921 u32 value;
922
923 if (enable)
924 value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
925 else
926 value = (MAC_CONFIG_REG_PE << 16);
927
928 if (qdev->mac_index)
929 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
930 else
931 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
932}
933
934/*
935 * Caller holds hw_lock.
936 */
937static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
938{
939 struct ql3xxx_port_registers __iomem *port_regs =
940 qdev->mem_map_registers;
941 u32 value;
942
943 if (enable)
944 value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
945 else
946 value = (MAC_CONFIG_REG_SR << 16);
947
948 if (qdev->mac_index)
949 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
950 else
951 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
952}
953
954/*
955 * Caller holds hw_lock.
956 */
957static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
958{
959 struct ql3xxx_port_registers __iomem *port_regs =
960 qdev->mem_map_registers;
961 u32 value;
962
963 if (enable)
964 value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
965 else
966 value = (MAC_CONFIG_REG_GM << 16);
967
968 if (qdev->mac_index)
969 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
970 else
971 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
972}
973
974/*
975 * Caller holds hw_lock.
976 */
977static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
978{
979 struct ql3xxx_port_registers __iomem *port_regs =
980 qdev->mem_map_registers;
981 u32 value;
982
983 if (enable)
984 value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
985 else
986 value = (MAC_CONFIG_REG_FD << 16);
987
988 if (qdev->mac_index)
989 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
990 else
991 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
992}
993
994/*
995 * Caller holds hw_lock.
996 */
997static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
998{
999 struct ql3xxx_port_registers __iomem *port_regs =
1000 qdev->mem_map_registers;
1001 u32 value;
1002
1003 if (enable)
1004 value =
1005 ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
1006 ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
1007 else
1008 value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);
1009
1010 if (qdev->mac_index)
1011 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1012 else
1013 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1014}
1015
1016/*
1017 * Caller holds hw_lock.
1018 */
1019static int ql_is_fiber(struct ql3_adapter *qdev)
1020{
1021 struct ql3xxx_port_registers __iomem *port_regs =
1022 qdev->mem_map_registers;
1023 u32 bitToCheck = 0;
1024 u32 temp;
1025
1026 switch (qdev->mac_index) {
1027 case 0:
1028 bitToCheck = PORT_STATUS_SM0;
1029 break;
1030 case 1:
1031 bitToCheck = PORT_STATUS_SM1;
1032 break;
1033 }
1034
1035 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1036 return (temp & bitToCheck) != 0;
1037}
1038
1039static int ql_is_auto_cfg(struct ql3_adapter *qdev)
1040{
1041 u16 reg;
1042 ql_mii_read_reg(qdev, 0x00, &reg);
1043 return (reg & 0x1000) != 0;
1044}
1045
1046/*
1047 * Caller holds hw_lock.
1048 */
1049static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
1050{
1051 struct ql3xxx_port_registers __iomem *port_regs =
1052 qdev->mem_map_registers;
1053 u32 bitToCheck = 0;
1054 u32 temp;
1055
1056 switch (qdev->mac_index) {
1057 case 0:
1058 bitToCheck = PORT_STATUS_AC0;
1059 break;
1060 case 1:
1061 bitToCheck = PORT_STATUS_AC1;
1062 break;
1063 }
1064
1065 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1066 if (temp & bitToCheck) {
1067 if (netif_msg_link(qdev))
1068 printk(KERN_INFO PFX
1069 "%s: Auto-Negotiate complete.\n",
1070 qdev->ndev->name);
1071 return 1;
1072 } else {
1073 if (netif_msg_link(qdev))
1074 printk(KERN_WARNING PFX
1075 "%s: Auto-Negotiate incomplete.\n",
1076 qdev->ndev->name);
1077 return 0;
1078 }
1079}
1080
1081/*
1082 * ql_is_neg_pause() returns 1 if pause was negotiated to be on
1083 */
1084static int ql_is_neg_pause(struct ql3_adapter *qdev)
1085{
1086 if (ql_is_fiber(qdev))
1087 return ql_is_petbi_neg_pause(qdev);
1088 else
1089 return ql_is_phy_neg_pause(qdev);
1090}
1091
1092static int ql_auto_neg_error(struct ql3_adapter *qdev)
1093{
1094 struct ql3xxx_port_registers __iomem *port_regs =
1095 qdev->mem_map_registers;
1096 u32 bitToCheck = 0;
1097 u32 temp;
1098
1099 switch (qdev->mac_index) {
1100 case 0:
1101 bitToCheck = PORT_STATUS_AE0;
1102 break;
1103 case 1:
1104 bitToCheck = PORT_STATUS_AE1;
1105 break;
1106 }
1107 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1108 return (temp & bitToCheck) != 0;
1109}
1110
1111static u32 ql_get_link_speed(struct ql3_adapter *qdev)
1112{
1113 if (ql_is_fiber(qdev))
1114 return SPEED_1000;
1115 else
1116 return ql_phy_get_speed(qdev);
1117}
1118
1119static int ql_is_link_full_dup(struct ql3_adapter *qdev)
1120{
1121 if (ql_is_fiber(qdev))
1122 return 1;
1123 else
1124 return ql_is_full_dup(qdev);
1125}
1126
1127/*
1128 * Caller holds hw_lock.
1129 */
1130static int ql_link_down_detect(struct ql3_adapter *qdev)
1131{
1132 struct ql3xxx_port_registers __iomem *port_regs =
1133 qdev->mem_map_registers;
1134 u32 bitToCheck = 0;
1135 u32 temp;
1136
1137 switch (qdev->mac_index) {
1138 case 0:
1139 bitToCheck = ISP_CONTROL_LINK_DN_0;
1140 break;
1141 case 1:
1142 bitToCheck = ISP_CONTROL_LINK_DN_1;
1143 break;
1144 }
1145
1146 temp =
1147 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
1148 return (temp & bitToCheck) != 0;
1149}
1150
1151/*
1152 * Caller holds hw_lock.
1153 */
1154static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
1155{
1156 struct ql3xxx_port_registers __iomem *port_regs =
1157 qdev->mem_map_registers;
1158
1159 switch (qdev->mac_index) {
1160 case 0:
1161 ql_write_common_reg(qdev,
1162 &port_regs->CommonRegs.ispControlStatus,
1163 (ISP_CONTROL_LINK_DN_0) |
1164 (ISP_CONTROL_LINK_DN_0 << 16));
1165 break;
1166
1167 case 1:
1168 ql_write_common_reg(qdev,
1169 &port_regs->CommonRegs.ispControlStatus,
1170 (ISP_CONTROL_LINK_DN_1) |
1171 (ISP_CONTROL_LINK_DN_1 << 16));
1172 break;
1173
1174 default:
1175 return 1;
1176 }
1177
1178 return 0;
1179}
1180
1181/*
1182 * Caller holds hw_lock.
1183 */
1184static int ql_this_adapter_controls_port(struct ql3_adapter *qdev,
1185 u32 mac_index)
1186{
1187 struct ql3xxx_port_registers __iomem *port_regs =
1188 qdev->mem_map_registers;
1189 u32 bitToCheck = 0;
1190 u32 temp;
1191
1192 switch (mac_index) {
1193 case 0:
1194 bitToCheck = PORT_STATUS_F1_ENABLED;
1195 break;
1196 case 1:
1197 bitToCheck = PORT_STATUS_F3_ENABLED;
1198 break;
1199 default:
1200 break;
1201 }
1202
1203 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1204 if (temp & bitToCheck) {
1205 if (netif_msg_link(qdev))
1206 printk(KERN_DEBUG PFX
1207 "%s: is not link master.\n", qdev->ndev->name);
1208 return 0;
1209 } else {
1210 if (netif_msg_link(qdev))
1211 printk(KERN_DEBUG PFX
1212 "%s: is link master.\n", qdev->ndev->name);
1213 return 1;
1214 }
1215}
1216
1217static void ql_phy_reset_ex(struct ql3_adapter *qdev, u32 mac_index)
1218{
1219 ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET, mac_index);
1220}
1221
1222static void ql_phy_start_neg_ex(struct ql3_adapter *qdev, u32 mac_index)
1223{
1224 u16 reg;
1225
1226 ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER,
1227 PHY_NEG_PAUSE | PHY_NEG_ADV_SPEED | 1, mac_index);
1228
1229 ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, mac_index);
1230 ql_mii_write_reg_ex(qdev, CONTROL_REG, reg | PHY_CTRL_RESTART_NEG,
1231 mac_index);
1232}
1233
1234static void ql_phy_init_ex(struct ql3_adapter *qdev, u32 mac_index)
1235{
1236 ql_phy_reset_ex(qdev, mac_index);
1237 ql_phy_start_neg_ex(qdev, mac_index);
1238}
1239
1240/*
1241 * Caller holds hw_lock.
1242 */
1243static u32 ql_get_link_state(struct ql3_adapter *qdev)
1244{
1245 struct ql3xxx_port_registers __iomem *port_regs =
1246 qdev->mem_map_registers;
1247 u32 bitToCheck = 0;
1248 u32 temp, linkState;
1249
1250 switch (qdev->mac_index) {
1251 case 0:
1252 bitToCheck = PORT_STATUS_UP0;
1253 break;
1254 case 1:
1255 bitToCheck = PORT_STATUS_UP1;
1256 break;
1257 }
1258 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1259 if (temp & bitToCheck) {
1260 linkState = LS_UP;
1261 } else {
1262 linkState = LS_DOWN;
1263 if (netif_msg_link(qdev))
1264 printk(KERN_WARNING PFX
1265 "%s: Link is down.\n", qdev->ndev->name);
1266 }
1267 return linkState;
1268}
1269
1270static int ql_port_start(struct ql3_adapter *qdev)
1271{
1272 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1273 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1274 2) << 7))
1275 return -1;
1276
1277 if (ql_is_fiber(qdev)) {
1278 ql_petbi_init(qdev);
1279 } else {
1280 /* Copper port */
1281 ql_phy_init_ex(qdev, qdev->mac_index);
1282 }
1283
1284 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1285 return 0;
1286}
1287
1288static int ql_finish_auto_neg(struct ql3_adapter *qdev)
1289{
1290
1291 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1292 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1293 2) << 7))
1294 return -1;
1295
1296 if (!ql_auto_neg_error(qdev)) {
1297 if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
1298 /* configure the MAC */
1299 if (netif_msg_link(qdev))
1300 printk(KERN_DEBUG PFX
1301 "%s: Configuring link.\n",
1302 qdev->ndev->
1303 name);
1304 ql_mac_cfg_soft_reset(qdev, 1);
1305 ql_mac_cfg_gig(qdev,
1306 (ql_get_link_speed
1307 (qdev) ==
1308 SPEED_1000));
1309 ql_mac_cfg_full_dup(qdev,
1310 ql_is_link_full_dup
1311 (qdev));
1312 ql_mac_cfg_pause(qdev,
1313 ql_is_neg_pause
1314 (qdev));
1315 ql_mac_cfg_soft_reset(qdev, 0);
1316
1317 /* enable the MAC */
1318 if (netif_msg_link(qdev))
1319 printk(KERN_DEBUG PFX
1320 "%s: Enabling mac.\n",
1321 qdev->ndev->
1322 name);
1323 ql_mac_enable(qdev, 1);
1324 }
1325
1326 if (netif_msg_link(qdev))
1327 printk(KERN_DEBUG PFX
1328 "%s: Change port_link_state LS_DOWN to LS_UP.\n",
1329 qdev->ndev->name);
1330 qdev->port_link_state = LS_UP;
1331 netif_start_queue(qdev->ndev);
1332 netif_carrier_on(qdev->ndev);
1333 if (netif_msg_link(qdev))
1334 printk(KERN_INFO PFX
1335 "%s: Link is up at %d Mbps, %s duplex.\n",
1336 qdev->ndev->name,
1337 ql_get_link_speed(qdev),
1338 ql_is_link_full_dup(qdev)
1339 ? "full" : "half");
1340
1341 } else { /* Remote error detected */
1342
1343 if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
1344 if (netif_msg_link(qdev))
1345 printk(KERN_DEBUG PFX
1346 "%s: Remote error detected. "
1347 "Calling ql_port_start().\n",
1348 qdev->ndev->
1349 name);
1350 /*
1351 * ql_port_start() is shared code and needs
1352 * to lock the PHY on it's own.
1353 */
1354 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1355 if(ql_port_start(qdev)) {/* Restart port */
1356 return -1;
1357 } else
1358 return 0;
1359 }
1360 }
1361 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1362 return 0;
1363}
1364
1365static void ql_link_state_machine(struct ql3_adapter *qdev)
1366{
1367 u32 curr_link_state;
1368 unsigned long hw_flags;
1369
1370 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1371
1372 curr_link_state = ql_get_link_state(qdev);
1373
1374 if (test_bit(QL_RESET_ACTIVE,&qdev->flags)) {
1375 if (netif_msg_link(qdev))
1376 printk(KERN_INFO PFX
1377 "%s: Reset in progress, skip processing link "
1378 "state.\n", qdev->ndev->name);
1379 return;
1380 }
1381
1382 switch (qdev->port_link_state) {
1383 default:
1384 if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
1385 ql_port_start(qdev);
1386 }
1387 qdev->port_link_state = LS_DOWN;
1388 /* Fall Through */
1389
1390 case LS_DOWN:
1391 if (netif_msg_link(qdev))
1392 printk(KERN_DEBUG PFX
1393 "%s: port_link_state = LS_DOWN.\n",
1394 qdev->ndev->name);
1395 if (curr_link_state == LS_UP) {
1396 if (netif_msg_link(qdev))
1397 printk(KERN_DEBUG PFX
1398 "%s: curr_link_state = LS_UP.\n",
1399 qdev->ndev->name);
1400 if (ql_is_auto_neg_complete(qdev))
1401 ql_finish_auto_neg(qdev);
1402
1403 if (qdev->port_link_state == LS_UP)
1404 ql_link_down_detect_clear(qdev);
1405
1406 }
1407 break;
1408
1409 case LS_UP:
1410 /*
1411 * See if the link is currently down or went down and came
1412 * back up
1413 */
1414 if ((curr_link_state == LS_DOWN) || ql_link_down_detect(qdev)) {
1415 if (netif_msg_link(qdev))
1416 printk(KERN_INFO PFX "%s: Link is down.\n",
1417 qdev->ndev->name);
1418 qdev->port_link_state = LS_DOWN;
1419 }
1420 break;
1421 }
1422 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1423}
1424
1425/*
1426 * Caller must take hw_lock and QL_PHY_GIO_SEM.
1427 */
1428static void ql_get_phy_owner(struct ql3_adapter *qdev)
1429{
1430 if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
1431 set_bit(QL_LINK_MASTER,&qdev->flags);
1432 else
1433 clear_bit(QL_LINK_MASTER,&qdev->flags);
1434}
1435
1436/*
1437 * Caller must take hw_lock and QL_PHY_GIO_SEM.
1438 */
1439static void ql_init_scan_mode(struct ql3_adapter *qdev)
1440{
1441 ql_mii_enable_scan_mode(qdev);
1442
1443 if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
1444 if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
1445 ql_petbi_init_ex(qdev, qdev->mac_index);
1446 } else {
1447 if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
1448 ql_phy_init_ex(qdev, qdev->mac_index);
1449 }
1450}
1451
1452/*
1453 * MII_Setup needs to be called before taking the PHY out of reset so that the
1454 * management interface clock speed can be set properly. It would be better if
1455 * we had a way to disable MDC until after the PHY is out of reset, but we
1456 * don't have that capability.
1457 */
1458static int ql_mii_setup(struct ql3_adapter *qdev)
1459{
1460 u32 reg;
1461 struct ql3xxx_port_registers __iomem *port_regs =
1462 qdev->mem_map_registers;
1463
1464 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1465 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1466 2) << 7))
1467 return -1;
1468
1469 /* Divide 125MHz clock by 28 to meet PHY timing requirements */
1470 reg = MAC_MII_CONTROL_CLK_SEL_DIV28;
1471
1472 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
1473 reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));
1474
1475 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1476 return 0;
1477}
1478
1479static u32 ql_supported_modes(struct ql3_adapter *qdev)
1480{
1481 u32 supported;
1482
1483 if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
1484 supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
1485 | SUPPORTED_Autoneg;
1486 } else {
1487 supported = SUPPORTED_10baseT_Half
1488 | SUPPORTED_10baseT_Full
1489 | SUPPORTED_100baseT_Half
1490 | SUPPORTED_100baseT_Full
1491 | SUPPORTED_1000baseT_Half
1492 | SUPPORTED_1000baseT_Full
1493 | SUPPORTED_Autoneg | SUPPORTED_TP;
1494 }
1495
1496 return supported;
1497}
1498
1499static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
1500{
1501 int status;
1502 unsigned long hw_flags;
1503 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1504 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1505 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1506 2) << 7))
1507 return 0;
1508 status = ql_is_auto_cfg(qdev);
1509 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1510 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1511 return status;
1512}
1513
1514static u32 ql_get_speed(struct ql3_adapter *qdev)
1515{
1516 u32 status;
1517 unsigned long hw_flags;
1518 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1519 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1520 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1521 2) << 7))
1522 return 0;
1523 status = ql_get_link_speed(qdev);
1524 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1525 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1526 return status;
1527}
1528
1529static int ql_get_full_dup(struct ql3_adapter *qdev)
1530{
1531 int status;
1532 unsigned long hw_flags;
1533 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1534 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1535 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1536 2) << 7))
1537 return 0;
1538 status = ql_is_link_full_dup(qdev);
1539 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1540 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1541 return status;
1542}
1543
1544
1545static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
1546{
1547 struct ql3_adapter *qdev = netdev_priv(ndev);
1548
1549 ecmd->transceiver = XCVR_INTERNAL;
1550 ecmd->supported = ql_supported_modes(qdev);
1551
1552 if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
1553 ecmd->port = PORT_FIBRE;
1554 } else {
1555 ecmd->port = PORT_TP;
1556 ecmd->phy_address = qdev->PHYAddr;
1557 }
1558 ecmd->advertising = ql_supported_modes(qdev);
1559 ecmd->autoneg = ql_get_auto_cfg_status(qdev);
1560 ecmd->speed = ql_get_speed(qdev);
1561 ecmd->duplex = ql_get_full_dup(qdev);
1562 return 0;
1563}
1564
1565static void ql_get_drvinfo(struct net_device *ndev,
1566 struct ethtool_drvinfo *drvinfo)
1567{
1568 struct ql3_adapter *qdev = netdev_priv(ndev);
1569 strncpy(drvinfo->driver, ql3xxx_driver_name, 32);
1570 strncpy(drvinfo->version, ql3xxx_driver_version, 32);
1571 strncpy(drvinfo->fw_version, "N/A", 32);
1572 strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
1573 drvinfo->n_stats = 0;
1574 drvinfo->testinfo_len = 0;
1575 drvinfo->regdump_len = 0;
1576 drvinfo->eedump_len = 0;
1577}
1578
1579static u32 ql_get_msglevel(struct net_device *ndev)
1580{
1581 struct ql3_adapter *qdev = netdev_priv(ndev);
1582 return qdev->msg_enable;
1583}
1584
1585static void ql_set_msglevel(struct net_device *ndev, u32 value)
1586{
1587 struct ql3_adapter *qdev = netdev_priv(ndev);
1588 qdev->msg_enable = value;
1589}
1590
1591static struct ethtool_ops ql3xxx_ethtool_ops = {
1592 .get_settings = ql_get_settings,
1593 .get_drvinfo = ql_get_drvinfo,
1594 .get_perm_addr = ethtool_op_get_perm_addr,
1595 .get_link = ethtool_op_get_link,
1596 .get_msglevel = ql_get_msglevel,
1597 .set_msglevel = ql_set_msglevel,
1598};
1599
1600static int ql_populate_free_queue(struct ql3_adapter *qdev)
1601{
1602 struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
1603 u64 map;
1604
1605 while (lrg_buf_cb) {
1606 if (!lrg_buf_cb->skb) {
1607 lrg_buf_cb->skb = dev_alloc_skb(qdev->lrg_buffer_len);
1608 if (unlikely(!lrg_buf_cb->skb)) {
1609 printk(KERN_DEBUG PFX
1610 "%s: Failed dev_alloc_skb().\n",
1611 qdev->ndev->name);
1612 break;
1613 } else {
1614 /*
1615 * We save some space to copy the ethhdr from
1616 * first buffer
1617 */
1618 skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
1619 map = pci_map_single(qdev->pdev,
1620 lrg_buf_cb->skb->data,
1621 qdev->lrg_buffer_len -
1622 QL_HEADER_SPACE,
1623 PCI_DMA_FROMDEVICE);
1624 lrg_buf_cb->buf_phy_addr_low =
1625 cpu_to_le32(LS_64BITS(map));
1626 lrg_buf_cb->buf_phy_addr_high =
1627 cpu_to_le32(MS_64BITS(map));
1628 pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
1629 pci_unmap_len_set(lrg_buf_cb, maplen,
1630 qdev->lrg_buffer_len -
1631 QL_HEADER_SPACE);
1632 --qdev->lrg_buf_skb_check;
1633 if (!qdev->lrg_buf_skb_check)
1634 return 1;
1635 }
1636 }
1637 lrg_buf_cb = lrg_buf_cb->next;
1638 }
1639 return 0;
1640}
1641
1642/*
1643 * Caller holds hw_lock.
1644 */
1645static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
1646{
1647 struct bufq_addr_element *lrg_buf_q_ele;
1648 int i;
1649 struct ql_rcv_buf_cb *lrg_buf_cb;
1650 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
1651
1652 if ((qdev->lrg_buf_free_count >= 8)
1653 && (qdev->lrg_buf_release_cnt >= 16)) {
1654
1655 if (qdev->lrg_buf_skb_check)
1656 if (!ql_populate_free_queue(qdev))
1657 return;
1658
1659 lrg_buf_q_ele = qdev->lrg_buf_next_free;
1660
1661 while ((qdev->lrg_buf_release_cnt >= 16)
1662 && (qdev->lrg_buf_free_count >= 8)) {
1663
1664 for (i = 0; i < 8; i++) {
1665 lrg_buf_cb =
1666 ql_get_from_lrg_buf_free_list(qdev);
1667 lrg_buf_q_ele->addr_high =
1668 lrg_buf_cb->buf_phy_addr_high;
1669 lrg_buf_q_ele->addr_low =
1670 lrg_buf_cb->buf_phy_addr_low;
1671 lrg_buf_q_ele++;
1672
1673 qdev->lrg_buf_release_cnt--;
1674 }
1675
1676 qdev->lrg_buf_q_producer_index++;
1677
1678 if (qdev->lrg_buf_q_producer_index == NUM_LBUFQ_ENTRIES)
1679 qdev->lrg_buf_q_producer_index = 0;
1680
1681 if (qdev->lrg_buf_q_producer_index ==
1682 (NUM_LBUFQ_ENTRIES - 1)) {
1683 lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
1684 }
1685 }
1686
1687 qdev->lrg_buf_next_free = lrg_buf_q_ele;
1688
1689 ql_write_common_reg(qdev,
1690 (u32 *) & port_regs->CommonRegs.
1691 rxLargeQProducerIndex,
1692 qdev->lrg_buf_q_producer_index);
1693 }
1694}
1695
1696static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
1697 struct ob_mac_iocb_rsp *mac_rsp)
1698{
1699 struct ql_tx_buf_cb *tx_cb;
1700
1701 tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
1702 pci_unmap_single(qdev->pdev,
1703 pci_unmap_addr(tx_cb, mapaddr),
1704 pci_unmap_len(tx_cb, maplen), PCI_DMA_TODEVICE);
1705 dev_kfree_skb_irq(tx_cb->skb);
1706 qdev->stats.tx_packets++;
1707 qdev->stats.tx_bytes += tx_cb->skb->len;
1708 tx_cb->skb = NULL;
1709 atomic_inc(&qdev->tx_count);
1710}
1711
1712static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
1713 struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
1714{
1715 long int offset;
1716 u32 lrg_buf_phy_addr_low = 0;
1717 struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
1718 struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
1719 u32 *curr_ial_ptr;
1720 struct sk_buff *skb;
1721 u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);
1722
1723 /*
1724 * Get the inbound address list (small buffer).
1725 */
1726 offset = qdev->small_buf_index * QL_SMALL_BUFFER_SIZE;
1727 if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
1728 qdev->small_buf_index = 0;
1729
1730 curr_ial_ptr = (u32 *) (qdev->small_buf_virt_addr + offset);
1731 qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
1732 qdev->small_buf_release_cnt++;
1733
1734 /* start of first buffer */
1735 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
1736 lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
1737 qdev->lrg_buf_release_cnt++;
1738 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
1739 qdev->lrg_buf_index = 0;
1740 curr_ial_ptr++; /* 64-bit pointers require two incs. */
1741 curr_ial_ptr++;
1742
1743 /* start of second buffer */
1744 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
1745 lrg_buf_cb2 = &qdev->lrg_buf[qdev->lrg_buf_index];
1746
1747 /*
1748 * Second buffer gets sent up the stack.
1749 */
1750 qdev->lrg_buf_release_cnt++;
1751 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
1752 qdev->lrg_buf_index = 0;
1753 skb = lrg_buf_cb2->skb;
1754
1755 qdev->stats.rx_packets++;
1756 qdev->stats.rx_bytes += length;
1757
1758 skb_put(skb, length);
1759 pci_unmap_single(qdev->pdev,
1760 pci_unmap_addr(lrg_buf_cb2, mapaddr),
1761 pci_unmap_len(lrg_buf_cb2, maplen),
1762 PCI_DMA_FROMDEVICE);
1763 prefetch(skb->data);
1764 skb->dev = qdev->ndev;
1765 skb->ip_summed = CHECKSUM_NONE;
1766 skb->protocol = eth_type_trans(skb, qdev->ndev);
1767
1768 netif_receive_skb(skb);
1769 qdev->ndev->last_rx = jiffies;
1770 lrg_buf_cb2->skb = NULL;
1771
1772 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
1773 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
1774}
1775
1776static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
1777 struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
1778{
1779 long int offset;
1780 u32 lrg_buf_phy_addr_low = 0;
1781 struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
1782 struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
1783 u32 *curr_ial_ptr;
1784 struct sk_buff *skb1, *skb2;
1785 struct net_device *ndev = qdev->ndev;
1786 u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
1787 u16 size = 0;
1788
1789 /*
1790 * Get the inbound address list (small buffer).
1791 */
1792
1793 offset = qdev->small_buf_index * QL_SMALL_BUFFER_SIZE;
1794 if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
1795 qdev->small_buf_index = 0;
1796 curr_ial_ptr = (u32 *) (qdev->small_buf_virt_addr + offset);
1797 qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
1798 qdev->small_buf_release_cnt++;
1799
1800 /* start of first buffer */
1801 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
1802 lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
1803
1804 qdev->lrg_buf_release_cnt++;
1805 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
1806 qdev->lrg_buf_index = 0;
1807 skb1 = lrg_buf_cb1->skb;
1808 curr_ial_ptr++; /* 64-bit pointers require two incs. */
1809 curr_ial_ptr++;
1810
1811 /* start of second buffer */
1812 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
1813 lrg_buf_cb2 = &qdev->lrg_buf[qdev->lrg_buf_index];
1814 skb2 = lrg_buf_cb2->skb;
1815 qdev->lrg_buf_release_cnt++;
1816 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
1817 qdev->lrg_buf_index = 0;
1818
1819 qdev->stats.rx_packets++;
1820 qdev->stats.rx_bytes += length;
1821
1822 /*
1823 * Copy the ethhdr from first buffer to second. This
1824 * is necessary for IP completions.
1825 */
1826 if (*((u16 *) skb1->data) != 0xFFFF)
1827 size = VLAN_ETH_HLEN;
1828 else
1829 size = ETH_HLEN;
1830
1831 skb_put(skb2, length); /* Just the second buffer length here. */
1832 pci_unmap_single(qdev->pdev,
1833 pci_unmap_addr(lrg_buf_cb2, mapaddr),
1834 pci_unmap_len(lrg_buf_cb2, maplen),
1835 PCI_DMA_FROMDEVICE);
1836 prefetch(skb2->data);
1837
1838 memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size);
1839 skb2->dev = qdev->ndev;
1840 skb2->ip_summed = CHECKSUM_NONE;
1841 skb2->protocol = eth_type_trans(skb2, qdev->ndev);
1842
1843 netif_receive_skb(skb2);
1844 ndev->last_rx = jiffies;
1845 lrg_buf_cb2->skb = NULL;
1846
1847 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
1848 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
1849}
1850
1851static int ql_tx_rx_clean(struct ql3_adapter *qdev,
1852 int *tx_cleaned, int *rx_cleaned, int work_to_do)
1853{
1854 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
1855 struct net_rsp_iocb *net_rsp;
1856 struct net_device *ndev = qdev->ndev;
1857 unsigned long hw_flags;
1858
1859 /* While there are entries in the completion queue. */
1860 while ((cpu_to_le32(*(qdev->prsp_producer_index)) !=
1861 qdev->rsp_consumer_index) && (*rx_cleaned < work_to_do)) {
1862
1863 net_rsp = qdev->rsp_current;
1864 switch (net_rsp->opcode) {
1865
1866 case OPCODE_OB_MAC_IOCB_FN0:
1867 case OPCODE_OB_MAC_IOCB_FN2:
1868 ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
1869 net_rsp);
1870 (*tx_cleaned)++;
1871 break;
1872
1873 case OPCODE_IB_MAC_IOCB:
1874 ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
1875 net_rsp);
1876 (*rx_cleaned)++;
1877 break;
1878
1879 case OPCODE_IB_IP_IOCB:
1880 ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
1881 net_rsp);
1882 (*rx_cleaned)++;
1883 break;
1884 default:
1885 {
1886 u32 *tmp = (u32 *) net_rsp;
1887 printk(KERN_ERR PFX
1888 "%s: Hit default case, not "
1889 "handled!\n"
1890 " dropping the packet, opcode = "
1891 "%x.\n",
1892 ndev->name, net_rsp->opcode);
1893 printk(KERN_ERR PFX
1894 "0x%08lx 0x%08lx 0x%08lx 0x%08lx \n",
1895 (unsigned long int)tmp[0],
1896 (unsigned long int)tmp[1],
1897 (unsigned long int)tmp[2],
1898 (unsigned long int)tmp[3]);
1899 }
1900 }
1901
1902 qdev->rsp_consumer_index++;
1903
1904 if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
1905 qdev->rsp_consumer_index = 0;
1906 qdev->rsp_current = qdev->rsp_q_virt_addr;
1907 } else {
1908 qdev->rsp_current++;
1909 }
1910 }
1911
1912 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1913
1914 ql_update_lrg_bufq_prod_index(qdev);
1915
1916 if (qdev->small_buf_release_cnt >= 16) {
1917 while (qdev->small_buf_release_cnt >= 16) {
1918 qdev->small_buf_q_producer_index++;
1919
1920 if (qdev->small_buf_q_producer_index ==
1921 NUM_SBUFQ_ENTRIES)
1922 qdev->small_buf_q_producer_index = 0;
1923 qdev->small_buf_release_cnt -= 8;
1924 }
1925
1926 ql_write_common_reg(qdev,
1927 (u32 *) & port_regs->CommonRegs.
1928 rxSmallQProducerIndex,
1929 qdev->small_buf_q_producer_index);
1930 }
1931
1932 ql_write_common_reg(qdev,
1933 (u32 *) & port_regs->CommonRegs.rspQConsumerIndex,
1934 qdev->rsp_consumer_index);
1935 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1936
1937 if (unlikely(netif_queue_stopped(qdev->ndev))) {
1938 if (netif_queue_stopped(qdev->ndev) &&
1939 (atomic_read(&qdev->tx_count) > (NUM_REQ_Q_ENTRIES / 4)))
1940 netif_wake_queue(qdev->ndev);
1941 }
1942
1943 return *tx_cleaned + *rx_cleaned;
1944}
1945
1946static int ql_poll(struct net_device *ndev, int *budget)
1947{
1948 struct ql3_adapter *qdev = netdev_priv(ndev);
1949 int work_to_do = min(*budget, ndev->quota);
1950 int rx_cleaned = 0, tx_cleaned = 0;
1951
1952 if (!netif_carrier_ok(ndev))
1953 goto quit_polling;
1954
1955 ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, work_to_do);
1956 *budget -= rx_cleaned;
1957 ndev->quota -= rx_cleaned;
1958
1959 if ((!tx_cleaned && !rx_cleaned) || !netif_running(ndev)) {
1960quit_polling:
1961 netif_rx_complete(ndev);
1962 ql_enable_interrupts(qdev);
1963 return 0;
1964 }
1965 return 1;
1966}
1967
1968static irqreturn_t ql3xxx_isr(int irq, void *dev_id, struct pt_regs *regs)
1969{
1970
1971 struct net_device *ndev = dev_id;
1972 struct ql3_adapter *qdev = netdev_priv(ndev);
1973 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
1974 u32 value;
1975 int handled = 1;
1976 u32 var;
1977
1978 port_regs = qdev->mem_map_registers;
1979
1980 value =
1981 ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
1982
1983 if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
1984 spin_lock(&qdev->adapter_lock);
1985 netif_stop_queue(qdev->ndev);
1986 netif_carrier_off(qdev->ndev);
1987 ql_disable_interrupts(qdev);
1988 qdev->port_link_state = LS_DOWN;
1989 set_bit(QL_RESET_ACTIVE,&qdev->flags) ;
1990
1991 if (value & ISP_CONTROL_FE) {
1992 /*
1993 * Chip Fatal Error.
1994 */
1995 var =
1996 ql_read_page0_reg_l(qdev,
1997 &port_regs->PortFatalErrStatus);
1998 printk(KERN_WARNING PFX
1999 "%s: Resetting chip. PortFatalErrStatus "
2000 "register = 0x%x\n", ndev->name, var);
2001 set_bit(QL_RESET_START,&qdev->flags) ;
2002 } else {
2003 /*
2004 * Soft Reset Requested.
2005 */
2006 set_bit(QL_RESET_PER_SCSI,&qdev->flags) ;
2007 printk(KERN_ERR PFX
2008 "%s: Another function issued a reset to the "
2009 "chip. ISR value = %x.\n", ndev->name, value);
2010 }
2011 queue_work(qdev->workqueue, &qdev->reset_work);
2012 spin_unlock(&qdev->adapter_lock);
2013 } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
2014 ql_disable_interrupts(qdev);
2015 if (likely(netif_rx_schedule_prep(ndev)))
2016 __netif_rx_schedule(ndev);
2017 else
2018 ql_enable_interrupts(qdev);
2019 } else {
2020 return IRQ_NONE;
2021 }
2022
2023 return IRQ_RETVAL(handled);
2024}
2025
2026static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
2027{
2028 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
2029 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2030 struct ql_tx_buf_cb *tx_cb;
2031 struct ob_mac_iocb_req *mac_iocb_ptr;
2032 u64 map;
2033
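	/* Stop the queue if fewer than two request queue entries remain free. */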
2034 if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
2035 if (!netif_queue_stopped(ndev))
2036 netif_stop_queue(ndev);
2037 return NETDEV_TX_BUSY;
2038 }
2039 tx_cb = &qdev->tx_buf[qdev->req_producer_index] ;
2040 mac_iocb_ptr = tx_cb->queue_entry;
2041 memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
2042 mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
2043 mac_iocb_ptr->flags |= qdev->mb_bit_mask;
2044 mac_iocb_ptr->transaction_id = qdev->req_producer_index;
2045 mac_iocb_ptr->data_len = cpu_to_le16((u16) skb->len);
2046 tx_cb->skb = skb;
2047 map = pci_map_single(qdev->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
2048 mac_iocb_ptr->buf_addr0_low = cpu_to_le32(LS_64BITS(map));
2049 mac_iocb_ptr->buf_addr0_high = cpu_to_le32(MS_64BITS(map));
2050 mac_iocb_ptr->buf_0_len = cpu_to_le32(skb->len | OB_MAC_IOCB_REQ_E);
2051 pci_unmap_addr_set(tx_cb, mapaddr, map);
2052 pci_unmap_len_set(tx_cb, maplen, skb->len);
2053 atomic_dec(&qdev->tx_count);
2054
2055 qdev->req_producer_index++;
2056 if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
2057 qdev->req_producer_index = 0;
2058 wmb();
2059 ql_write_common_reg_l(qdev,
2060 (u32 *) & port_regs->CommonRegs.reqQProducerIndex,
2061 qdev->req_producer_index);
2062
2063 ndev->trans_start = jiffies;
2064 if (netif_msg_tx_queued(qdev))
2065 printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n",
2066 ndev->name, qdev->req_producer_index, skb->len);
2067
2068 return NETDEV_TX_OK;
2069}
2070static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
2071{
2072 qdev->req_q_size =
2073 (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));
2074
2075 qdev->req_q_virt_addr =
2076 pci_alloc_consistent(qdev->pdev,
2077 (size_t) qdev->req_q_size,
2078 &qdev->req_q_phy_addr);
2079
2080 if ((qdev->req_q_virt_addr == NULL) ||
2081 LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
2082 printk(KERN_ERR PFX "%s: reqQ failed.\n",
2083 qdev->ndev->name);
2084 return -ENOMEM;
2085 }
2086
2087 qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
2088
2089 qdev->rsp_q_virt_addr =
2090 pci_alloc_consistent(qdev->pdev,
2091 (size_t) qdev->rsp_q_size,
2092 &qdev->rsp_q_phy_addr);
2093
2094 if ((qdev->rsp_q_virt_addr == NULL) ||
2095 LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
2096 printk(KERN_ERR PFX
2097 "%s: rspQ allocation failed\n",
2098 qdev->ndev->name);
2099 pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
2100 qdev->req_q_virt_addr,
2101 qdev->req_q_phy_addr);
2102 return -ENOMEM;
2103 }
2104
2105 set_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags);
2106
2107 return 0;
2108}
2109
2110static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
2111{
2112 if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags)) {
2113 printk(KERN_INFO PFX
2114 "%s: Already done.\n", qdev->ndev->name);
2115 return;
2116 }
2117
2118 pci_free_consistent(qdev->pdev,
2119 qdev->req_q_size,
2120 qdev->req_q_virt_addr, qdev->req_q_phy_addr);
2121
2122 qdev->req_q_virt_addr = NULL;
2123
2124 pci_free_consistent(qdev->pdev,
2125 qdev->rsp_q_size,
2126 qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
2127
2128 qdev->rsp_q_virt_addr = NULL;
2129
2130 clear_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags);
2131}
2132
2133static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
2134{
2135 /* Create Large Buffer Queue */
2136 qdev->lrg_buf_q_size =
2137 NUM_LBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
2138 if (qdev->lrg_buf_q_size < PAGE_SIZE)
2139 qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
2140 else
2141 qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
2142
2143 qdev->lrg_buf_q_alloc_virt_addr =
2144 pci_alloc_consistent(qdev->pdev,
2145 qdev->lrg_buf_q_alloc_size,
2146 &qdev->lrg_buf_q_alloc_phy_addr);
2147
2148 if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
2149 printk(KERN_ERR PFX
2150 "%s: lBufQ failed\n", qdev->ndev->name);
2151 return -ENOMEM;
2152 }
2153 qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
2154 qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;
2155
2156 /* Create Small Buffer Queue */
2157 qdev->small_buf_q_size =
2158 NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
2159 if (qdev->small_buf_q_size < PAGE_SIZE)
2160 qdev->small_buf_q_alloc_size = PAGE_SIZE;
2161 else
2162 qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
2163
2164 qdev->small_buf_q_alloc_virt_addr =
2165 pci_alloc_consistent(qdev->pdev,
2166 qdev->small_buf_q_alloc_size,
2167 &qdev->small_buf_q_alloc_phy_addr);
2168
2169 if (qdev->small_buf_q_alloc_virt_addr == NULL) {
2170 printk(KERN_ERR PFX
2171 "%s: Small Buffer Queue allocation failed.\n",
2172 qdev->ndev->name);
2173 pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
2174 qdev->lrg_buf_q_alloc_virt_addr,
2175 qdev->lrg_buf_q_alloc_phy_addr);
2176 return -ENOMEM;
2177 }
2178
2179 qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
2180 qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
2181 set_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags);
2182 return 0;
2183}
2184
2185static void ql_free_buffer_queues(struct ql3_adapter *qdev)
2186{
2187 if (!test_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags)) {
2188 printk(KERN_INFO PFX
2189 "%s: Already done.\n", qdev->ndev->name);
2190 return;
2191 }
2192
2193 pci_free_consistent(qdev->pdev,
2194 qdev->lrg_buf_q_alloc_size,
2195 qdev->lrg_buf_q_alloc_virt_addr,
2196 qdev->lrg_buf_q_alloc_phy_addr);
2197
2198 qdev->lrg_buf_q_virt_addr = NULL;
2199
2200 pci_free_consistent(qdev->pdev,
2201 qdev->small_buf_q_alloc_size,
2202 qdev->small_buf_q_alloc_virt_addr,
2203 qdev->small_buf_q_alloc_phy_addr);
2204
2205 qdev->small_buf_q_virt_addr = NULL;
2206
2207 clear_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags);
2208}
2209
2210static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
2211{
2212 int i;
2213 struct bufq_addr_element *small_buf_q_entry;
2214
2215	/* Currently we allocate one chunk of memory and use it for small buffers */
2216 qdev->small_buf_total_size =
2217 (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
2218 QL_SMALL_BUFFER_SIZE);
2219
2220 qdev->small_buf_virt_addr =
2221 pci_alloc_consistent(qdev->pdev,
2222 qdev->small_buf_total_size,
2223 &qdev->small_buf_phy_addr);
2224
2225 if (qdev->small_buf_virt_addr == NULL) {
2226 printk(KERN_ERR PFX
2227 "%s: Failed to get small buffer memory.\n",
2228 qdev->ndev->name);
2229 return -ENOMEM;
2230 }
2231
2232 qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
2233 qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);
2234
2235 small_buf_q_entry = qdev->small_buf_q_virt_addr;
2236
2237 qdev->last_rsp_offset = qdev->small_buf_phy_addr_low;
2238
2239 /* Initialize the small buffer queue. */
2240 for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
2241 small_buf_q_entry->addr_high =
2242 cpu_to_le32(qdev->small_buf_phy_addr_high);
2243 small_buf_q_entry->addr_low =
2244 cpu_to_le32(qdev->small_buf_phy_addr_low +
2245 (i * QL_SMALL_BUFFER_SIZE));
2246 small_buf_q_entry++;
2247 }
2248 qdev->small_buf_index = 0;
2249 set_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags);
2250 return 0;
2251}
2252
2253static void ql_free_small_buffers(struct ql3_adapter *qdev)
2254{
2255 if (!test_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags)) {
2256 printk(KERN_INFO PFX
2257 "%s: Already done.\n", qdev->ndev->name);
2258 return;
2259 }
2260 if (qdev->small_buf_virt_addr != NULL) {
2261 pci_free_consistent(qdev->pdev,
2262 qdev->small_buf_total_size,
2263 qdev->small_buf_virt_addr,
2264 qdev->small_buf_phy_addr);
2265
2266 qdev->small_buf_virt_addr = NULL;
2267 }
2268}
2269
2270static void ql_free_large_buffers(struct ql3_adapter *qdev)
2271{
2272 int i = 0;
2273 struct ql_rcv_buf_cb *lrg_buf_cb;
2274
2275 for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
2276 lrg_buf_cb = &qdev->lrg_buf[i];
2277 if (lrg_buf_cb->skb) {
2278 dev_kfree_skb(lrg_buf_cb->skb);
2279 pci_unmap_single(qdev->pdev,
2280 pci_unmap_addr(lrg_buf_cb, mapaddr),
2281 pci_unmap_len(lrg_buf_cb, maplen),
2282 PCI_DMA_FROMDEVICE);
2283 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2284 } else {
2285 break;
2286 }
2287 }
2288}
2289
2290static void ql_init_large_buffers(struct ql3_adapter *qdev)
2291{
2292 int i;
2293 struct ql_rcv_buf_cb *lrg_buf_cb;
2294 struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;
2295
2296 for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
2297 lrg_buf_cb = &qdev->lrg_buf[i];
2298 buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
2299 buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
2300 buf_addr_ele++;
2301 }
2302 qdev->lrg_buf_index = 0;
2303 qdev->lrg_buf_skb_check = 0;
2304}
2305
2306static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2307{
2308 int i;
2309 struct ql_rcv_buf_cb *lrg_buf_cb;
2310 struct sk_buff *skb;
2311 u64 map;
2312
2313 for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
2314 skb = dev_alloc_skb(qdev->lrg_buffer_len);
2315 if (unlikely(!skb)) {
2316 /* Better luck next round */
2317 printk(KERN_ERR PFX
2318			       "%s: large buffer allocation failed "
2319			       "for %d bytes at index %d.\n",
2320			       qdev->ndev->name,
2321			       qdev->lrg_buffer_len, i);
2322 ql_free_large_buffers(qdev);
2323 return -ENOMEM;
2324 } else {
2325
2326 lrg_buf_cb = &qdev->lrg_buf[i];
2327 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2328 lrg_buf_cb->index = i;
2329 lrg_buf_cb->skb = skb;
2330 /*
2331 * We save some space to copy the ethhdr from first
2332 * buffer
2333 */
2334 skb_reserve(skb, QL_HEADER_SPACE);
2335 map = pci_map_single(qdev->pdev,
2336 skb->data,
2337 qdev->lrg_buffer_len -
2338 QL_HEADER_SPACE,
2339 PCI_DMA_FROMDEVICE);
2340 pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
2341 pci_unmap_len_set(lrg_buf_cb, maplen,
2342 qdev->lrg_buffer_len -
2343 QL_HEADER_SPACE);
2344 lrg_buf_cb->buf_phy_addr_low =
2345 cpu_to_le32(LS_64BITS(map));
2346 lrg_buf_cb->buf_phy_addr_high =
2347 cpu_to_le32(MS_64BITS(map));
2348 }
2349 }
2350 return 0;
2351}
2352
2353static void ql_create_send_free_list(struct ql3_adapter *qdev)
2354{
2355 struct ql_tx_buf_cb *tx_cb;
2356 int i;
2357 struct ob_mac_iocb_req *req_q_curr =
2358 qdev->req_q_virt_addr;
2359
2360 /* Create free list of transmit buffers */
2361 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2362 tx_cb = &qdev->tx_buf[i];
2363 tx_cb->skb = NULL;
2364 tx_cb->queue_entry = req_q_curr;
2365 req_q_curr++;
2366 }
2367}
2368
2369static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
2370{
2371 if (qdev->ndev->mtu == NORMAL_MTU_SIZE)
2372 qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
2373 else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
2374 qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
2375 } else {
2376 printk(KERN_ERR PFX
2377 "%s: Invalid mtu size. Only 1500 and 9000 are accepted.\n",
2378 qdev->ndev->name);
2379 return -ENOMEM;
2380 }
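	/*
	 * Add room for the VLAN Ethernet header, VLAN ID and driver header
	 * space on top of the chosen MTU.
	 */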
2381 qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
2382 qdev->max_frame_size =
2383 (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
2384
2385 /*
2386 * First allocate a page of shared memory and use it for shadow
2387 * locations of Network Request Queue Consumer Address Register and
2388 * Network Completion Queue Producer Index Register
2389 */
2390 qdev->shadow_reg_virt_addr =
2391 pci_alloc_consistent(qdev->pdev,
2392 PAGE_SIZE, &qdev->shadow_reg_phy_addr);
2393
2394 if (qdev->shadow_reg_virt_addr != NULL) {
2395 qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr;
2396 qdev->req_consumer_index_phy_addr_high =
2397 MS_64BITS(qdev->shadow_reg_phy_addr);
2398 qdev->req_consumer_index_phy_addr_low =
2399 LS_64BITS(qdev->shadow_reg_phy_addr);
2400
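		/*
		 * The response producer index shadow lives 8 bytes past the
		 * request consumer index shadow in the same page.
		 */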
2401 qdev->prsp_producer_index =
2402 (u32 *) (((u8 *) qdev->preq_consumer_index) + 8);
2403 qdev->rsp_producer_index_phy_addr_high =
2404 qdev->req_consumer_index_phy_addr_high;
2405 qdev->rsp_producer_index_phy_addr_low =
2406 qdev->req_consumer_index_phy_addr_low + 8;
2407 } else {
2408 printk(KERN_ERR PFX
2409 "%s: shadowReg Alloc failed.\n", qdev->ndev->name);
2410 return -ENOMEM;
2411 }
2412
2413 if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
2414 printk(KERN_ERR PFX
2415 "%s: ql_alloc_net_req_rsp_queues failed.\n",
2416 qdev->ndev->name);
2417 goto err_req_rsp;
2418 }
2419
2420 if (ql_alloc_buffer_queues(qdev) != 0) {
2421 printk(KERN_ERR PFX
2422 "%s: ql_alloc_buffer_queues failed.\n",
2423 qdev->ndev->name);
2424 goto err_buffer_queues;
2425 }
2426
2427 if (ql_alloc_small_buffers(qdev) != 0) {
2428 printk(KERN_ERR PFX
2429 "%s: ql_alloc_small_buffers failed\n", qdev->ndev->name);
2430 goto err_small_buffers;
2431 }
2432
2433 if (ql_alloc_large_buffers(qdev) != 0) {
2434 printk(KERN_ERR PFX
2435 "%s: ql_alloc_large_buffers failed\n", qdev->ndev->name);
2436 goto err_small_buffers;
2437 }
2438
2439 /* Initialize the large buffer queue. */
2440 ql_init_large_buffers(qdev);
2441 ql_create_send_free_list(qdev);
2442
2443 qdev->rsp_current = qdev->rsp_q_virt_addr;
2444
2445 return 0;
2446
2447err_small_buffers:
2448 ql_free_buffer_queues(qdev);
2449err_buffer_queues:
2450 ql_free_net_req_rsp_queues(qdev);
2451err_req_rsp:
2452 pci_free_consistent(qdev->pdev,
2453 PAGE_SIZE,
2454 qdev->shadow_reg_virt_addr,
2455 qdev->shadow_reg_phy_addr);
2456
2457 return -ENOMEM;
2458}
2459
2460static void ql_free_mem_resources(struct ql3_adapter *qdev)
2461{
2462 ql_free_large_buffers(qdev);
2463 ql_free_small_buffers(qdev);
2464 ql_free_buffer_queues(qdev);
2465 ql_free_net_req_rsp_queues(qdev);
2466 if (qdev->shadow_reg_virt_addr != NULL) {
2467 pci_free_consistent(qdev->pdev,
2468 PAGE_SIZE,
2469 qdev->shadow_reg_virt_addr,
2470 qdev->shadow_reg_phy_addr);
2471 qdev->shadow_reg_virt_addr = NULL;
2472 }
2473}
2474
2475static int ql_init_misc_registers(struct ql3_adapter *qdev)
2476{
2477 struct ql3xxx_local_ram_registers *local_ram =
2478 (struct ql3xxx_local_ram_registers *)qdev->mem_map_registers;
2479
2480 if(ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
2481 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
2482 2) << 4))
2483 return -1;
2484
2485 ql_write_page2_reg(qdev,
2486 &local_ram->bufletSize, qdev->nvram_data.bufletSize);
2487
2488 ql_write_page2_reg(qdev,
2489 &local_ram->maxBufletCount,
2490 qdev->nvram_data.bufletCount);
2491
2492 ql_write_page2_reg(qdev,
2493 &local_ram->freeBufletThresholdLow,
2494 (qdev->nvram_data.tcpWindowThreshold25 << 16) |
2495 (qdev->nvram_data.tcpWindowThreshold0));
2496
2497 ql_write_page2_reg(qdev,
2498 &local_ram->freeBufletThresholdHigh,
2499 qdev->nvram_data.tcpWindowThreshold50);
2500
2501 ql_write_page2_reg(qdev,
2502 &local_ram->ipHashTableBase,
2503 (qdev->nvram_data.ipHashTableBaseHi << 16) |
2504 qdev->nvram_data.ipHashTableBaseLo);
2505 ql_write_page2_reg(qdev,
2506 &local_ram->ipHashTableCount,
2507 qdev->nvram_data.ipHashTableSize);
2508 ql_write_page2_reg(qdev,
2509 &local_ram->tcpHashTableBase,
2510 (qdev->nvram_data.tcpHashTableBaseHi << 16) |
2511 qdev->nvram_data.tcpHashTableBaseLo);
2512 ql_write_page2_reg(qdev,
2513 &local_ram->tcpHashTableCount,
2514 qdev->nvram_data.tcpHashTableSize);
2515 ql_write_page2_reg(qdev,
2516 &local_ram->ncbBase,
2517 (qdev->nvram_data.ncbTableBaseHi << 16) |
2518 qdev->nvram_data.ncbTableBaseLo);
2519 ql_write_page2_reg(qdev,
2520 &local_ram->maxNcbCount,
2521 qdev->nvram_data.ncbTableSize);
2522 ql_write_page2_reg(qdev,
2523 &local_ram->drbBase,
2524 (qdev->nvram_data.drbTableBaseHi << 16) |
2525 qdev->nvram_data.drbTableBaseLo);
2526 ql_write_page2_reg(qdev,
2527 &local_ram->maxDrbCount,
2528 qdev->nvram_data.drbTableSize);
2529 ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
2530 return 0;
2531}
2532
2533static int ql_adapter_initialize(struct ql3_adapter *qdev)
2534{
2535 u32 value;
2536 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2537 struct ql3xxx_host_memory_registers __iomem *hmem_regs =
2538 (struct ql3xxx_host_memory_registers *)port_regs;
2539 u32 delay = 10;
2540 int status = 0;
2541
2542 if(ql_mii_setup(qdev))
2543 return -1;
2544
2545	/* Bring the PHY out of reset */
2546 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
2547 (ISP_SERIAL_PORT_IF_WE |
2548 (ISP_SERIAL_PORT_IF_WE << 16)));
2549
2550 qdev->port_link_state = LS_DOWN;
2551 netif_carrier_off(qdev->ndev);
2552
2553 /* V2 chip fix for ARS-39168. */
2554 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
2555 (ISP_SERIAL_PORT_IF_SDE |
2556 (ISP_SERIAL_PORT_IF_SDE << 16)));
2557
2558 /* Request Queue Registers */
2559 *((u32 *) (qdev->preq_consumer_index)) = 0;
2560 atomic_set(&qdev->tx_count,NUM_REQ_Q_ENTRIES);
2561 qdev->req_producer_index = 0;
2562
2563 ql_write_page1_reg(qdev,
2564 &hmem_regs->reqConsumerIndexAddrHigh,
2565 qdev->req_consumer_index_phy_addr_high);
2566 ql_write_page1_reg(qdev,
2567 &hmem_regs->reqConsumerIndexAddrLow,
2568 qdev->req_consumer_index_phy_addr_low);
2569
2570 ql_write_page1_reg(qdev,
2571 &hmem_regs->reqBaseAddrHigh,
2572 MS_64BITS(qdev->req_q_phy_addr));
2573 ql_write_page1_reg(qdev,
2574 &hmem_regs->reqBaseAddrLow,
2575 LS_64BITS(qdev->req_q_phy_addr));
2576 ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);
2577
2578 /* Response Queue Registers */
2579 *((u16 *) (qdev->prsp_producer_index)) = 0;
2580 qdev->rsp_consumer_index = 0;
2581 qdev->rsp_current = qdev->rsp_q_virt_addr;
2582
2583 ql_write_page1_reg(qdev,
2584 &hmem_regs->rspProducerIndexAddrHigh,
2585 qdev->rsp_producer_index_phy_addr_high);
2586
2587 ql_write_page1_reg(qdev,
2588 &hmem_regs->rspProducerIndexAddrLow,
2589 qdev->rsp_producer_index_phy_addr_low);
2590
2591 ql_write_page1_reg(qdev,
2592 &hmem_regs->rspBaseAddrHigh,
2593 MS_64BITS(qdev->rsp_q_phy_addr));
2594
2595 ql_write_page1_reg(qdev,
2596 &hmem_regs->rspBaseAddrLow,
2597 LS_64BITS(qdev->rsp_q_phy_addr));
2598
2599 ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);
2600
2601 /* Large Buffer Queue */
2602 ql_write_page1_reg(qdev,
2603 &hmem_regs->rxLargeQBaseAddrHigh,
2604 MS_64BITS(qdev->lrg_buf_q_phy_addr));
2605
2606 ql_write_page1_reg(qdev,
2607 &hmem_regs->rxLargeQBaseAddrLow,
2608 LS_64BITS(qdev->lrg_buf_q_phy_addr));
2609
2610 ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, NUM_LBUFQ_ENTRIES);
2611
2612 ql_write_page1_reg(qdev,
2613 &hmem_regs->rxLargeBufferLength,
2614 qdev->lrg_buffer_len);
2615
2616 /* Small Buffer Queue */
2617 ql_write_page1_reg(qdev,
2618 &hmem_regs->rxSmallQBaseAddrHigh,
2619 MS_64BITS(qdev->small_buf_q_phy_addr));
2620
2621 ql_write_page1_reg(qdev,
2622 &hmem_regs->rxSmallQBaseAddrLow,
2623 LS_64BITS(qdev->small_buf_q_phy_addr));
2624
2625 ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
2626 ql_write_page1_reg(qdev,
2627 &hmem_regs->rxSmallBufferLength,
2628 QL_SMALL_BUFFER_SIZE);
2629
2630 qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
2631 qdev->small_buf_release_cnt = 8;
2632 qdev->lrg_buf_q_producer_index = NUM_LBUFQ_ENTRIES - 1;
2633 qdev->lrg_buf_release_cnt = 8;
2634 qdev->lrg_buf_next_free =
2635 (struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr;
2636 qdev->small_buf_index = 0;
2637 qdev->lrg_buf_index = 0;
2638 qdev->lrg_buf_free_count = 0;
2639 qdev->lrg_buf_free_head = NULL;
2640 qdev->lrg_buf_free_tail = NULL;
2641
2642 ql_write_common_reg(qdev,
2643 (u32 *) & port_regs->CommonRegs.
2644 rxSmallQProducerIndex,
2645 qdev->small_buf_q_producer_index);
2646 ql_write_common_reg(qdev,
2647 (u32 *) & port_regs->CommonRegs.
2648 rxLargeQProducerIndex,
2649 qdev->lrg_buf_q_producer_index);
2650
2651 /*
2652 * Find out if the chip has already been initialized. If it has, then
2653 * we skip some of the initialization.
2654 */
2655 clear_bit(QL_LINK_MASTER, &qdev->flags);
2656 value = ql_read_page0_reg(qdev, &port_regs->portStatus);
2657 if ((value & PORT_STATUS_IC) == 0) {
2658
2659 /* Chip has not been configured yet, so let it rip. */
2660 if(ql_init_misc_registers(qdev)) {
2661 status = -1;
2662 goto out;
2663 }
2664
2665 if (qdev->mac_index)
2666 ql_write_page0_reg(qdev,
2667 &port_regs->mac1MaxFrameLengthReg,
2668 qdev->max_frame_size);
2669 else
2670 ql_write_page0_reg(qdev,
2671 &port_regs->mac0MaxFrameLengthReg,
2672 qdev->max_frame_size);
2673
2674 value = qdev->nvram_data.tcpMaxWindowSize;
2675 ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);
2676
2677 value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;
2678
2679 if(ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
2680 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
2681 * 2) << 13)) {
2682 status = -1;
2683 goto out;
2684 }
2685 ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
2686 ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
2687 (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
2688 16) | (INTERNAL_CHIP_SD |
2689 INTERNAL_CHIP_WE)));
2690 ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
2691 }
2692
2693
2694 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
2695 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
2696 2) << 7)) {
2697 status = -1;
2698 goto out;
2699 }
2700
2701 ql_init_scan_mode(qdev);
2702 ql_get_phy_owner(qdev);
2703
2704 /* Load the MAC Configuration */
2705
2706 /* Program lower 32 bits of the MAC address */
2707 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
2708 (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
2709 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
2710 ((qdev->ndev->dev_addr[2] << 24)
2711 | (qdev->ndev->dev_addr[3] << 16)
2712 | (qdev->ndev->dev_addr[4] << 8)
2713 | qdev->ndev->dev_addr[5]));
2714
2715 /* Program top 16 bits of the MAC address */
2716 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
2717 ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
2718 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
2719 ((qdev->ndev->dev_addr[0] << 8)
2720 | qdev->ndev->dev_addr[1]));
2721
2722 /* Enable Primary MAC */
2723 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
2724 ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
2725 MAC_ADDR_INDIRECT_PTR_REG_PE));
2726
2727 /* Clear Primary and Secondary IP addresses */
2728 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
2729 ((IP_ADDR_INDEX_REG_MASK << 16) |
2730 (qdev->mac_index << 2)));
2731 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
2732
2733 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
2734 ((IP_ADDR_INDEX_REG_MASK << 16) |
2735 ((qdev->mac_index << 2) + 1)));
2736 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
2737
2738 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
2739
2740 /* Indicate Configuration Complete */
2741 ql_write_page0_reg(qdev,
2742 &port_regs->portControl,
2743 ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));
2744
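	/* Poll for configuration-complete, up to 10 x 500 ms (about 5 seconds). */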
2745 do {
2746 value = ql_read_page0_reg(qdev, &port_regs->portStatus);
2747 if (value & PORT_STATUS_IC)
2748 break;
2749 msleep(500);
2750 } while (--delay);
2751
2752 if (delay == 0) {
2753 printk(KERN_ERR PFX
2754 "%s: Hw Initialization timeout.\n", qdev->ndev->name);
2755 status = -1;
2756 goto out;
2757 }
2758
2759 /* Enable Ethernet Function */
2760 value =
2761 (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
2762 PORT_CONTROL_HH);
2763 ql_write_page0_reg(qdev, &port_regs->portControl,
2764 ((value << 16) | value));
2765
2766out:
2767 return status;
2768}
2769
2770/*
2771 * Caller holds hw_lock.
2772 */
2773static int ql_adapter_reset(struct ql3_adapter *qdev)
2774{
2775 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2776 int status = 0;
2777 u16 value;
2778 int max_wait_time;
2779
2780 set_bit(QL_RESET_ACTIVE, &qdev->flags);
2781 clear_bit(QL_RESET_DONE, &qdev->flags);
2782
2783 /*
2784 * Issue soft reset to chip.
2785 */
2786 printk(KERN_DEBUG PFX
2787 "%s: Issue soft reset to chip.\n",
2788 qdev->ndev->name);
2789 ql_write_common_reg(qdev,
2790 (u32 *) & port_regs->CommonRegs.ispControlStatus,
2791 ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));
2792
2793	/* Wait up to 5 seconds for the reset to complete. */
2794	printk(KERN_DEBUG PFX
2795	       "%s: Waiting up to 5 seconds for reset to complete.\n",
2796	       qdev->ndev->name);
2797
2798 /* Wait until the firmware tells us the Soft Reset is done */
2799 max_wait_time = 5;
2800 do {
2801 value =
2802 ql_read_common_reg(qdev,
2803 &port_regs->CommonRegs.ispControlStatus);
2804 if ((value & ISP_CONTROL_SR) == 0)
2805 break;
2806
2807 ssleep(1);
2808 } while ((--max_wait_time));
2809
2810 /*
2811 * Also, make sure that the Network Reset Interrupt bit has been
2812 * cleared after the soft reset has taken place.
2813 */
2814 value =
2815 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
2816 if (value & ISP_CONTROL_RI) {
2817 printk(KERN_DEBUG PFX
2818 "ql_adapter_reset: clearing RI after reset.\n");
2819 ql_write_common_reg(qdev,
2820 (u32 *) & port_regs->CommonRegs.
2821 ispControlStatus,
2822 ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
2823 }
2824
2825 if (max_wait_time == 0) {
2826 /* Issue Force Soft Reset */
2827 ql_write_common_reg(qdev,
2828 (u32 *) & port_regs->CommonRegs.
2829 ispControlStatus,
2830 ((ISP_CONTROL_FSR << 16) |
2831 ISP_CONTROL_FSR));
2832 /*
2833 * Wait until the firmware tells us the Force Soft Reset is
2834 * done
2835 */
2836 max_wait_time = 5;
2837 do {
2838 value =
2839 ql_read_common_reg(qdev,
2840 &port_regs->CommonRegs.
2841 ispControlStatus);
2842 if ((value & ISP_CONTROL_FSR) == 0) {
2843 break;
2844 }
2845 ssleep(1);
2846 } while ((--max_wait_time));
2847 }
2848 if (max_wait_time == 0)
2849 status = 1;
2850
2851 clear_bit(QL_RESET_ACTIVE, &qdev->flags);
2852 set_bit(QL_RESET_DONE, &qdev->flags);
2853 return status;
2854}
2855
2856static void ql_set_mac_info(struct ql3_adapter *qdev)
2857{
2858 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2859 u32 value, port_status;
2860 u8 func_number;
2861
2862 /* Get the function number */
2863 value =
2864 ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
2865 func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
2866 port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
2867 switch (value & ISP_CONTROL_FN_MASK) {
2868 case ISP_CONTROL_FN0_NET:
2869 qdev->mac_index = 0;
2870 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
2871 qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
2872 qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
2873 qdev->mb_bit_mask = FN0_MA_BITS_MASK;
2874 qdev->PHYAddr = PORT0_PHY_ADDRESS;
2875 if (port_status & PORT_STATUS_SM0)
2876 set_bit(QL_LINK_OPTICAL,&qdev->flags);
2877 else
2878 clear_bit(QL_LINK_OPTICAL,&qdev->flags);
2879 break;
2880
2881 case ISP_CONTROL_FN1_NET:
2882 qdev->mac_index = 1;
2883 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
2884 qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
2885 qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
2886 qdev->mb_bit_mask = FN1_MA_BITS_MASK;
2887 qdev->PHYAddr = PORT1_PHY_ADDRESS;
2888 if (port_status & PORT_STATUS_SM1)
2889 set_bit(QL_LINK_OPTICAL,&qdev->flags);
2890 else
2891 clear_bit(QL_LINK_OPTICAL,&qdev->flags);
2892 break;
2893
2894 case ISP_CONTROL_FN0_SCSI:
2895 case ISP_CONTROL_FN1_SCSI:
2896 default:
2897 printk(KERN_DEBUG PFX
2898 "%s: Invalid function number, ispControlStatus = 0x%x\n",
2899 qdev->ndev->name,value);
2900 break;
2901 }
2902 qdev->numPorts = qdev->nvram_data.numPorts;
2903}
2904
2905static void ql_display_dev_info(struct net_device *ndev)
2906{
2907 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
2908 struct pci_dev *pdev = qdev->pdev;
2909
2910 printk(KERN_INFO PFX
2911 "\n%s Adapter %d RevisionID %d found on PCI slot %d.\n",
2912 DRV_NAME, qdev->index, qdev->chip_rev_id, qdev->pci_slot);
2913 printk(KERN_INFO PFX
2914 "%s Interface.\n",
2915 test_bit(QL_LINK_OPTICAL,&qdev->flags) ? "OPTICAL" : "COPPER");
2916
2917 /*
2918 * Print PCI bus width/type.
2919 */
2920 printk(KERN_INFO PFX
2921 "Bus interface is %s %s.\n",
2922 ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
2923 ((qdev->pci_x) ? "PCI-X" : "PCI"));
2924
2925 printk(KERN_INFO PFX
2926 "mem IO base address adjusted = 0x%p\n",
2927 qdev->mem_map_registers);
2928 printk(KERN_INFO PFX "Interrupt number = %d\n", pdev->irq);
2929
2930 if (netif_msg_probe(qdev))
2931 printk(KERN_INFO PFX
2932 "%s: MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
2933 ndev->name, ndev->dev_addr[0], ndev->dev_addr[1],
2934 ndev->dev_addr[2], ndev->dev_addr[3], ndev->dev_addr[4],
2935 ndev->dev_addr[5]);
2936}
2937
2938static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
2939{
2940 struct net_device *ndev = qdev->ndev;
2941 int retval = 0;
2942
2943 netif_stop_queue(ndev);
2944 netif_carrier_off(ndev);
2945
2946 clear_bit(QL_ADAPTER_UP,&qdev->flags);
2947 clear_bit(QL_LINK_MASTER,&qdev->flags);
2948
2949 ql_disable_interrupts(qdev);
2950
2951 free_irq(qdev->pdev->irq, ndev);
2952
2953 if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) {
2954 printk(KERN_INFO PFX
2955 "%s: calling pci_disable_msi().\n", qdev->ndev->name);
2956 clear_bit(QL_MSI_ENABLED,&qdev->flags);
2957 pci_disable_msi(qdev->pdev);
2958 }
2959
2960 del_timer_sync(&qdev->adapter_timer);
2961
2962 netif_poll_disable(ndev);
2963
2964 if (do_reset) {
2965 int soft_reset;
2966 unsigned long hw_flags;
2967
2968 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
2969 if (ql_wait_for_drvr_lock(qdev)) {
2970 if ((soft_reset = ql_adapter_reset(qdev))) {
2971 printk(KERN_ERR PFX
2972 "%s: ql_adapter_reset(%d) FAILED!\n",
2973 ndev->name, qdev->index);
2974 }
2975 printk(KERN_ERR PFX
2976			       "%s: Releasing driver lock via chip reset.\n", ndev->name);
2977 } else {
2978 printk(KERN_ERR PFX
2979 "%s: Could not acquire driver lock to do "
2980 "reset!\n", ndev->name);
2981 retval = -1;
2982 }
2983 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
2984 }
2985 ql_free_mem_resources(qdev);
2986 return retval;
2987}
2988
2989static int ql_adapter_up(struct ql3_adapter *qdev)
2990{
2991 struct net_device *ndev = qdev->ndev;
2992 int err;
2993 unsigned long irq_flags = SA_SAMPLE_RANDOM | SA_SHIRQ;
2994 unsigned long hw_flags;
2995
2996 if (ql_alloc_mem_resources(qdev)) {
2997 printk(KERN_ERR PFX
2998		       "%s: Unable to allocate buffers.\n", ndev->name);
2999 return -ENOMEM;
3000 }
3001
3002 if (qdev->msi) {
3003 if (pci_enable_msi(qdev->pdev)) {
3004 printk(KERN_ERR PFX
3005 "%s: User requested MSI, but MSI failed to "
3006 "initialize. Continuing without MSI.\n",
3007 qdev->ndev->name);
3008 qdev->msi = 0;
3009 } else {
3010 printk(KERN_INFO PFX "%s: MSI Enabled...\n", qdev->ndev->name);
3011 set_bit(QL_MSI_ENABLED,&qdev->flags);
3012 irq_flags &= ~SA_SHIRQ;
3013 }
3014 }
3015
3016 if ((err = request_irq(qdev->pdev->irq,
3017 ql3xxx_isr,
3018 irq_flags, ndev->name, ndev))) {
3019 printk(KERN_ERR PFX
3020		       "%s: Failed to reserve interrupt %d; already in use.\n",
3021 ndev->name, qdev->pdev->irq);
3022 goto err_irq;
3023 }
3024
3025 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3026
3027 if ((err = ql_wait_for_drvr_lock(qdev))) {
3028 if ((err = ql_adapter_initialize(qdev))) {
3029 printk(KERN_ERR PFX
3030 "%s: Unable to initialize adapter.\n",
3031 ndev->name);
3032 goto err_init;
3033 }
3034 printk(KERN_ERR PFX
3035		       "%s: Releasing driver lock.\n", ndev->name);
3036 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3037 } else {
3038 printk(KERN_ERR PFX
3039		       "%s: Could not acquire driver lock.\n",
3040 ndev->name);
3041 goto err_lock;
3042 }
3043
3044 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3045
3046 set_bit(QL_ADAPTER_UP,&qdev->flags);
3047
3048 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
3049
3050 netif_poll_enable(ndev);
3051 ql_enable_interrupts(qdev);
3052 return 0;
3053
3054err_init:
3055 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3056err_lock:
3057 free_irq(qdev->pdev->irq, ndev);
3058err_irq:
3059 if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) {
3060 printk(KERN_INFO PFX
3061 "%s: calling pci_disable_msi().\n",
3062 qdev->ndev->name);
3063 clear_bit(QL_MSI_ENABLED,&qdev->flags);
3064 pci_disable_msi(qdev->pdev);
3065 }
3066 return err;
3067}
3068
3069static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
3070{
3071 if( ql_adapter_down(qdev,reset) || ql_adapter_up(qdev)) {
3072 printk(KERN_ERR PFX
3073 "%s: Driver up/down cycle failed, "
3074 "closing device\n",qdev->ndev->name);
3075 dev_close(qdev->ndev);
3076 return -1;
3077 }
3078 return 0;
3079}
3080
3081static int ql3xxx_close(struct net_device *ndev)
3082{
3083 struct ql3_adapter *qdev = netdev_priv(ndev);
3084
3085 /*
3086 * Wait for device to recover from a reset.
3087 * (Rarely happens, but possible.)
3088 */
3089 while (!test_bit(QL_ADAPTER_UP,&qdev->flags))
3090 msleep(50);
3091
3092 ql_adapter_down(qdev,QL_DO_RESET);
3093 return 0;
3094}
3095
3096static int ql3xxx_open(struct net_device *ndev)
3097{
3098 struct ql3_adapter *qdev = netdev_priv(ndev);
3099 return (ql_adapter_up(qdev));
3100}
3101
3102static struct net_device_stats *ql3xxx_get_stats(struct net_device *dev)
3103{
3104 struct ql3_adapter *qdev = (struct ql3_adapter *)dev->priv;
3105 return &qdev->stats;
3106}
3107
3108static int ql3xxx_change_mtu(struct net_device *ndev, int new_mtu)
3109{
3110 struct ql3_adapter *qdev = netdev_priv(ndev);
3111 printk(KERN_ERR PFX "%s: new mtu size = %d.\n", ndev->name, new_mtu);
3112 if (new_mtu != NORMAL_MTU_SIZE && new_mtu != JUMBO_MTU_SIZE) {
3113 printk(KERN_ERR PFX
3114 "%s: mtu size of %d is not valid. Use exactly %d or "
3115 "%d.\n", ndev->name, new_mtu, NORMAL_MTU_SIZE,
3116 JUMBO_MTU_SIZE);
3117 return -EINVAL;
3118 }
3119
3120 if (!netif_running(ndev)) {
3121 ndev->mtu = new_mtu;
3122 return 0;
3123 }
3124
3125 ndev->mtu = new_mtu;
3126 return ql_cycle_adapter(qdev,QL_DO_RESET);
3127}
3128
3129static void ql3xxx_set_multicast_list(struct net_device *ndev)
3130{
3131	/*
3132	 * Multicast is not supported; IFF_MULTICAST is cleared at probe time.
3133	 */
3134 return;
3135}
3136
3137static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
3138{
3139 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3140 struct ql3xxx_port_registers __iomem *port_regs =
3141 qdev->mem_map_registers;
3142 struct sockaddr *addr = p;
3143 unsigned long hw_flags;
3144
3145 if (netif_running(ndev))
3146 return -EBUSY;
3147
3148 if (!is_valid_ether_addr(addr->sa_data))
3149 return -EADDRNOTAVAIL;
3150
3151 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3152
3153 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3154 /* Program lower 32 bits of the MAC address */
3155 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3156 (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
3157 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3158 ((ndev->dev_addr[2] << 24) | (ndev->
3159 dev_addr[3] << 16) |
3160 (ndev->dev_addr[4] << 8) | ndev->dev_addr[5]));
3161
3162 /* Program top 16 bits of the MAC address */
3163 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3164 ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
3165 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3166 ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
3167 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3168
3169 return 0;
3170}
3171
3172static void ql3xxx_tx_timeout(struct net_device *ndev)
3173{
3174 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3175
3176 printk(KERN_ERR PFX "%s: Resetting...\n", ndev->name);
3177 /*
3178 * Stop the queues, we've got a problem.
3179 */
3180 netif_stop_queue(ndev);
3181
3182 /*
3183 * Wake up the worker to process this event.
3184 */
3185 queue_work(qdev->workqueue, &qdev->tx_timeout_work);
3186}
3187
3188static void ql_reset_work(struct ql3_adapter *qdev)
3189{
3190 struct net_device *ndev = qdev->ndev;
3191 u32 value;
3192 struct ql_tx_buf_cb *tx_cb;
3193 int max_wait_time, i;
3194 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3195 unsigned long hw_flags;
3196
3197	if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) || test_bit(QL_RESET_START, &qdev->flags)) {
3198 clear_bit(QL_LINK_MASTER,&qdev->flags);
3199
3200 /*
3201 * Loop through the active list and return the skb.
3202 */
3203 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
3204 tx_cb = &qdev->tx_buf[i];
3205 if (tx_cb->skb) {
3206
3207 printk(KERN_DEBUG PFX
3208 "%s: Freeing lost SKB.\n",
3209 qdev->ndev->name);
3210 pci_unmap_single(qdev->pdev,
3211 pci_unmap_addr(tx_cb, mapaddr),
3212 pci_unmap_len(tx_cb, maplen), PCI_DMA_TODEVICE);
3213 dev_kfree_skb(tx_cb->skb);
3214 tx_cb->skb = NULL;
3215 }
3216 }
3217
3218 printk(KERN_ERR PFX
3219 "%s: Clearing NRI after reset.\n", qdev->ndev->name);
3220 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3221 ql_write_common_reg(qdev,
3222 &port_regs->CommonRegs.
3223 ispControlStatus,
3224 ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
3225 /*
3226		 * Wait for the Soft Reset to complete.
3227 */
3228 max_wait_time = 10;
3229 do {
3230			value =
3231			    ql_read_common_reg(qdev,
3232					       &port_regs->CommonRegs.
3233					       ispControlStatus);
3234 if ((value & ISP_CONTROL_SR) == 0) {
3235 printk(KERN_DEBUG PFX
3236 "%s: reset completed.\n",
3237 qdev->ndev->name);
3238 break;
3239 }
3240
3241 if (value & ISP_CONTROL_RI) {
3242 printk(KERN_DEBUG PFX
3243 "%s: clearing NRI after reset.\n",
3244 qdev->ndev->name);
3245 ql_write_common_reg(qdev,
3246 (u32 *) &
3247 port_regs->
3248 CommonRegs.
3249 ispControlStatus,
3250 ((ISP_CONTROL_RI <<
3251 16) | ISP_CONTROL_RI));
3252 }
3253
3254 ssleep(1);
3255 } while (--max_wait_time);
3256 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3257
3258 if (value & ISP_CONTROL_SR) {
3259
3260 /*
3261 * Set the reset flags and clear the board again.
3262 * Nothing else to do...
3263 */
3264 printk(KERN_ERR PFX
3265 "%s: Timed out waiting for reset to "
3266 "complete.\n", ndev->name);
3267 printk(KERN_ERR PFX
3268 "%s: Do a reset.\n", ndev->name);
3269 clear_bit(QL_RESET_PER_SCSI,&qdev->flags);
3270 clear_bit(QL_RESET_START,&qdev->flags);
3271 ql_cycle_adapter(qdev,QL_DO_RESET);
3272 return;
3273 }
3274
3275 clear_bit(QL_RESET_ACTIVE,&qdev->flags);
3276 clear_bit(QL_RESET_PER_SCSI,&qdev->flags);
3277 clear_bit(QL_RESET_START,&qdev->flags);
3278 ql_cycle_adapter(qdev,QL_NO_RESET);
3279 }
3280}
3281
3282static void ql_tx_timeout_work(struct ql3_adapter *qdev)
3283{
3284 ql_cycle_adapter(qdev,QL_DO_RESET);
3285}
3286
3287static void ql_get_board_info(struct ql3_adapter *qdev)
3288{
3289 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3290 u32 value;
3291
3292 value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);
3293
3294 qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
3295 if (value & PORT_STATUS_64)
3296 qdev->pci_width = 64;
3297 else
3298 qdev->pci_width = 32;
3299 if (value & PORT_STATUS_X)
3300 qdev->pci_x = 1;
3301 else
3302 qdev->pci_x = 0;
3303 qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
3304}
3305
3306static void ql3xxx_timer(unsigned long ptr)
3307{
3308 struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;
3309
3310 if (test_bit(QL_RESET_ACTIVE,&qdev->flags)) {
3311 printk(KERN_DEBUG PFX
3312 "%s: Reset in progress.\n",
3313 qdev->ndev->name);
3314 goto end;
3315 }
3316
3317 ql_link_state_machine(qdev);
3318
3319	/* Restart timer on 1 second interval. */
3320end:
3321 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
3322}
3323
3324static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3325 const struct pci_device_id *pci_entry)
3326{
3327 struct net_device *ndev = NULL;
3328 struct ql3_adapter *qdev = NULL;
3329 static int cards_found = 0;
3330 int pci_using_dac, err;
3331
3332 err = pci_enable_device(pdev);
3333 if (err) {
3334 printk(KERN_ERR PFX "%s cannot enable PCI device\n",
3335 pci_name(pdev));
3336 goto err_out;
3337 }
3338
3339 err = pci_request_regions(pdev, DRV_NAME);
3340 if (err) {
3341 printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
3342 pci_name(pdev));
3343 goto err_out_disable_pdev;
3344 }
3345
3346 pci_set_master(pdev);
3347
3348 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
3349 pci_using_dac = 1;
3350 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3351 } else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
3352 pci_using_dac = 0;
3353 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3354 }
3355
3356 if (err) {
3357 printk(KERN_ERR PFX "%s no usable DMA configuration\n",
3358 pci_name(pdev));
3359 goto err_out_free_regions;
3360 }
3361
3362 ndev = alloc_etherdev(sizeof(struct ql3_adapter));
3363 if (!ndev)
3364 goto err_out_free_regions;
3365
3366 SET_MODULE_OWNER(ndev);
3367 SET_NETDEV_DEV(ndev, &pdev->dev);
3368
3369 ndev->features = NETIF_F_LLTX;
3370 if (pci_using_dac)
3371 ndev->features |= NETIF_F_HIGHDMA;
3372
3373 pci_set_drvdata(pdev, ndev);
3374
3375 qdev = netdev_priv(ndev);
3376 qdev->index = cards_found;
3377 qdev->ndev = ndev;
3378 qdev->pdev = pdev;
3379 qdev->port_link_state = LS_DOWN;
3380 if (msi)
3381 qdev->msi = 1;
3382
3383 qdev->msg_enable = netif_msg_init(debug, default_msg);
3384
3385 qdev->mem_map_registers =
3386 ioremap_nocache(pci_resource_start(pdev, 1),
3387 pci_resource_len(qdev->pdev, 1));
3388 if (!qdev->mem_map_registers) {
3389 printk(KERN_ERR PFX "%s: cannot map device registers\n",
3390 pci_name(pdev));
3391 goto err_out_free_ndev;
3392 }
3393
3394 spin_lock_init(&qdev->adapter_lock);
3395 spin_lock_init(&qdev->hw_lock);
3396
3397 /* Set driver entry points */
3398 ndev->open = ql3xxx_open;
3399 ndev->hard_start_xmit = ql3xxx_send;
3400 ndev->stop = ql3xxx_close;
3401 ndev->get_stats = ql3xxx_get_stats;
3402 ndev->change_mtu = ql3xxx_change_mtu;
3403 ndev->set_multicast_list = ql3xxx_set_multicast_list;
3404 SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
3405 ndev->set_mac_address = ql3xxx_set_mac_address;
3406 ndev->tx_timeout = ql3xxx_tx_timeout;
3407 ndev->watchdog_timeo = 5 * HZ;
3408
3409 ndev->poll = &ql_poll;
3410 ndev->weight = 64;
3411
3412 ndev->irq = pdev->irq;
3413
3414 /* make sure the EEPROM is good */
3415 if (ql_get_nvram_params(qdev)) {
3416 printk(KERN_ALERT PFX
3417 "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n",
3418 qdev->index);
3419 goto err_out_iounmap;
3420 }
3421
3422 ql_set_mac_info(qdev);
3423
3424 /* Validate and set parameters */
3425 if (qdev->mac_index) {
3426 memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn2.macAddress,
3427 ETH_ALEN);
3428 } else {
3429 memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn0.macAddress,
3430 ETH_ALEN);
3431 }
3432 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
3433
3434 ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;
3435
3436 /* Turn off support for multicasting */
3437 ndev->flags &= ~IFF_MULTICAST;
3438
3439 /* Record PCI bus information. */
3440 ql_get_board_info(qdev);
3441
3442 /*
3443 * Set the Maximum Memory Read Byte Count value. We do this to handle
3444 * jumbo frames.
3445 */
3446 if (qdev->pci_x) {
3447 pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036);
3448 }
3449
3450 err = register_netdev(ndev);
3451 if (err) {
3452 printk(KERN_ERR PFX "%s: cannot register net device\n",
3453 pci_name(pdev));
3454 goto err_out_iounmap;
3455 }
3456
3457 /* we're going to reset, so assume we have no link for now */
3458
3459 netif_carrier_off(ndev);
3460 netif_stop_queue(ndev);
3461
3462 qdev->workqueue = create_singlethread_workqueue(ndev->name);
3463 INIT_WORK(&qdev->reset_work, (void (*)(void *))ql_reset_work, qdev);
3464 INIT_WORK(&qdev->tx_timeout_work,
3465 (void (*)(void *))ql_tx_timeout_work, qdev);
3466
3467 init_timer(&qdev->adapter_timer);
3468 qdev->adapter_timer.function = ql3xxx_timer;
3469 qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */
3470 qdev->adapter_timer.data = (unsigned long)qdev;
3471
3472 if(!cards_found) {
3473 printk(KERN_ALERT PFX "%s\n", DRV_STRING);
3474 printk(KERN_ALERT PFX "Driver name: %s, Version: %s.\n",
3475 DRV_NAME, DRV_VERSION);
3476 }
3477 ql_display_dev_info(ndev);
3478
3479 cards_found++;
3480 return 0;
3481
3482err_out_iounmap:
3483 iounmap(qdev->mem_map_registers);
3484err_out_free_ndev:
3485 free_netdev(ndev);
3486err_out_free_regions:
3487 pci_release_regions(pdev);
3488err_out_disable_pdev:
3489 pci_disable_device(pdev);
3490 pci_set_drvdata(pdev, NULL);
3491err_out:
3492 return err;
3493}
3494
3495static void __devexit ql3xxx_remove(struct pci_dev *pdev)
3496{
3497 struct net_device *ndev = pci_get_drvdata(pdev);
3498 struct ql3_adapter *qdev = netdev_priv(ndev);
3499
3500 unregister_netdev(ndev);
3501 qdev = netdev_priv(ndev);
3502
3503 ql_disable_interrupts(qdev);
3504
3505 if (qdev->workqueue) {
3506 cancel_delayed_work(&qdev->reset_work);
3507 cancel_delayed_work(&qdev->tx_timeout_work);
3508 destroy_workqueue(qdev->workqueue);
3509 qdev->workqueue = NULL;
3510 }
3511
3512 iounmap((void *)qdev->mmap_virt_base);
3513 pci_release_regions(pdev);
3514 pci_set_drvdata(pdev, NULL);
3515 free_netdev(ndev);
3516}
3517
3518static struct pci_driver ql3xxx_driver = {
3519
3520 .name = DRV_NAME,
3521 .id_table = ql3xxx_pci_tbl,
3522 .probe = ql3xxx_probe,
3523 .remove = __devexit_p(ql3xxx_remove),
3524};
3525
3526static int __init ql3xxx_init_module(void)
3527{
3528 return pci_register_driver(&ql3xxx_driver);
3529}
3530
3531static void __exit ql3xxx_exit(void)
3532{
3533 pci_unregister_driver(&ql3xxx_driver);
3534}
3535
3536module_init(ql3xxx_init_module);
3537module_exit(ql3xxx_exit);
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h
new file mode 100644
index 000000000000..9492cee6b083
--- /dev/null
+++ b/drivers/net/qla3xxx.h
@@ -0,0 +1,1194 @@
1/*
2 * QLogic QLA3xxx NIC HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla3xxx for copyright and licensing details.
6 */
7#ifndef _QLA3XXX_H_
8#define _QLA3XXX_H_
9
10/*
11 * IOCB Definitions...
12 */
13#pragma pack(1)
14
15#define OPCODE_OB_MAC_IOCB_FN0 0x01
16#define OPCODE_OB_MAC_IOCB_FN2 0x21
17#define OPCODE_OB_TCP_IOCB_FN0 0x03
18#define OPCODE_OB_TCP_IOCB_FN2 0x23
19#define OPCODE_UPDATE_NCB_IOCB_FN0 0x00
20#define OPCODE_UPDATE_NCB_IOCB_FN2 0x20
21
22#define OPCODE_UPDATE_NCB_IOCB 0xF0
23#define OPCODE_IB_MAC_IOCB 0xF9
24#define OPCODE_IB_IP_IOCB 0xFA
25#define OPCODE_IB_TCP_IOCB 0xFB
26#define OPCODE_DUMP_PROTO_IOCB 0xFE
27#define OPCODE_BUFFER_ALERT_IOCB 0xFB
28
29#define OPCODE_FUNC_ID_MASK 0x30
30#define OUTBOUND_MAC_IOCB 0x01 /* plus function bits */
31#define OUTBOUND_TCP_IOCB 0x03 /* plus function bits */
32#define UPDATE_NCB_IOCB 0x00 /* plus function bits */
33
34#define FN0_MA_BITS_MASK 0x00
35#define FN1_MA_BITS_MASK 0x80
36
37struct ob_mac_iocb_req {
38 u8 opcode;
39 u8 flags;
40#define OB_MAC_IOCB_REQ_MA 0xC0
41#define OB_MAC_IOCB_REQ_F 0x20
42#define OB_MAC_IOCB_REQ_X 0x10
43#define OB_MAC_IOCB_REQ_D 0x02
44#define OB_MAC_IOCB_REQ_I 0x01
45 __le16 reserved0;
46
47 __le32 transaction_id;
48 __le16 data_len;
49 __le16 reserved1;
50 __le32 reserved2;
51 __le32 reserved3;
52 __le32 buf_addr0_low;
53 __le32 buf_addr0_high;
54 __le32 buf_0_len;
55 __le32 buf_addr1_low;
56 __le32 buf_addr1_high;
57 __le32 buf_1_len;
58 __le32 buf_addr2_low;
59 __le32 buf_addr2_high;
60 __le32 buf_2_len;
61 __le32 reserved4;
62 __le32 reserved5;
63};
64/*
65 * The following constants define control bits for buffer
66 * length fields for all IOCB's.
67 */
68#define OB_MAC_IOCB_REQ_E 0x80000000 /* Last valid buffer in list. */
69#define OB_MAC_IOCB_REQ_C 0x40000000 /* points to an OAL. (continuation) */
70#define OB_MAC_IOCB_REQ_L 0x20000000 /* Auburn local address pointer. */
71#define OB_MAC_IOCB_REQ_R 0x10000000 /* 32-bit address pointer. */
72
73struct ob_mac_iocb_rsp {
74 u8 opcode;
75 u8 flags;
76#define OB_MAC_IOCB_RSP_P 0x08
77#define OB_MAC_IOCB_RSP_S 0x02
78#define OB_MAC_IOCB_RSP_I 0x01
79
80 __le16 reserved0;
81 __le32 transaction_id;
82 __le32 reserved1;
83 __le32 reserved2;
84};
85
86struct ib_mac_iocb_rsp {
87 u8 opcode;
88 u8 flags;
89#define IB_MAC_IOCB_RSP_S 0x80
90#define IB_MAC_IOCB_RSP_H1 0x40
91#define IB_MAC_IOCB_RSP_H0 0x20
92#define IB_MAC_IOCB_RSP_B 0x10
93#define IB_MAC_IOCB_RSP_M 0x08
94#define IB_MAC_IOCB_RSP_MA 0x07
95
96 __le16 length;
97 __le32 reserved;
98 __le32 ial_low;
99 __le32 ial_high;
100
101};
102
103struct ob_ip_iocb_req {
104 u8 opcode;
105 __le16 flags;
106#define OB_IP_IOCB_REQ_O 0x100
107#define OB_IP_IOCB_REQ_H 0x008
108#define OB_IP_IOCB_REQ_U 0x004
109#define OB_IP_IOCB_REQ_D 0x002
110#define OB_IP_IOCB_REQ_I 0x001
111
112 u8 reserved0;
113
114 __le32 transaction_id;
115 __le16 data_len;
116 __le16 reserved1;
117 __le32 hncb_ptr_low;
118 __le32 hncb_ptr_high;
119 __le32 buf_addr0_low;
120 __le32 buf_addr0_high;
121 __le32 buf_0_len;
122 __le32 buf_addr1_low;
123 __le32 buf_addr1_high;
124 __le32 buf_1_len;
125 __le32 buf_addr2_low;
126 __le32 buf_addr2_high;
127 __le32 buf_2_len;
128 __le32 reserved2;
129 __le32 reserved3;
130};
131
132/* defines for BufferLength fields above */
133#define OB_IP_IOCB_REQ_E 0x80000000
134#define OB_IP_IOCB_REQ_C 0x40000000
135#define OB_IP_IOCB_REQ_L 0x20000000
136#define OB_IP_IOCB_REQ_R 0x10000000
137
138struct ob_ip_iocb_rsp {
139 u8 opcode;
140 u8 flags;
141#define OB_MAC_IOCB_RSP_E 0x08
142#define OB_MAC_IOCB_RSP_L 0x04
143#define OB_MAC_IOCB_RSP_S 0x02
144#define OB_MAC_IOCB_RSP_I 0x01
145
146 __le16 reserved0;
147 __le32 transaction_id;
148 __le32 reserved1;
149 __le32 reserved2;
150};
151
152struct ob_tcp_iocb_req {
153 u8 opcode;
154
155 u8 flags0;
156#define OB_TCP_IOCB_REQ_P 0x80
157#define OB_TCP_IOCB_REQ_CI 0x20
158#define OB_TCP_IOCB_REQ_H 0x10
159#define OB_TCP_IOCB_REQ_LN 0x08
160#define OB_TCP_IOCB_REQ_K 0x04
161#define OB_TCP_IOCB_REQ_D 0x02
162#define OB_TCP_IOCB_REQ_I 0x01
163
164 u8 flags1;
165#define OB_TCP_IOCB_REQ_OSM 0x40
166#define OB_TCP_IOCB_REQ_URG 0x20
167#define OB_TCP_IOCB_REQ_ACK 0x10
168#define OB_TCP_IOCB_REQ_PSH 0x08
169#define OB_TCP_IOCB_REQ_RST 0x04
170#define OB_TCP_IOCB_REQ_SYN 0x02
171#define OB_TCP_IOCB_REQ_FIN 0x01
172
173 u8 options_len;
174#define OB_TCP_IOCB_REQ_OMASK 0xF0
175#define OB_TCP_IOCB_REQ_SHIFT 4
176
177 __le32 transaction_id;
178 __le32 data_len;
179 __le32 hncb_ptr_low;
180 __le32 hncb_ptr_high;
181 __le32 buf_addr0_low;
182 __le32 buf_addr0_high;
183 __le32 buf_0_len;
184 __le32 buf_addr1_low;
185 __le32 buf_addr1_high;
186 __le32 buf_1_len;
187 __le32 buf_addr2_low;
188 __le32 buf_addr2_high;
189 __le32 buf_2_len;
190 __le32 time_stamp;
191 __le32 reserved1;
192};
193
194struct ob_tcp_iocb_rsp {
195 u8 opcode;
196
197 u8 flags0;
198#define OB_TCP_IOCB_RSP_C 0x20
199#define OB_TCP_IOCB_RSP_H 0x10
200#define OB_TCP_IOCB_RSP_LN 0x08
201#define OB_TCP_IOCB_RSP_K 0x04
202#define OB_TCP_IOCB_RSP_D 0x02
203#define OB_TCP_IOCB_RSP_I 0x01
204
205 u8 flags1;
206#define OB_TCP_IOCB_RSP_E 0x10
207#define OB_TCP_IOCB_RSP_W 0x08
208#define OB_TCP_IOCB_RSP_P 0x04
209#define OB_TCP_IOCB_RSP_T 0x02
210#define OB_TCP_IOCB_RSP_F 0x01
211
212 u8 state;
213#define OB_TCP_IOCB_RSP_SMASK 0xF0
214#define OB_TCP_IOCB_RSP_SHIFT 4
215
216 __le32 transaction_id;
217 __le32 local_ncb_ptr;
218 __le32 reserved0;
219};
220
221struct ib_ip_iocb_rsp {
222 u8 opcode;
223 u8 flags;
224#define IB_IP_IOCB_RSP_S 0x80
225#define IB_IP_IOCB_RSP_H1 0x40
226#define IB_IP_IOCB_RSP_H0 0x20
227#define IB_IP_IOCB_RSP_B 0x10
228#define IB_IP_IOCB_RSP_M 0x08
229#define IB_IP_IOCB_RSP_MA 0x07
230
231 __le16 length;
232 __le16 checksum;
233 __le16 reserved;
234#define IB_IP_IOCB_RSP_R 0x01
235 __le32 ial_low;
236 __le32 ial_high;
237};
238
239struct ib_tcp_iocb_rsp {
240 u8 opcode;
241 u8 flags;
242#define IB_TCP_IOCB_RSP_P 0x80
243#define IB_TCP_IOCB_RSP_T 0x40
244#define IB_TCP_IOCB_RSP_D 0x20
245#define IB_TCP_IOCB_RSP_N 0x10
246#define IB_TCP_IOCB_RSP_IP 0x03
247#define IB_TCP_FLAG_MASK 0xf0
248#define IB_TCP_FLAG_IOCB_SYN 0x00
249
250#define TCP_IB_RSP_FLAGS(x) (x->flags & ~IB_TCP_FLAG_MASK)
251
252 __le16 length;
253 __le32 hncb_ref_num;
254 __le32 ial_low;
255 __le32 ial_high;
256};
257
258struct net_rsp_iocb {
259 u8 opcode;
260 u8 flags;
261 __le16 reserved0;
262 __le32 reserved[3];
263};
264#pragma pack()
265
266/*
267 * Register Definitions...
268 */
269#define PORT0_PHY_ADDRESS 0x1e00
270#define PORT1_PHY_ADDRESS 0x1f00
271
272#define ETHERNET_CRC_SIZE 4
273
274#define MII_SCAN_REGISTER 0x00000001
275
276/* 32-bit ispControlStatus */
277enum {
278 ISP_CONTROL_NP_MASK = 0x0003,
279 ISP_CONTROL_NP_PCSR = 0x0000,
280 ISP_CONTROL_NP_HMCR = 0x0001,
281 ISP_CONTROL_NP_LRAMCR = 0x0002,
282 ISP_CONTROL_NP_PSR = 0x0003,
283 ISP_CONTROL_RI = 0x0008,
284 ISP_CONTROL_CI = 0x0010,
285 ISP_CONTROL_PI = 0x0020,
286 ISP_CONTROL_IN = 0x0040,
287 ISP_CONTROL_BE = 0x0080,
288 ISP_CONTROL_FN_MASK = 0x0700,
289 ISP_CONTROL_FN0_NET = 0x0400,
290 ISP_CONTROL_FN0_SCSI = 0x0500,
291 ISP_CONTROL_FN1_NET = 0x0600,
292 ISP_CONTROL_FN1_SCSI = 0x0700,
293 ISP_CONTROL_LINK_DN_0 = 0x0800,
294 ISP_CONTROL_LINK_DN_1 = 0x1000,
295 ISP_CONTROL_FSR = 0x2000,
296 ISP_CONTROL_FE = 0x4000,
297 ISP_CONTROL_SR = 0x8000,
298};
299
300/* 32-bit ispInterruptMaskReg */
301enum {
302 ISP_IMR_ENABLE_INT = 0x0004,
303 ISP_IMR_DISABLE_RESET_INT = 0x0008,
304 ISP_IMR_DISABLE_CMPL_INT = 0x0010,
305 ISP_IMR_DISABLE_PROC_INT = 0x0020,
306};
307
308/* 32-bit serialPortInterfaceReg */
309enum {
310 ISP_SERIAL_PORT_IF_CLK = 0x0001,
311 ISP_SERIAL_PORT_IF_CS = 0x0002,
312 ISP_SERIAL_PORT_IF_D0 = 0x0004,
313 ISP_SERIAL_PORT_IF_DI = 0x0008,
314 ISP_NVRAM_MASK = (0x000F << 16),
315 ISP_SERIAL_PORT_IF_WE = 0x0010,
316 ISP_SERIAL_PORT_IF_NVR_MASK = 0x001F,
317 ISP_SERIAL_PORT_IF_SCI = 0x0400,
318 ISP_SERIAL_PORT_IF_SC0 = 0x0800,
319 ISP_SERIAL_PORT_IF_SCE = 0x1000,
320 ISP_SERIAL_PORT_IF_SDI = 0x2000,
321 ISP_SERIAL_PORT_IF_SDO = 0x4000,
322 ISP_SERIAL_PORT_IF_SDE = 0x8000,
323 ISP_SERIAL_PORT_IF_I2C_MASK = 0xFC00,
324};
325
326/* semaphoreReg */
327enum {
328 QL_RESOURCE_MASK_BASE_CODE = 0x7,
329 QL_RESOURCE_BITS_BASE_CODE = 0x4,
330 QL_DRVR_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 1),
331 QL_DDR_RAM_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 4),
332 QL_PHY_GIO_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 7),
333 QL_NVRAM_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 10),
334 QL_FLASH_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 13),
335 QL_DRVR_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (1 + 16)),
336 QL_DDR_RAM_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (4 + 16)),
337 QL_PHY_GIO_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (7 + 16)),
338 QL_NVRAM_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (10 + 16)),
339 QL_FLASH_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (13 + 16)),
340};
341
342 /*
343 * QL3XXX memory-mapped registers
344 * QL3XXX has 4 "pages" of registers, each page occupying
345 * 256 bytes. Each page has a "common" area at the start and then
346 * page-specific registers after that.
347 */
348struct ql3xxx_common_registers {
349 u32 MB0; /* Offset 0x00 */
350 u32 MB1; /* Offset 0x04 */
351 u32 MB2; /* Offset 0x08 */
352 u32 MB3; /* Offset 0x0c */
353 u32 MB4; /* Offset 0x10 */
354 u32 MB5; /* Offset 0x14 */
355 u32 MB6; /* Offset 0x18 */
356 u32 MB7; /* Offset 0x1c */
357 u32 flashBiosAddr;
358 u32 flashBiosData;
359 u32 ispControlStatus;
360 u32 ispInterruptMaskReg;
361 u32 serialPortInterfaceReg;
362 u32 semaphoreReg;
363 u32 reqQProducerIndex;
364 u32 rspQConsumerIndex;
365
366 u32 rxLargeQProducerIndex;
367 u32 rxSmallQProducerIndex;
368 u32 arcMadiCommand;
369 u32 arcMadiData;
370};
371
372enum {
373 EXT_HW_CONFIG_SP_MASK = 0x0006,
374 EXT_HW_CONFIG_SP_NONE = 0x0000,
375 EXT_HW_CONFIG_SP_BYTE_PARITY = 0x0002,
376 EXT_HW_CONFIG_SP_ECC = 0x0004,
377 EXT_HW_CONFIG_SP_ECCx = 0x0006,
378 EXT_HW_CONFIG_SIZE_MASK = 0x0060,
379 EXT_HW_CONFIG_SIZE_128M = 0x0000,
380 EXT_HW_CONFIG_SIZE_256M = 0x0020,
381 EXT_HW_CONFIG_SIZE_512M = 0x0040,
382 EXT_HW_CONFIG_SIZE_INVALID = 0x0060,
383 EXT_HW_CONFIG_PD = 0x0080,
384 EXT_HW_CONFIG_FW = 0x0200,
385 EXT_HW_CONFIG_US = 0x0400,
386 EXT_HW_CONFIG_DCS_MASK = 0x1800,
387 EXT_HW_CONFIG_DCS_9MA = 0x0000,
388 EXT_HW_CONFIG_DCS_15MA = 0x0800,
389 EXT_HW_CONFIG_DCS_18MA = 0x1000,
390 EXT_HW_CONFIG_DCS_24MA = 0x1800,
391 EXT_HW_CONFIG_DDS_MASK = 0x6000,
392 EXT_HW_CONFIG_DDS_9MA = 0x0000,
393 EXT_HW_CONFIG_DDS_15MA = 0x2000,
394 EXT_HW_CONFIG_DDS_18MA = 0x4000,
395 EXT_HW_CONFIG_DDS_24MA = 0x6000,
396};
397
398/* InternalChipConfig */
399enum {
400 INTERNAL_CHIP_DM = 0x0001,
401 INTERNAL_CHIP_SD = 0x0002,
402 INTERNAL_CHIP_RAP_MASK = 0x000C,
403 INTERNAL_CHIP_RAP_RR = 0x0000,
404 INTERNAL_CHIP_RAP_NRM = 0x0004,
405 INTERNAL_CHIP_RAP_ERM = 0x0008,
406 INTERNAL_CHIP_RAP_ERMx = 0x000C,
407 INTERNAL_CHIP_WE = 0x0010,
408 INTERNAL_CHIP_EF = 0x0020,
409 INTERNAL_CHIP_FR = 0x0040,
410 INTERNAL_CHIP_FW = 0x0080,
411 INTERNAL_CHIP_FI = 0x0100,
412 INTERNAL_CHIP_FT = 0x0200,
413};
414
415/* portControl */
416enum {
417 PORT_CONTROL_DS = 0x0001,
418 PORT_CONTROL_HH = 0x0002,
419 PORT_CONTROL_EI = 0x0004,
420 PORT_CONTROL_ET = 0x0008,
421 PORT_CONTROL_EF = 0x0010,
422 PORT_CONTROL_DRM = 0x0020,
423 PORT_CONTROL_RLB = 0x0040,
424 PORT_CONTROL_RCB = 0x0080,
425 PORT_CONTROL_MAC = 0x0100,
426 PORT_CONTROL_IPV = 0x0200,
427 PORT_CONTROL_IFP = 0x0400,
428 PORT_CONTROL_ITP = 0x0800,
429 PORT_CONTROL_FI = 0x1000,
430 PORT_CONTROL_DFP = 0x2000,
431 PORT_CONTROL_OI = 0x4000,
432 PORT_CONTROL_CC = 0x8000,
433};
434
435/* portStatus */
436enum {
437 PORT_STATUS_SM0 = 0x0001,
438 PORT_STATUS_SM1 = 0x0002,
439 PORT_STATUS_X = 0x0008,
440 PORT_STATUS_DL = 0x0080,
441 PORT_STATUS_IC = 0x0200,
442 PORT_STATUS_MRC = 0x0400,
443 PORT_STATUS_NL = 0x0800,
444 PORT_STATUS_REV_ID_MASK = 0x7000,
445 PORT_STATUS_REV_ID_1 = 0x1000,
446 PORT_STATUS_REV_ID_2 = 0x2000,
447 PORT_STATUS_REV_ID_3 = 0x3000,
448 PORT_STATUS_64 = 0x8000,
449 PORT_STATUS_UP0 = 0x10000,
450 PORT_STATUS_AC0 = 0x20000,
451 PORT_STATUS_AE0 = 0x40000,
452 PORT_STATUS_UP1 = 0x100000,
453 PORT_STATUS_AC1 = 0x200000,
454 PORT_STATUS_AE1 = 0x400000,
455 PORT_STATUS_F0_ENABLED = 0x1000000,
456 PORT_STATUS_F1_ENABLED = 0x2000000,
457 PORT_STATUS_F2_ENABLED = 0x4000000,
458 PORT_STATUS_F3_ENABLED = 0x8000000,
459};
460
461/* macAddrIndirectPtrReg */
462enum {
463 MAC_ADDR_INDIRECT_PTR_REG_RP_MASK = 0x0003,
464 MAC_ADDR_INDIRECT_PTR_REG_RP_PRI_LWR = 0x0000,
465 MAC_ADDR_INDIRECT_PTR_REG_RP_PRI_UPR = 0x0001,
466 MAC_ADDR_INDIRECT_PTR_REG_RP_SEC_LWR = 0x0002,
467 MAC_ADDR_INDIRECT_PTR_REG_RP_SEC_UPR = 0x0003,
468 MAC_ADDR_INDIRECT_PTR_REG_PR = 0x0008,
469 MAC_ADDR_INDIRECT_PTR_REG_SS = 0x0010,
470 MAC_ADDR_INDIRECT_PTR_REG_SE = 0x0020,
471 MAC_ADDR_INDIRECT_PTR_REG_SP = 0x0040,
472 MAC_ADDR_INDIRECT_PTR_REG_PE = 0x0080,
473};
474
475/* macMIIMgmtControlReg */
476enum {
477 MAC_MII_CONTROL_RC = 0x0001,
478 MAC_MII_CONTROL_SC = 0x0002,
479 MAC_MII_CONTROL_AS = 0x0004,
480 MAC_MII_CONTROL_NP = 0x0008,
481 MAC_MII_CONTROL_CLK_SEL_MASK = 0x0070,
482 MAC_MII_CONTROL_CLK_SEL_DIV2 = 0x0000,
483 MAC_MII_CONTROL_CLK_SEL_DIV4 = 0x0010,
484 MAC_MII_CONTROL_CLK_SEL_DIV6 = 0x0020,
485 MAC_MII_CONTROL_CLK_SEL_DIV8 = 0x0030,
486 MAC_MII_CONTROL_CLK_SEL_DIV10 = 0x0040,
487 MAC_MII_CONTROL_CLK_SEL_DIV14 = 0x0050,
488 MAC_MII_CONTROL_CLK_SEL_DIV20 = 0x0060,
489 MAC_MII_CONTROL_CLK_SEL_DIV28 = 0x0070,
490 MAC_MII_CONTROL_RM = 0x8000,
491};
492
493/* macMIIStatusReg */
494enum {
495 MAC_MII_STATUS_BSY = 0x0001,
496 MAC_MII_STATUS_SC = 0x0002,
497 MAC_MII_STATUS_NV = 0x0004,
498};
499
500enum {
501 MAC_CONFIG_REG_PE = 0x0001,
502 MAC_CONFIG_REG_TF = 0x0002,
503 MAC_CONFIG_REG_RF = 0x0004,
504 MAC_CONFIG_REG_FD = 0x0008,
505 MAC_CONFIG_REG_GM = 0x0010,
506 MAC_CONFIG_REG_LB = 0x0020,
507 MAC_CONFIG_REG_SR = 0x8000,
508};
509
510enum {
511 MAC_HALF_DUPLEX_REG_ED = 0x10000,
512 MAC_HALF_DUPLEX_REG_NB = 0x20000,
513 MAC_HALF_DUPLEX_REG_BNB = 0x40000,
514 MAC_HALF_DUPLEX_REG_ALT = 0x80000,
515};
516
517enum {
518 IP_ADDR_INDEX_REG_MASK = 0x000f,
519 IP_ADDR_INDEX_REG_FUNC_0_PRI = 0x0000,
520 IP_ADDR_INDEX_REG_FUNC_0_SEC = 0x0001,
521 IP_ADDR_INDEX_REG_FUNC_1_PRI = 0x0002,
522 IP_ADDR_INDEX_REG_FUNC_1_SEC = 0x0003,
523 IP_ADDR_INDEX_REG_FUNC_2_PRI = 0x0004,
524 IP_ADDR_INDEX_REG_FUNC_2_SEC = 0x0005,
525 IP_ADDR_INDEX_REG_FUNC_3_PRI = 0x0006,
526 IP_ADDR_INDEX_REG_FUNC_3_SEC = 0x0007,
527};
528
529enum {
530 PROBE_MUX_ADDR_REG_MUX_SEL_MASK = 0x003f,
531 PROBE_MUX_ADDR_REG_SYSCLK = 0x0000,
532 PROBE_MUX_ADDR_REG_PCICLK = 0x0040,
533 PROBE_MUX_ADDR_REG_NRXCLK = 0x0080,
534 PROBE_MUX_ADDR_REG_CPUCLK = 0x00C0,
535 PROBE_MUX_ADDR_REG_MODULE_SEL_MASK = 0x3f00,
536 PROBE_MUX_ADDR_REG_UP = 0x4000,
537 PROBE_MUX_ADDR_REG_RE = 0x8000,
538};
539
540enum {
541 STATISTICS_INDEX_REG_MASK = 0x01ff,
542 STATISTICS_INDEX_REG_MAC0_TX_FRAME = 0x0000,
543 STATISTICS_INDEX_REG_MAC0_TX_BYTES = 0x0001,
544 STATISTICS_INDEX_REG_MAC0_TX_STAT1 = 0x0002,
545 STATISTICS_INDEX_REG_MAC0_TX_STAT2 = 0x0003,
546 STATISTICS_INDEX_REG_MAC0_TX_STAT3 = 0x0004,
547 STATISTICS_INDEX_REG_MAC0_TX_STAT4 = 0x0005,
548 STATISTICS_INDEX_REG_MAC0_TX_STAT5 = 0x0006,
549 STATISTICS_INDEX_REG_MAC0_RX_FRAME = 0x0007,
550 STATISTICS_INDEX_REG_MAC0_RX_BYTES = 0x0008,
551 STATISTICS_INDEX_REG_MAC0_RX_STAT1 = 0x0009,
552 STATISTICS_INDEX_REG_MAC0_RX_STAT2 = 0x000a,
553 STATISTICS_INDEX_REG_MAC0_RX_STAT3 = 0x000b,
554 STATISTICS_INDEX_REG_MAC0_RX_ERR_CRC = 0x000c,
555 STATISTICS_INDEX_REG_MAC0_RX_ERR_ENC = 0x000d,
556 STATISTICS_INDEX_REG_MAC0_RX_ERR_LEN = 0x000e,
557 STATISTICS_INDEX_REG_MAC0_RX_STAT4 = 0x000f,
558 STATISTICS_INDEX_REG_MAC1_TX_FRAME = 0x0010,
559 STATISTICS_INDEX_REG_MAC1_TX_BYTES = 0x0011,
560 STATISTICS_INDEX_REG_MAC1_TX_STAT1 = 0x0012,
561 STATISTICS_INDEX_REG_MAC1_TX_STAT2 = 0x0013,
562 STATISTICS_INDEX_REG_MAC1_TX_STAT3 = 0x0014,
563 STATISTICS_INDEX_REG_MAC1_TX_STAT4 = 0x0015,
564 STATISTICS_INDEX_REG_MAC1_TX_STAT5 = 0x0016,
565 STATISTICS_INDEX_REG_MAC1_RX_FRAME = 0x0017,
566 STATISTICS_INDEX_REG_MAC1_RX_BYTES = 0x0018,
567 STATISTICS_INDEX_REG_MAC1_RX_STAT1 = 0x0019,
568 STATISTICS_INDEX_REG_MAC1_RX_STAT2 = 0x001a,
569 STATISTICS_INDEX_REG_MAC1_RX_STAT3 = 0x001b,
570 STATISTICS_INDEX_REG_MAC1_RX_ERR_CRC = 0x001c,
571 STATISTICS_INDEX_REG_MAC1_RX_ERR_ENC = 0x001d,
572 STATISTICS_INDEX_REG_MAC1_RX_ERR_LEN = 0x001e,
573 STATISTICS_INDEX_REG_MAC1_RX_STAT4 = 0x001f,
574 STATISTICS_INDEX_REG_IP_TX_PKTS = 0x0020,
575 STATISTICS_INDEX_REG_IP_TX_BYTES = 0x0021,
576 STATISTICS_INDEX_REG_IP_TX_FRAG = 0x0022,
577 STATISTICS_INDEX_REG_IP_RX_PKTS = 0x0023,
578 STATISTICS_INDEX_REG_IP_RX_BYTES = 0x0024,
579 STATISTICS_INDEX_REG_IP_RX_FRAG = 0x0025,
580 STATISTICS_INDEX_REG_IP_DGRM_REASSEMBLY = 0x0026,
581 STATISTICS_INDEX_REG_IP_V6_RX_PKTS = 0x0027,
582 STATISTICS_INDEX_REG_IP_RX_PKTERR = 0x0028,
583 STATISTICS_INDEX_REG_IP_REASSEMBLY_ERR = 0x0029,
584 STATISTICS_INDEX_REG_TCP_TX_SEG = 0x0030,
585 STATISTICS_INDEX_REG_TCP_TX_BYTES = 0x0031,
586 STATISTICS_INDEX_REG_TCP_RX_SEG = 0x0032,
587 STATISTICS_INDEX_REG_TCP_RX_BYTES = 0x0033,
588 STATISTICS_INDEX_REG_TCP_TIMER_EXP = 0x0034,
589 STATISTICS_INDEX_REG_TCP_RX_ACK = 0x0035,
590 STATISTICS_INDEX_REG_TCP_TX_ACK = 0x0036,
591 STATISTICS_INDEX_REG_TCP_RX_ERR = 0x0037,
592 STATISTICS_INDEX_REG_TCP_RX_WIN_PROBE = 0x0038,
593 STATISTICS_INDEX_REG_TCP_ECC_ERR_CORR = 0x003f,
594};
595
596enum {
597 PORT_FATAL_ERROR_STATUS_OFB_RE_MAC0 = 0x00000001,
598 PORT_FATAL_ERROR_STATUS_OFB_RE_MAC1 = 0x00000002,
599 PORT_FATAL_ERROR_STATUS_OFB_WE = 0x00000004,
600 PORT_FATAL_ERROR_STATUS_IFB_RE = 0x00000008,
601 PORT_FATAL_ERROR_STATUS_IFB_WE_MAC0 = 0x00000010,
602 PORT_FATAL_ERROR_STATUS_IFB_WE_MAC1 = 0x00000020,
603 PORT_FATAL_ERROR_STATUS_ODE_RE = 0x00000040,
604 PORT_FATAL_ERROR_STATUS_ODE_WE = 0x00000080,
605 PORT_FATAL_ERROR_STATUS_IDE_RE = 0x00000100,
606 PORT_FATAL_ERROR_STATUS_IDE_WE = 0x00000200,
607 PORT_FATAL_ERROR_STATUS_SDE_RE = 0x00000400,
608 PORT_FATAL_ERROR_STATUS_SDE_WE = 0x00000800,
609 PORT_FATAL_ERROR_STATUS_BLE = 0x00001000,
610 PORT_FATAL_ERROR_STATUS_SPE = 0x00002000,
611 PORT_FATAL_ERROR_STATUS_EP0 = 0x00004000,
612 PORT_FATAL_ERROR_STATUS_EP1 = 0x00008000,
613 PORT_FATAL_ERROR_STATUS_ICE = 0x00010000,
614 PORT_FATAL_ERROR_STATUS_ILE = 0x00020000,
615 PORT_FATAL_ERROR_STATUS_OPE = 0x00040000,
616 PORT_FATAL_ERROR_STATUS_TA = 0x00080000,
617 PORT_FATAL_ERROR_STATUS_MA = 0x00100000,
618 PORT_FATAL_ERROR_STATUS_SCE = 0x00200000,
619 PORT_FATAL_ERROR_STATUS_RPE = 0x00400000,
620 PORT_FATAL_ERROR_STATUS_MPE = 0x00800000,
621 PORT_FATAL_ERROR_STATUS_OCE = 0x01000000,
622};
623
624/*
625 * port control and status page - page 0
626 */
627
628struct ql3xxx_port_registers {
629 struct ql3xxx_common_registers CommonRegs;
630
631 u32 ExternalHWConfig;
632 u32 InternalChipConfig;
633 u32 portControl;
634 u32 portStatus;
635 u32 macAddrIndirectPtrReg;
636 u32 macAddrDataReg;
637 u32 macMIIMgmtControlReg;
638 u32 macMIIMgmtAddrReg;
639 u32 macMIIMgmtDataReg;
640 u32 macMIIStatusReg;
641 u32 mac0ConfigReg;
642 u32 mac0IpgIfgReg;
643 u32 mac0HalfDuplexReg;
644 u32 mac0MaxFrameLengthReg;
645 u32 mac0PauseThresholdReg;
646 u32 mac1ConfigReg;
647 u32 mac1IpgIfgReg;
648 u32 mac1HalfDuplexReg;
649 u32 mac1MaxFrameLengthReg;
650 u32 mac1PauseThresholdReg;
651 u32 ipAddrIndexReg;
652 u32 ipAddrDataReg;
653 u32 ipReassemblyTimeout;
654 u32 tcpMaxWindow;
655 u32 currentTcpTimestamp[2];
656 u32 internalRamRWAddrReg;
657 u32 internalRamWDataReg;
658 u32 reclaimedBufferAddrRegLow;
659 u32 reclaimedBufferAddrRegHigh;
660 u32 reserved[2];
661 u32 fpgaRevID;
662 u32 localRamAddr;
663 u32 localRamDataAutoIncr;
664 u32 localRamDataNonIncr;
665 u32 gpOutput;
666 u32 gpInput;
667 u32 probeMuxAddr;
668 u32 probeMuxData;
669 u32 statisticsIndexReg;
670 u32 statisticsReadDataRegAutoIncr;
671 u32 statisticsReadDataRegNoIncr;
672 u32 PortFatalErrStatus;
673};
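/*
 * Editorial sketch, not the driver's implementation: one MII management
 * read in terms of the macMIIMgmt* registers above and the MAC_MII_*
 * bits defined earlier.  phy_addr is assumed to be pre-shifted into the
 * PHY-address field (e.g. 0x1e00 for port 0, per the PHYAddr comment in
 * struct ql3_adapter below); mirroring the control bit into the upper
 * half-word as a write-enable mask is an assumption borrowed from the
 * semaphoreReg convention.  Assumes udelay() and readl()/writel().
 */
static inline int ql_mii_read_sketch(struct ql3xxx_port_registers __iomem *port_regs,
				     u16 phy_addr, u16 reg, u16 *value)
{
	int retries = 1000;

	/* select the PHY and register, then kick off a read cycle */
	writel(phy_addr | reg, &port_regs->macMIIMgmtAddrReg);
	writel((MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC,
	       &port_regs->macMIIMgmtControlReg);

	/* wait for the management interface to finish the cycle */
	while ((readl(&port_regs->macMIIStatusReg) & MAC_MII_STATUS_BSY) &&
	       --retries)
		udelay(10);
	if (!retries)
		return -1;

	*value = (u16)readl(&port_regs->macMIIMgmtDataReg);
	return 0;
}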
674
675/*
676 * port host memory config page - page 1
677 */
678struct ql3xxx_host_memory_registers {
679 struct ql3xxx_common_registers CommonRegs;
680
681 u32 reserved[12];
682
683 /* Network Request Queue */
684 u32 reqConsumerIndex;
685 u32 reqConsumerIndexAddrLow;
686 u32 reqConsumerIndexAddrHigh;
687 u32 reqBaseAddrLow;
688 u32 reqBaseAddrHigh;
689 u32 reqLength;
690
691 /* Network Completion Queue */
692 u32 rspProducerIndex;
693 u32 rspProducerIndexAddrLow;
694 u32 rspProducerIndexAddrHigh;
695 u32 rspBaseAddrLow;
696 u32 rspBaseAddrHigh;
697 u32 rspLength;
698
699 /* RX Large Buffer Queue */
700 u32 rxLargeQConsumerIndex;
701 u32 rxLargeQBaseAddrLow;
702 u32 rxLargeQBaseAddrHigh;
703 u32 rxLargeQLength;
704 u32 rxLargeBufferLength;
705
706 /* RX Small Buffer Queue */
707 u32 rxSmallQConsumerIndex;
708 u32 rxSmallQBaseAddrLow;
709 u32 rxSmallQBaseAddrHigh;
710 u32 rxSmallQLength;
711 u32 rxSmallBufferLength;
712
713};
714
715/*
716 * port local RAM page - page 2
717 */
718struct ql3xxx_local_ram_registers {
719 struct ql3xxx_common_registers CommonRegs;
720 u32 bufletSize;
721 u32 maxBufletCount;
722 u32 currentBufletCount;
723 u32 reserved;
724 u32 freeBufletThresholdLow;
725 u32 freeBufletThresholdHigh;
726 u32 ipHashTableBase;
727 u32 ipHashTableCount;
728 u32 tcpHashTableBase;
729 u32 tcpHashTableCount;
730 u32 ncbBase;
731 u32 maxNcbCount;
732 u32 currentNcbCount;
733 u32 drbBase;
734 u32 maxDrbCount;
735 u32 currentDrbCount;
736};
737
738/*
739 * macros to split a 64-bit (e.g. DMA) address into the 32-bit low/high halves used by the paired ...Low/...High registers
740 */
741
742#define LS_64BITS(x) ((u32)(0xffffffff & ((u64)(x))))
743#define MS_64BITS(x) ((u32)(0xffffffff & (((u64)(x)) >> 16 >> 16)))
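/*
 * Editorial sketch: LS_64BITS()/MS_64BITS() split a 64-bit DMA address
 * into the 32-bit halves taken by the chip's paired ...Low/...High
 * registers, e.g. when pointing page 1 at the request queue.  The helper
 * name is hypothetical; hmem would be the page-1 mapping and
 * req_q_phy_addr a dma_addr_t obtained from the DMA mapping API.
 */
static inline void ql_set_req_q_base_sketch(struct ql3xxx_host_memory_registers __iomem *hmem,
					    dma_addr_t req_q_phy_addr)
{
	writel(LS_64BITS(req_q_phy_addr), &hmem->reqBaseAddrLow);
	writel(MS_64BITS(req_q_phy_addr), &hmem->reqBaseAddrHigh);
}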
744
745/*
746 * I/O register
747 */
748
749enum {
750 CONTROL_REG = 0,
751 STATUS_REG = 1,
752 PHY_STAT_LINK_UP = 0x0004,
753 PHY_CTRL_LOOPBACK = 0x4000,
754
755 PETBI_CONTROL_REG = 0x00,
756 PETBI_CTRL_SOFT_RESET = 0x8000,
757 PETBI_CTRL_AUTO_NEG = 0x1000,
758 PETBI_CTRL_RESTART_NEG = 0x0200,
759 PETBI_CTRL_FULL_DUPLEX = 0x0100,
760 PETBI_CTRL_SPEED_1000 = 0x0040,
761
762 PETBI_STATUS_REG = 0x01,
763 PETBI_STAT_NEG_DONE = 0x0020,
764 PETBI_STAT_LINK_UP = 0x0004,
765
766 PETBI_NEG_ADVER = 0x04,
767 PETBI_NEG_PAUSE = 0x0080,
768 PETBI_NEG_PAUSE_MASK = 0x0180,
769 PETBI_NEG_DUPLEX = 0x0020,
770 PETBI_NEG_DUPLEX_MASK = 0x0060,
771
772 PETBI_NEG_PARTNER = 0x05,
773 PETBI_NEG_ERROR_MASK = 0x3000,
774
775 PETBI_EXPANSION_REG = 0x06,
776 PETBI_EXP_PAGE_RX = 0x0002,
777
778 PETBI_TBI_CTRL = 0x11,
779 PETBI_TBI_RESET = 0x8000,
780 PETBI_TBI_AUTO_SENSE = 0x0100,
781 PETBI_TBI_SERDES_MODE = 0x0010,
782 PETBI_TBI_SERDES_WRAP = 0x0002,
783
784 AUX_CONTROL_STATUS = 0x1c,
785 PHY_AUX_NEG_DONE = 0x8000,
786 PHY_NEG_PARTNER = 5,
787 PHY_AUX_DUPLEX_STAT = 0x0020,
788 PHY_AUX_SPEED_STAT = 0x0018,
789 PHY_AUX_NO_HW_STRAP = 0x0004,
790 PHY_AUX_RESET_STICK = 0x0002,
791 PHY_NEG_PAUSE = 0x0400,
792 PHY_CTRL_SOFT_RESET = 0x8000,
793 PHY_NEG_ADVER = 4,
794 PHY_NEG_ADV_SPEED = 0x01e0,
795 PHY_CTRL_RESTART_NEG = 0x0200,
796};
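/*
 * Editorial sketch: a PETBI/SerDes link check expressed with the bits
 * above, reusing the illustrative ql_mii_read_sketch() helper shown
 * earlier.  Treating "negotiation done + link up" as the link-good
 * condition is an assumption for illustration only.
 */
static inline int ql_petbi_link_up_sketch(struct ql3xxx_port_registers __iomem *port_regs,
					  u16 phy_addr)
{
	u16 status;

	if (ql_mii_read_sketch(port_regs, phy_addr, PETBI_STATUS_REG, &status))
		return 0;
	return (status & (PETBI_STAT_NEG_DONE | PETBI_STAT_LINK_UP)) ==
	       (PETBI_STAT_NEG_DONE | PETBI_STAT_LINK_UP);
}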
797enum {
798/* FM93C56A serial EEPROM definitions */
799 FM93C56A_START = 0x1,
800/* Commands */
801 FM93C56A_READ = 0x2,
802 FM93C56A_WEN = 0x0,
803 FM93C56A_WRITE = 0x1,
804 FM93C56A_WRITE_ALL = 0x0,
805 FM93C56A_WDS = 0x0,
806 FM93C56A_ERASE = 0x3,
807 FM93C56A_ERASE_ALL = 0x0,
808/* Command Extensions */
809 FM93C56A_WEN_EXT = 0x3,
810 FM93C56A_WRITE_ALL_EXT = 0x1,
811 FM93C56A_WDS_EXT = 0x0,
812 FM93C56A_ERASE_ALL_EXT = 0x2,
813/* Special Bits */
814 FM93C56A_READ_DUMMY_BITS = 1,
815 FM93C56A_READY = 0,
816 FM93C56A_BUSY = 1,
817 FM93C56A_CMD_BITS = 2,
818/* FM93C56A/66A/86A device sizes */
819 FM93C56A_SIZE_8 = 0x100,
820 FM93C56A_SIZE_16 = 0x80,
821 FM93C66A_SIZE_8 = 0x200,
822 FM93C66A_SIZE_16 = 0x100,
823 FM93C86A_SIZE_16 = 0x400,
824/* Address Bits */
825 FM93C56A_NO_ADDR_BITS_16 = 8,
826 FM93C56A_NO_ADDR_BITS_8 = 9,
827 FM93C86A_NO_ADDR_BITS_16 = 10,
828/* Data Bits */
829 FM93C56A_DATA_BITS_16 = 16,
830 FM93C56A_DATA_BITS_8 = 8,
831};
832enum {
833/* Auburn Bits */
834 AUBURN_EEPROM_DI = 0x8,
835 AUBURN_EEPROM_DI_0 = 0x0,
836 AUBURN_EEPROM_DI_1 = 0x8,
837 AUBURN_EEPROM_DO = 0x4,
838 AUBURN_EEPROM_DO_0 = 0x0,
839 AUBURN_EEPROM_DO_1 = 0x4,
840 AUBURN_EEPROM_CS = 0x2,
841 AUBURN_EEPROM_CS_0 = 0x0,
842 AUBURN_EEPROM_CS_1 = 0x2,
843 AUBURN_EEPROM_CLK_RISE = 0x1,
844 AUBURN_EEPROM_CLK_FALL = 0x0,
845};
846enum {EEPROM_SIZE = FM93C86A_SIZE_16,
847 EEPROM_NO_ADDR_BITS = FM93C86A_NO_ADDR_BITS_16,
848 EEPROM_NO_DATA_BITS = FM93C56A_DATA_BITS_16,
849};
850
851/*
852 * MAC Config data structure
853 */
854 struct eeprom_port_cfg {
855 u16 etherMtu_mac;
856 u16 pauseThreshold_mac;
857 u16 resumeThreshold_mac;
858 u16 portConfiguration;
859#define PORT_CONFIG_AUTO_NEG_ENABLED 0x8000
860#define PORT_CONFIG_SYM_PAUSE_ENABLED 0x4000
861#define PORT_CONFIG_FULL_DUPLEX_ENABLED 0x2000
862#define PORT_CONFIG_HALF_DUPLEX_ENABLED 0x1000
863#define PORT_CONFIG_1000MB_SPEED 0x0400
864#define PORT_CONFIG_100MB_SPEED 0x0200
865#define PORT_CONFIG_10MB_SPEED 0x0100
866#define PORT_CONFIG_LINK_SPEED_MASK 0x0F00
867 u16 reserved[12];
868
869};
870
871/*
872 * BIOS data structure
873 */
874struct eeprom_bios_cfg {
875 u16 SpinDlyEn:1, disBios:1, EnMemMap:1, EnSelectBoot:1, Reserved:12;
876
877 u8 bootID0:7, boodID0Valid:1;
878 u8 bootLun0[8];
879
880 u8 bootID1:7, boodID1Valid:1;
881 u8 bootLun1[8];
882
883 u16 MaxLunsTrgt;
884 u8 reserved[10];
885};
886
887/*
888 * Function Specific Data structure
889 */
890struct eeprom_function_cfg {
891 u8 reserved[30];
892 u8 macAddress[6];
893 u8 macAddressSecondary[6];
894
895 u16 subsysVendorId;
896 u16 subsysDeviceId;
897};
898
899/*
900 * EEPROM format
901 */
902struct eeprom_data {
903 u8 asicId[4];
904 u8 version;
905 u8 numPorts;
906 u16 boardId;
907
908#define EEPROM_BOARDID_STR_SIZE 16
909#define EEPROM_SERIAL_NUM_SIZE 16
910
911 u8 boardIdStr[16];
912 u8 serialNumber[16];
913 u16 extHwConfig;
914 struct eeprom_port_cfg macCfg_port0;
915 struct eeprom_port_cfg macCfg_port1;
916 u16 bufletSize;
917 u16 bufletCount;
918 u16 tcpWindowThreshold50;
919 u16 tcpWindowThreshold25;
920 u16 tcpWindowThreshold0;
921 u16 ipHashTableBaseHi;
922 u16 ipHashTableBaseLo;
923 u16 ipHashTableSize;
924 u16 tcpHashTableBaseHi;
925 u16 tcpHashTableBaseLo;
926 u16 tcpHashTableSize;
927 u16 ncbTableBaseHi;
928 u16 ncbTableBaseLo;
929 u16 ncbTableSize;
930 u16 drbTableBaseHi;
931 u16 drbTableBaseLo;
932 u16 drbTableSize;
933 u16 reserved_142[4];
934 u16 ipReassemblyTimeout;
935 u16 tcpMaxWindowSize;
936 u16 ipSecurity;
937#define IPSEC_CONFIG_PRESENT 0x0001
938 u8 reserved_156[294];
939 u16 qDebug[8];
940 struct eeprom_function_cfg funcCfg_fn0;
941 u16 reserved_510;
942 u8 oemSpace[432];
943 struct eeprom_bios_cfg biosCfg_fn1;
944 struct eeprom_function_cfg funcCfg_fn1;
945 u16 reserved_1022;
946 u8 reserved_1024[464];
947 struct eeprom_function_cfg funcCfg_fn2;
948 u16 reserved_1534;
949 u8 reserved_1536[432];
950 struct eeprom_bios_cfg biosCfg_fn3;
951 struct eeprom_function_cfg funcCfg_fn3;
952 u16 checksum;
953};
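/*
 * Editorial sketch: struct eeprom_data is 2048 bytes (1024 16-bit words,
 * matching EEPROM_SIZE = FM93C86A_SIZE_16), and the trailing checksum
 * word is expected to make the 16-bit sum of the whole image come out to
 * zero.  Hypothetical helper, shown only to illustrate the layout above.
 */
static inline int eeprom_data_checksum_ok_sketch(const struct eeprom_data *nvram)
{
	const u16 *word = (const u16 *)nvram;
	u16 sum = 0;
	unsigned int i;

	for (i = 0; i < sizeof(*nvram) / sizeof(u16); i++)
		sum += word[i];

	return sum == 0;	/* non-zero sum means a corrupt or blank part */
}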
954
955/*
956 * General definitions...
957 */
958
959/*
960 * Below are a number of compiler switches for controlling driver behavior.
961 * Some are not supported under certain conditions and are noted as such.
962 */
963
964#define QL3XXX_VENDOR_ID 0x1077
965#define QL3022_DEVICE_ID 0x3022
966
967/* MTU & Frame Size stuff */
968#define NORMAL_MTU_SIZE ETH_DATA_LEN
969#define JUMBO_MTU_SIZE 9000
970#define VLAN_ID_LEN 2
971
972/* Request Queue Related Definitions */
973#define NUM_REQ_Q_ENTRIES 256 /* entries in the outbound request queue */
974
975/* Response Queue Related Definitions */
976#define NUM_RSP_Q_ENTRIES 256 /* so that 256 * 16 = 4096 (1 page) */
977
978/* Transmit and Receive Buffers */
979#define NUM_LBUFQ_ENTRIES 128
980#define NUM_SBUFQ_ENTRIES 64
981#define QL_SMALL_BUFFER_SIZE 32
982#define QL_ADDR_ELE_PER_BUFQ_ENTRY \
983(sizeof(struct lrg_buf_q_entry) / sizeof(struct bufq_addr_element))
984 /* Each send has at least one control block. This is how many we keep. */
985#define NUM_SMALL_BUFFERS (NUM_SBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY)
986#define NUM_LARGE_BUFFERS (NUM_LBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY)
987#define QL_HEADER_SPACE 32 /* reserve header space at the top of the skb. */
988/*
989 * Large & Small Buffers for Receives
990 */
991struct lrg_buf_q_entry {
992
993 u32 addr0_lower;
994#define IAL_LAST_ENTRY 0x00000001
995#define IAL_CONT_ENTRY 0x00000002
996#define IAL_FLAG_MASK 0x00000003
997 u32 addr0_upper;
998 u32 addr1_lower;
999 u32 addr1_upper;
1000 u32 addr2_lower;
1001 u32 addr2_upper;
1002 u32 addr3_lower;
1003 u32 addr3_upper;
1004 u32 addr4_lower;
1005 u32 addr4_upper;
1006 u32 addr5_lower;
1007 u32 addr5_upper;
1008 u32 addr6_lower;
1009 u32 addr6_upper;
1010 u32 addr7_lower;
1011 u32 addr7_upper;
1012
1013};
1014
1015struct bufq_addr_element {
1016 u32 addr_low;
1017 u32 addr_high;
1018};
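/*
 * Worked example of the sizing macros above: struct lrg_buf_q_entry is
 * 16 u32 fields (64 bytes) and struct bufq_addr_element is 2 (8 bytes),
 * so QL_ADDR_ELE_PER_BUFQ_ENTRY evaluates to 8 address elements per
 * queue entry.  With NUM_LBUFQ_ENTRIES = 128 and NUM_SBUFQ_ENTRIES = 64,
 * that yields NUM_LARGE_BUFFERS = 1024 and NUM_SMALL_BUFFERS = 512
 * receive buffers.
 */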
1019
1020#define QL_NO_RESET 0
1021#define QL_DO_RESET 1
1022
1023enum link_state_t {
1024 LS_UNKNOWN = 0,
1025 LS_DOWN,
1026 LS_DEGRADE,
1027 LS_RECOVER,
1028 LS_UP,
1029};
1030
1031struct ql_rcv_buf_cb {
1032 struct ql_rcv_buf_cb *next;
1033 struct sk_buff *skb;
1034 DECLARE_PCI_UNMAP_ADDR(mapaddr);
1035 DECLARE_PCI_UNMAP_LEN(maplen);
1036 __le32 buf_phy_addr_low;
1037 __le32 buf_phy_addr_high;
1038 int index;
1039};
1040
1041struct ql_tx_buf_cb {
1042 struct sk_buff *skb;
1043 struct ob_mac_iocb_req *queue_entry;
1044 DECLARE_PCI_UNMAP_ADDR(mapaddr);
1045 DECLARE_PCI_UNMAP_LEN(maplen);
1046};
1047
1048/* definitions for type field */
1049#define QL_BUF_TYPE_MACIOCB 0x01
1050#define QL_BUF_TYPE_IPIOCB 0x02
1051#define QL_BUF_TYPE_TCPIOCB 0x03
1052
1053/* qdev->flags definitions. */
1054enum { QL_RESET_DONE = 1, /* Reset finished. */
1055 QL_RESET_ACTIVE = 2, /* Waiting for reset to finish. */
1056 QL_RESET_START = 3, /* Please reset the chip. */
1057 QL_RESET_PER_SCSI = 4, /* SCSI driver requests reset. */
1058 QL_TX_TIMEOUT = 5, /* Timeout in progress. */
1059 QL_LINK_MASTER = 6, /* This driver controls the link. */
1060 QL_ADAPTER_UP = 7, /* Adapter has been brought up. */
1061 QL_THREAD_UP = 8, /* This flag is available. */
1062 QL_LINK_UP = 9, /* Link Status. */
1063 QL_ALLOC_REQ_RSP_Q_DONE = 10,
1064 QL_ALLOC_BUFQS_DONE = 11,
1065 QL_ALLOC_SMALL_BUF_DONE = 12,
1066 QL_LINK_OPTICAL = 13,
1067 QL_MSI_ENABLED = 14,
1068};
1069
1070/*
1071 * ql3_adapter - The main Adapter structure definition.
1072 * This structure has all fields relevant to the hardware.
1073 */
1074
1075struct ql3_adapter {
1076 u32 reserved_00;
1077 unsigned long flags;
1078
1079 /* PCI Configuration information for this device */
1080 struct pci_dev *pdev;
1081 struct net_device *ndev; /* Parent NET device */
1082
1083 /* Hardware information */
1084 u8 chip_rev_id;
1085 u8 pci_slot;
1086 u8 pci_width;
1087 u8 pci_x;
1088 u32 msi;
1089 int index;
1090 struct timer_list adapter_timer; /* timer used for various functions */
1091
1092 spinlock_t adapter_lock;
1093 spinlock_t hw_lock;
1094
1095 /* PCI Bus Relative Register Addresses */
1096 u8 *mmap_virt_base; /* stores return value from ioremap() */
1097 struct ql3xxx_port_registers __iomem *mem_map_registers;
1098 u32 current_page; /* tracks current register page */
1099
1100 u32 msg_enable;
1101 u8 reserved_01[2];
1102 u8 reserved_02[2];
1103
1104 /* Page for Shadow Registers */
1105 void *shadow_reg_virt_addr;
1106 dma_addr_t shadow_reg_phy_addr;
1107
1108 /* Net Request Queue */
1109 u32 req_q_size;
1110 u32 reserved_03;
1111 struct ob_mac_iocb_req *req_q_virt_addr;
1112 dma_addr_t req_q_phy_addr;
1113 u16 req_producer_index;
1114 u16 reserved_04;
1115 u16 *preq_consumer_index;
1116 u32 req_consumer_index_phy_addr_high;
1117 u32 req_consumer_index_phy_addr_low;
1118 atomic_t tx_count;
1119 struct ql_tx_buf_cb tx_buf[NUM_REQ_Q_ENTRIES];
1120
1121 /* Net Response Queue */
1122 u32 rsp_q_size;
1123 u32 eeprom_cmd_data;
1124 struct net_rsp_iocb *rsp_q_virt_addr;
1125 dma_addr_t rsp_q_phy_addr;
1126 struct net_rsp_iocb *rsp_current;
1127 u16 rsp_consumer_index;
1128 u16 reserved_06;
1129 u32 *prsp_producer_index;
1130 u32 rsp_producer_index_phy_addr_high;
1131 u32 rsp_producer_index_phy_addr_low;
1132
1133 /* Large Buffer Queue */
1134 u32 lrg_buf_q_alloc_size;
1135 u32 lrg_buf_q_size;
1136 void *lrg_buf_q_alloc_virt_addr;
1137 void *lrg_buf_q_virt_addr;
1138 dma_addr_t lrg_buf_q_alloc_phy_addr;
1139 dma_addr_t lrg_buf_q_phy_addr;
1140 u32 lrg_buf_q_producer_index;
1141 u32 lrg_buf_release_cnt;
1142 struct bufq_addr_element *lrg_buf_next_free;
1143
1144 /* Large (Receive) Buffers */
1145 struct ql_rcv_buf_cb lrg_buf[NUM_LARGE_BUFFERS];
1146 struct ql_rcv_buf_cb *lrg_buf_free_head;
1147 struct ql_rcv_buf_cb *lrg_buf_free_tail;
1148 u32 lrg_buf_free_count;
1149 u32 lrg_buffer_len;
1150 u32 lrg_buf_index;
1151 u32 lrg_buf_skb_check;
1152
1153 /* Small Buffer Queue */
1154 u32 small_buf_q_alloc_size;
1155 u32 small_buf_q_size;
1156 u32 small_buf_q_producer_index;
1157 void *small_buf_q_alloc_virt_addr;
1158 void *small_buf_q_virt_addr;
1159 dma_addr_t small_buf_q_alloc_phy_addr;
1160 dma_addr_t small_buf_q_phy_addr;
1161 u32 small_buf_index;
1162
1163 /* Small (Receive) Buffers */
1164 void *small_buf_virt_addr;
1165 dma_addr_t small_buf_phy_addr;
1166 u32 small_buf_phy_addr_low;
1167 u32 small_buf_phy_addr_high;
1168 u32 small_buf_release_cnt;
1169 u32 small_buf_total_size;
1170
1171 /* ISR related, saves status for DPC. */
1172 u32 control_status;
1173
1174 struct eeprom_data nvram_data;
1175 struct timer_list ioctl_timer;
1176 u32 port_link_state;
1177 u32 last_rsp_offset;
1178
1179 /* 4022 specific */
1180 u32 mac_index; /* driver's MAC number: 0 or 1 for the first and second network functions, respectively */
1181 u32 PHYAddr; /* PHY address: 0x1e00 for port 0, 0x1f00 for port 1 */
1182 u32 mac_ob_opcode; /* Opcode to use on mac transmission */
1183 u32 tcp_ob_opcode; /* Opcode to use on tcp transmission */
1184 u32 update_ob_opcode; /* Opcode to use for updating NCB */
1185 u32 mb_bit_mask; /* MA Bits mask to use on transmission */
1186 u32 numPorts;
1187 struct net_device_stats stats;
1188 struct workqueue_struct *workqueue;
1189 struct work_struct reset_work;
1190 struct work_struct tx_timeout_work;
1191 u32 max_frame_size;
1192};
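/*
 * Editorial sketch tying the pieces together, not part of the driver:
 * the qdev->flags values above are bit numbers for the test_bit()/
 * set_bit() family, and the statisticsIndexReg/statisticsReadDataReg*
 * pair suggests an indexed statistics read.  Whether hardware statistics
 * are consumed this way by the driver is an assumption; the caller is
 * assumed to hold qdev->hw_lock and to have register page 0 selected.
 */
static inline u32 ql_read_hw_stat_sketch(struct ql3_adapter *qdev, u32 index)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	if (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		return 0;

	writel(index & STATISTICS_INDEX_REG_MASK, &port_regs->statisticsIndexReg);
	return readl(&port_regs->statisticsReadDataRegNoIncr);
}

/* e.g.: ql_read_hw_stat_sketch(qdev, STATISTICS_INDEX_REG_MAC0_RX_ERR_CRC) */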
1193
1194#endif /* _QLA3XXX_H_ */