author:    Ron Mercer <ron.mercer@qlogic.com>    2006-07-25 03:40:21 -0400
committer: Jeff Garzik <jeff@garzik.org>    2006-07-29 00:28:51 -0400
commit:    5a4faa873782d748960b02fdec95e3d4d2e3ae38 (patch)
tree:      7faa11e95d9a881a4914529268cae11f210d5c97 /drivers/net/qla3xxx.c
parent:    572e432e01c1f7c64ad8375a6e0d935b0e40ebf5 (diff)
[PATCH] qla3xxx NIC driver
This is a complementary network driver for our ISP4XXX parts. There is a concurrent effort underway to get the iSCSI driver (qla4xxx) integrated upstream as well. I have been through several iterations with the linux-netdev list and have had much response from Stephen Hemminger.

- Built and tested using kernel 2.6.17-rc4.
- The chip supports two ethernet and two iSCSI functions.
- The functions ql_sem_lock, ql_sem_spinlock, ql_sem_unlock, and ql_wait_for_drvr_lock are used to protect resources that are shared across the network and iSCSI functions. This protection is mostly during chip initialization and resets, but also includes link management.
- The PHY/MII are not exported through ethtool because the iSCSI function will control the common link at least 50% of the time.

This driver has been through several iterations on the netdev list and we feel it is ready for inclusion in the upstream kernel. It has been built and tested on x86 and PPC64 platforms.

Cc: Jeff Garzik <jeff@garzik.org>
Cc: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: Ron Mercer <ron.mercer@qlogic.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
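For illustration only (this sketch is not part of the patch, and the helper name ql_example_nvram_access() is hypothetical), the shared-resource protection described above follows an acquire/use/release pattern built on ql_sem_spinlock()/ql_sem_unlock(), mirroring what ql_get_nvram_params() does in the code below:

/*
 * Illustrative sketch only -- not part of this patch.  The function name
 * ql_example_nvram_access() is hypothetical; it mirrors the locking
 * pattern used by ql_get_nvram_params() further down in this file.
 */
static int ql_example_nvram_access(struct ql3_adapter *qdev)
{
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	/* Grab the chip-level semaphore shared with the iSCSI function. */
	if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 10)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;	/* the other function owns the resource */
	}

	/* ... safely touch the shared resource (NVRAM in this case) ... */

	ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return 0;
}

The same base-code/mac_index bit-shifting scheme is used for the PHY (QL_PHY_GIO_SEM_MASK) and driver-lock semaphores throughout the patch.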
Diffstat (limited to 'drivers/net/qla3xxx.c')
-rw-r--r--    drivers/net/qla3xxx.c    | 3537
1 file changed, 3537 insertions, 0 deletions
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
new file mode 100644
index 000000000000..c729aeeb4696
--- /dev/null
+++ b/drivers/net/qla3xxx.c
@@ -0,0 +1,3537 @@
1/*
2 * QLogic QLA3xxx NIC HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla3xxx for copyright and licensing details.
6 */
7
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <linux/types.h>
11#include <linux/module.h>
12#include <linux/list.h>
13#include <linux/pci.h>
14#include <linux/dma-mapping.h>
15#include <linux/sched.h>
16#include <linux/slab.h>
17#include <linux/dmapool.h>
18#include <linux/mempool.h>
19#include <linux/spinlock.h>
20#include <linux/kthread.h>
21#include <linux/interrupt.h>
22#include <linux/errno.h>
23#include <linux/ioport.h>
24#include <linux/ip.h>
25#include <linux/if_arp.h>
26#include <linux/if_ether.h>
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29#include <linux/ethtool.h>
30#include <linux/skbuff.h>
31#include <linux/rtnetlink.h>
32#include <linux/if_vlan.h>
33#include <linux/init.h>
34#include <linux/delay.h>
35#include <linux/mm.h>
36
37#include "qla3xxx.h"
38
39#define DRV_NAME "qla3xxx"
40#define DRV_STRING "QLogic ISP3XXX Network Driver"
41#define DRV_VERSION "v2.02.00-k36"
42#define PFX DRV_NAME " "
43
44static const char ql3xxx_driver_name[] = DRV_NAME;
45static const char ql3xxx_driver_version[] = DRV_VERSION;
46
47MODULE_AUTHOR("QLogic Corporation");
48MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
49MODULE_LICENSE("GPL");
50MODULE_VERSION(DRV_VERSION);
51
52static const u32 default_msg
53 = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
54 | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
55
56static int debug = -1; /* defaults above */
57module_param(debug, int, 0);
58MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
59
60static int msi;
61module_param(msi, int, 0);
62MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
63
64static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = {
65 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
66 /* required last entry */
67 {0,}
68};
69
70MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);
71
72/*
73 * Caller must take hw_lock.
74 */
75static int ql_sem_spinlock(struct ql3_adapter *qdev,
76 u32 sem_mask, u32 sem_bits)
77{
78 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
79 u32 value;
80 unsigned int seconds = 3;
81
82 do {
83 writel((sem_mask | sem_bits),
84 &port_regs->CommonRegs.semaphoreReg);
85 value = readl(&port_regs->CommonRegs.semaphoreReg);
86 if ((value & (sem_mask >> 16)) == sem_bits)
87 return 0;
88 ssleep(1);
89 } while(--seconds);
90 return -1;
91}
92
93static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
94{
95 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
96 writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
97 readl(&port_regs->CommonRegs.semaphoreReg);
98}
99
100static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
101{
102 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
103 u32 value;
104
105 writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
106 value = readl(&port_regs->CommonRegs.semaphoreReg);
107 return ((value & (sem_mask >> 16)) == sem_bits);
108}
109
110/*
111 * Caller holds hw_lock.
112 */
113static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
114{
115 int i = 0;
116
117 while (1) {
118 if (!ql_sem_lock(qdev,
119 QL_DRVR_SEM_MASK,
120 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
121 * 2) << 1)) {
122 if (i < 10) {
123 ssleep(1);
124 i++;
125 } else {
126 printk(KERN_ERR PFX "%s: Timed out waiting for "
127 "driver lock...\n",
128 qdev->ndev->name);
129 return 0;
130 }
131 } else {
132 printk(KERN_DEBUG PFX
133 "%s: driver lock acquired.\n",
134 qdev->ndev->name);
135 return 1;
136 }
137 }
138}
139
140static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
141{
142 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
143
144 writel(((ISP_CONTROL_NP_MASK << 16) | page),
145 &port_regs->CommonRegs.ispControlStatus);
146 readl(&port_regs->CommonRegs.ispControlStatus);
147 qdev->current_page = page;
148}
149
150static u32 ql_read_common_reg_l(struct ql3_adapter *qdev,
151 u32 __iomem * reg)
152{
153 u32 value;
154 unsigned long hw_flags;
155
156 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
157 value = readl(reg);
158 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
159
160 return value;
161}
162
163static u32 ql_read_common_reg(struct ql3_adapter *qdev,
164 u32 __iomem * reg)
165{
166 return readl(reg);
167}
168
169static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
170{
171 u32 value;
172 unsigned long hw_flags;
173
174 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
175
176 if (qdev->current_page != 0)
177 ql_set_register_page(qdev,0);
178 value = readl(reg);
179
180 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
181 return value;
182}
183
184static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
185{
186 if (qdev->current_page != 0)
187 ql_set_register_page(qdev,0);
188 return readl(reg);
189}
190
191static void ql_write_common_reg_l(struct ql3_adapter *qdev,
192 u32 * reg, u32 value)
193{
194 unsigned long hw_flags;
195
196 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
197 writel(value, (u32 *) reg);
198 readl(reg);
199 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
200 return;
201}
202
203static void ql_write_common_reg(struct ql3_adapter *qdev,
204 u32 * reg, u32 value)
205{
206 writel(value, (u32 *) reg);
207 readl(reg);
208 return;
209}
210
211static void ql_write_page0_reg(struct ql3_adapter *qdev,
212 u32 * reg, u32 value)
213{
214 if (qdev->current_page != 0)
215 ql_set_register_page(qdev,0);
216 writel(value, (u32 *) reg);
217 readl(reg);
218 return;
219}
220
221/*
222 * Caller holds hw_lock. Only called during init.
223 */
224static void ql_write_page1_reg(struct ql3_adapter *qdev,
225 u32 * reg, u32 value)
226{
227 if (qdev->current_page != 1)
228 ql_set_register_page(qdev,1);
229 writel(value, (u32 *) reg);
230 readl(reg);
231 return;
232}
233
234/*
235 * Caller holds hw_lock. Only called during init.
236 */
237static void ql_write_page2_reg(struct ql3_adapter *qdev,
238 u32 * reg, u32 value)
239{
240 if (qdev->current_page != 2)
241 ql_set_register_page(qdev,2);
242 writel(value, (u32 *) reg);
243 readl(reg);
244 return;
245}
246
247static void ql_disable_interrupts(struct ql3_adapter *qdev)
248{
249 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
250
251 ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
252 (ISP_IMR_ENABLE_INT << 16));
253
254}
255
256static void ql_enable_interrupts(struct ql3_adapter *qdev)
257{
258 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
259
260 ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
261 ((0xff << 16) | ISP_IMR_ENABLE_INT));
262
263}
264
265static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
266 struct ql_rcv_buf_cb *lrg_buf_cb)
267{
268 u64 map;
269 lrg_buf_cb->next = NULL;
270
271 if (qdev->lrg_buf_free_tail == NULL) { /* The list is empty */
272 qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
273 } else {
274 qdev->lrg_buf_free_tail->next = lrg_buf_cb;
275 qdev->lrg_buf_free_tail = lrg_buf_cb;
276 }
277
278 if (!lrg_buf_cb->skb) {
279 lrg_buf_cb->skb = dev_alloc_skb(qdev->lrg_buffer_len);
280 if (unlikely(!lrg_buf_cb->skb)) {
281 printk(KERN_ERR PFX "%s: failed dev_alloc_skb().\n",
282 qdev->ndev->name);
283 qdev->lrg_buf_skb_check++;
284 } else {
285 /*
286 * We save some space to copy the ethhdr from first
287 * buffer
288 */
289 skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
290 map = pci_map_single(qdev->pdev,
291 lrg_buf_cb->skb->data,
292 qdev->lrg_buffer_len -
293 QL_HEADER_SPACE,
294 PCI_DMA_FROMDEVICE);
295 lrg_buf_cb->buf_phy_addr_low =
296 cpu_to_le32(LS_64BITS(map));
297 lrg_buf_cb->buf_phy_addr_high =
298 cpu_to_le32(MS_64BITS(map));
299 pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
300 pci_unmap_len_set(lrg_buf_cb, maplen,
301 qdev->lrg_buffer_len -
302 QL_HEADER_SPACE);
303 }
304 }
305
306 qdev->lrg_buf_free_count++;
307}
308
309static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
310 *qdev)
311{
312 struct ql_rcv_buf_cb *lrg_buf_cb;
313
314 if ((lrg_buf_cb = qdev->lrg_buf_free_head) != NULL) {
315 if ((qdev->lrg_buf_free_head = lrg_buf_cb->next) == NULL)
316 qdev->lrg_buf_free_tail = NULL;
317 qdev->lrg_buf_free_count--;
318 }
319
320 return lrg_buf_cb;
321}
322
323static u32 addrBits = EEPROM_NO_ADDR_BITS;
324static u32 dataBits = EEPROM_NO_DATA_BITS;
325
326static void fm93c56a_deselect(struct ql3_adapter *qdev);
327static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
328 unsigned short *value);
329
330/*
331 * Caller holds hw_lock.
332 */
333static void fm93c56a_select(struct ql3_adapter *qdev)
334{
335 struct ql3xxx_port_registers __iomem *port_regs =
336 qdev->mem_map_registers;
337
338 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
339 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
340 ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
341 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
342 ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
343}
344
345/*
346 * Caller holds hw_lock.
347 */
348static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
349{
350 int i;
351 u32 mask;
352 u32 dataBit;
353 u32 previousBit;
354 struct ql3xxx_port_registers __iomem *port_regs =
355 qdev->mem_map_registers;
356
357 /* Clock in a zero, then do the start bit */
358 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
359 ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
360 AUBURN_EEPROM_DO_1);
361 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
362 ISP_NVRAM_MASK | qdev->
363 eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
364 AUBURN_EEPROM_CLK_RISE);
365 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
366 ISP_NVRAM_MASK | qdev->
367 eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
368 AUBURN_EEPROM_CLK_FALL);
369
370 mask = 1 << (FM93C56A_CMD_BITS - 1);
371 /* Force the previous data bit to be different */
372 previousBit = 0xffff;
373 for (i = 0; i < FM93C56A_CMD_BITS; i++) {
374 dataBit =
375 (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0;
376 if (previousBit != dataBit) {
377 /*
378 * If the bit changed, then change the DO state to
379 * match
380 */
381 ql_write_common_reg(qdev,
382 &port_regs->CommonRegs.
383 serialPortInterfaceReg,
384 ISP_NVRAM_MASK | qdev->
385 eeprom_cmd_data | dataBit);
386 previousBit = dataBit;
387 }
388 ql_write_common_reg(qdev,
389 &port_regs->CommonRegs.
390 serialPortInterfaceReg,
391 ISP_NVRAM_MASK | qdev->
392 eeprom_cmd_data | dataBit |
393 AUBURN_EEPROM_CLK_RISE);
394 ql_write_common_reg(qdev,
395 &port_regs->CommonRegs.
396 serialPortInterfaceReg,
397 ISP_NVRAM_MASK | qdev->
398 eeprom_cmd_data | dataBit |
399 AUBURN_EEPROM_CLK_FALL);
400 cmd = cmd << 1;
401 }
402
403 mask = 1 << (addrBits - 1);
404 /* Force the previous data bit to be different */
405 previousBit = 0xffff;
406 for (i = 0; i < addrBits; i++) {
407 dataBit =
408 (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 :
409 AUBURN_EEPROM_DO_0;
410 if (previousBit != dataBit) {
411 /*
412 * If the bit changed, then change the DO state to
413 * match
414 */
415 ql_write_common_reg(qdev,
416 &port_regs->CommonRegs.
417 serialPortInterfaceReg,
418 ISP_NVRAM_MASK | qdev->
419 eeprom_cmd_data | dataBit);
420 previousBit = dataBit;
421 }
422 ql_write_common_reg(qdev,
423 &port_regs->CommonRegs.
424 serialPortInterfaceReg,
425 ISP_NVRAM_MASK | qdev->
426 eeprom_cmd_data | dataBit |
427 AUBURN_EEPROM_CLK_RISE);
428 ql_write_common_reg(qdev,
429 &port_regs->CommonRegs.
430 serialPortInterfaceReg,
431 ISP_NVRAM_MASK | qdev->
432 eeprom_cmd_data | dataBit |
433 AUBURN_EEPROM_CLK_FALL);
434 eepromAddr = eepromAddr << 1;
435 }
436}
437
438/*
439 * Caller holds hw_lock.
440 */
441static void fm93c56a_deselect(struct ql3_adapter *qdev)
442{
443 struct ql3xxx_port_registers __iomem *port_regs =
444 qdev->mem_map_registers;
445 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
446 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
447 ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
448}
449
450/*
451 * Caller holds hw_lock.
452 */
453static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
454{
455 int i;
456 u32 data = 0;
457 u32 dataBit;
458 struct ql3xxx_port_registers __iomem *port_regs =
459 qdev->mem_map_registers;
460
461 /* Read the data bits */
462 /* The first bit is a dummy. Clock right over it. */
463 for (i = 0; i < dataBits; i++) {
464 ql_write_common_reg(qdev,
465 &port_regs->CommonRegs.
466 serialPortInterfaceReg,
467 ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
468 AUBURN_EEPROM_CLK_RISE);
469 ql_write_common_reg(qdev,
470 &port_regs->CommonRegs.
471 serialPortInterfaceReg,
472 ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
473 AUBURN_EEPROM_CLK_FALL);
474 dataBit =
475 (ql_read_common_reg
476 (qdev,
477 &port_regs->CommonRegs.
478 serialPortInterfaceReg) & AUBURN_EEPROM_DI_1) ? 1 : 0;
479 data = (data << 1) | dataBit;
480 }
481 *value = (u16) data;
482}
483
484/*
485 * Caller holds hw_lock.
486 */
487static void eeprom_readword(struct ql3_adapter *qdev,
488 u32 eepromAddr, unsigned short *value)
489{
490 fm93c56a_select(qdev);
491 fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
492 fm93c56a_datain(qdev, value);
493 fm93c56a_deselect(qdev);
494}
495
496static void ql_swap_mac_addr(u8 * macAddress)
497{
498#ifdef __BIG_ENDIAN
499 u8 temp;
500 temp = macAddress[0];
501 macAddress[0] = macAddress[1];
502 macAddress[1] = temp;
503 temp = macAddress[2];
504 macAddress[2] = macAddress[3];
505 macAddress[3] = temp;
506 temp = macAddress[4];
507 macAddress[4] = macAddress[5];
508 macAddress[5] = temp;
509#endif
510}
511
512static int ql_get_nvram_params(struct ql3_adapter *qdev)
513{
514 u16 *pEEPROMData;
515 u16 checksum = 0;
516 u32 index;
517 unsigned long hw_flags;
518
519 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
520
521 pEEPROMData = (u16 *) & qdev->nvram_data;
522 qdev->eeprom_cmd_data = 0;
523 if(ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
524 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
525 2) << 10)) {
526 printk(KERN_ERR PFX"%s: Failed ql_sem_spinlock().\n",
527 __func__);
528 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
529 return -1;
530 }
531
532 for (index = 0; index < EEPROM_SIZE; index++) {
533 eeprom_readword(qdev, index, pEEPROMData);
534 checksum += *pEEPROMData;
535 pEEPROMData++;
536 }
537 ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);
538
539 if (checksum != 0) {
540 printk(KERN_ERR PFX "%s: checksum should be zero, is %x!!\n",
541 qdev->ndev->name, checksum);
542 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
543 return -1;
544 }
545
546 /*
547 * We have a problem with endianness for the MAC addresses
548 * and the two 8-bit values version, and numPorts. We
549 * have to swap them on big endian systems.
550 */
551 ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn0.macAddress);
552 ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn1.macAddress);
553 ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn2.macAddress);
554 ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn3.macAddress);
555 pEEPROMData = (u16 *) & qdev->nvram_data.version;
556 *pEEPROMData = le16_to_cpu(*pEEPROMData);
557
558 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
559 return checksum;
560}
561
562static const u32 PHYAddr[2] = {
563 PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
564};
565
566static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
567{
568 struct ql3xxx_port_registers __iomem *port_regs =
569 qdev->mem_map_registers;
570 u32 temp;
571 int count = 1000;
572
573 while (count) {
574 temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
575 if (!(temp & MAC_MII_STATUS_BSY))
576 return 0;
577 udelay(10);
578 count--;
579 }
580 return -1;
581}
582
583static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
584{
585 struct ql3xxx_port_registers __iomem *port_regs =
586 qdev->mem_map_registers;
587 u32 scanControl;
588
589 if (qdev->numPorts > 1) {
590 /* Auto scan will cycle through multiple ports */
591 scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
592 } else {
593 scanControl = MAC_MII_CONTROL_SC;
594 }
595
596 /*
597 * Scan register 1 of PHY/PETBI,
598 * Set up to scan both devices
599 * The autoscan starts from the first register, completes
600 * the last one before rolling over to the first
601 */
602 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
603 PHYAddr[0] | MII_SCAN_REGISTER);
604
605 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
606 (scanControl) |
607 ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
608}
609
610static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
611{
612 u8 ret;
613 struct ql3xxx_port_registers __iomem *port_regs =
614 qdev->mem_map_registers;
615
616 /* See if scan mode is enabled before we turn it off */
617 if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
618 (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
619 /* Scan is enabled */
620 ret = 1;
621 } else {
622 /* Scan is disabled */
623 ret = 0;
624 }
625
626 /*
627 * When disabling scan mode you must first change the MII register
628 * address
629 */
630 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
631 PHYAddr[0] | MII_SCAN_REGISTER);
632
633 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
634 ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
635 MAC_MII_CONTROL_RC) << 16));
636
637 return ret;
638}
639
640static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
641 u16 regAddr, u16 value, u32 mac_index)
642{
643 struct ql3xxx_port_registers __iomem *port_regs =
644 qdev->mem_map_registers;
645 u8 scanWasEnabled;
646
647 scanWasEnabled = ql_mii_disable_scan_mode(qdev);
648
649 if (ql_wait_for_mii_ready(qdev)) {
650 if (netif_msg_link(qdev))
651 printk(KERN_WARNING PFX
652 "%s Timed out waiting for management port to "
653 "get free before issuing command.\n",
654 qdev->ndev->name);
655 return -1;
656 }
657
658 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
659 PHYAddr[mac_index] | regAddr);
660
661 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
662
663 /* Wait for write to complete 9/10/04 SJP */
664 if (ql_wait_for_mii_ready(qdev)) {
665 if (netif_msg_link(qdev))
666 printk(KERN_WARNING PFX
667 "%s: Timed out waiting for management port to"
668 "get free before issuing command.\n",
669 qdev->ndev->name);
670 return -1;
671 }
672
673 if (scanWasEnabled)
674 ql_mii_enable_scan_mode(qdev);
675
676 return 0;
677}
678
679static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
680 u16 * value, u32 mac_index)
681{
682 struct ql3xxx_port_registers __iomem *port_regs =
683 qdev->mem_map_registers;
684 u8 scanWasEnabled;
685 u32 temp;
686
687 scanWasEnabled = ql_mii_disable_scan_mode(qdev);
688
689 if (ql_wait_for_mii_ready(qdev)) {
690 if (netif_msg_link(qdev))
691 printk(KERN_WARNING PFX
692 "%s: Timed out waiting for management port to "
693 "get free before issuing command.\n",
694 qdev->ndev->name);
695 return -1;
696 }
697
698 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
699 PHYAddr[mac_index] | regAddr);
700
701 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
702 (MAC_MII_CONTROL_RC << 16));
703
704 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
705 (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);
706
707 /* Wait for the read to complete */
708 if (ql_wait_for_mii_ready(qdev)) {
709 if (netif_msg_link(qdev))
710 printk(KERN_WARNING PFX
711 "%s: Timed out waiting for management port to "
712 "get free after issuing command.\n",
713 qdev->ndev->name);
714 return -1;
715 }
716
717 temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
718 *value = (u16) temp;
719
720 if (scanWasEnabled)
721 ql_mii_enable_scan_mode(qdev);
722
723 return 0;
724}
725
726static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
727{
728 struct ql3xxx_port_registers __iomem *port_regs =
729 qdev->mem_map_registers;
730
731 ql_mii_disable_scan_mode(qdev);
732
733 if (ql_wait_for_mii_ready(qdev)) {
734 if (netif_msg_link(qdev))
735 printk(KERN_WARNING PFX
736 "%s: Timed out waiting for management port to "
737 "get free before issuing command.\n",
738 qdev->ndev->name);
739 return -1;
740 }
741
742 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
743 qdev->PHYAddr | regAddr);
744
745 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
746
747 /* Wait for write to complete. */
748 if (ql_wait_for_mii_ready(qdev)) {
749 if (netif_msg_link(qdev))
750 printk(KERN_WARNING PFX
751 "%s: Timed out waiting for management port to "
752 "get free before issuing command.\n",
753 qdev->ndev->name);
754 return -1;
755 }
756
757 ql_mii_enable_scan_mode(qdev);
758
759 return 0;
760}
761
762static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
763{
764 u32 temp;
765 struct ql3xxx_port_registers __iomem *port_regs =
766 qdev->mem_map_registers;
767
768 ql_mii_disable_scan_mode(qdev);
769
770 if (ql_wait_for_mii_ready(qdev)) {
771 if (netif_msg_link(qdev))
772 printk(KERN_WARNING PFX
773 "%s: Timed out waiting for management port to "
774 "get free before issuing command.\n",
775 qdev->ndev->name);
776 return -1;
777 }
778
779 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
780 qdev->PHYAddr | regAddr);
781
782 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
783 (MAC_MII_CONTROL_RC << 16));
784
785 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
786 (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);
787
788 /* Wait for the read to complete */
789 if (ql_wait_for_mii_ready(qdev)) {
790 if (netif_msg_link(qdev))
791 printk(KERN_WARNING PFX
792 "%s: Timed out waiting for management port to "
793 "get free before issuing command.\n",
794 qdev->ndev->name);
795 return -1;
796 }
797
798 temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
799 *value = (u16) temp;
800
801 ql_mii_enable_scan_mode(qdev);
802
803 return 0;
804}
805
806static void ql_petbi_reset(struct ql3_adapter *qdev)
807{
808 ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
809}
810
811static void ql_petbi_start_neg(struct ql3_adapter *qdev)
812{
813 u16 reg;
814
815 /* Enable Auto-negotiation sense */
816 ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
817 reg |= PETBI_TBI_AUTO_SENSE;
818 ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);
819
820 ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
821 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);
822
823 ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
824 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
825 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
826
827}
828
829static void ql_petbi_reset_ex(struct ql3_adapter *qdev, u32 mac_index)
830{
831 ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
832 mac_index);
833}
834
835static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev, u32 mac_index)
836{
837 u16 reg;
838
839 /* Enable Auto-negotiation sense */
840 ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg, mac_index);
841 reg |= PETBI_TBI_AUTO_SENSE;
842 ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg, mac_index);
843
844 ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
845 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX, mac_index);
846
847 ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
848 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
849 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
850 mac_index);
851}
852
853static void ql_petbi_init(struct ql3_adapter *qdev)
854{
855 ql_petbi_reset(qdev);
856 ql_petbi_start_neg(qdev);
857}
858
859static void ql_petbi_init_ex(struct ql3_adapter *qdev, u32 mac_index)
860{
861 ql_petbi_reset_ex(qdev, mac_index);
862 ql_petbi_start_neg_ex(qdev, mac_index);
863}
864
865static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
866{
867 u16 reg;
868
869 if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
870 return 0;
871
872 return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
873}
874
875static int ql_phy_get_speed(struct ql3_adapter *qdev)
876{
877 u16 reg;
878
879 if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
880 return 0;
881
882 reg = (((reg & 0x18) >> 3) & 3);
883
884 if (reg == 2)
885 return SPEED_1000;
886 else if (reg == 1)
887 return SPEED_100;
888 else if (reg == 0)
889 return SPEED_10;
890 else
891 return -1;
892}
893
894static int ql_is_full_dup(struct ql3_adapter *qdev)
895{
896 u16 reg;
897
898 if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
899 return 0;
900
901 return (reg & PHY_AUX_DUPLEX_STAT) != 0;
902}
903
904static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
905{
906 u16 reg;
907
908 if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
909 return 0;
910
911 return (reg & PHY_NEG_PAUSE) != 0;
912}
913
914/*
915 * Caller holds hw_lock.
916 */
917static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
918{
919 struct ql3xxx_port_registers __iomem *port_regs =
920 qdev->mem_map_registers;
921 u32 value;
922
923 if (enable)
924 value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
925 else
926 value = (MAC_CONFIG_REG_PE << 16);
927
928 if (qdev->mac_index)
929 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
930 else
931 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
932}
933
934/*
935 * Caller holds hw_lock.
936 */
937static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
938{
939 struct ql3xxx_port_registers __iomem *port_regs =
940 qdev->mem_map_registers;
941 u32 value;
942
943 if (enable)
944 value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
945 else
946 value = (MAC_CONFIG_REG_SR << 16);
947
948 if (qdev->mac_index)
949 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
950 else
951 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
952}
953
954/*
955 * Caller holds hw_lock.
956 */
957static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
958{
959 struct ql3xxx_port_registers __iomem *port_regs =
960 qdev->mem_map_registers;
961 u32 value;
962
963 if (enable)
964 value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
965 else
966 value = (MAC_CONFIG_REG_GM << 16);
967
968 if (qdev->mac_index)
969 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
970 else
971 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
972}
973
974/*
975 * Caller holds hw_lock.
976 */
977static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
978{
979 struct ql3xxx_port_registers __iomem *port_regs =
980 qdev->mem_map_registers;
981 u32 value;
982
983 if (enable)
984 value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
985 else
986 value = (MAC_CONFIG_REG_FD << 16);
987
988 if (qdev->mac_index)
989 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
990 else
991 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
992}
993
994/*
995 * Caller holds hw_lock.
996 */
997static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
998{
999 struct ql3xxx_port_registers __iomem *port_regs =
1000 qdev->mem_map_registers;
1001 u32 value;
1002
1003 if (enable)
1004 value =
1005 ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
1006 ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
1007 else
1008 value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);
1009
1010 if (qdev->mac_index)
1011 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1012 else
1013 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1014}
1015
1016/*
1017 * Caller holds hw_lock.
1018 */
1019static int ql_is_fiber(struct ql3_adapter *qdev)
1020{
1021 struct ql3xxx_port_registers __iomem *port_regs =
1022 qdev->mem_map_registers;
1023 u32 bitToCheck = 0;
1024 u32 temp;
1025
1026 switch (qdev->mac_index) {
1027 case 0:
1028 bitToCheck = PORT_STATUS_SM0;
1029 break;
1030 case 1:
1031 bitToCheck = PORT_STATUS_SM1;
1032 break;
1033 }
1034
1035 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1036 return (temp & bitToCheck) != 0;
1037}
1038
1039static int ql_is_auto_cfg(struct ql3_adapter *qdev)
1040{
1041 u16 reg;
1042 ql_mii_read_reg(qdev, 0x00, &reg);
1043 return (reg & 0x1000) != 0;
1044}
1045
1046/*
1047 * Caller holds hw_lock.
1048 */
1049static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
1050{
1051 struct ql3xxx_port_registers __iomem *port_regs =
1052 qdev->mem_map_registers;
1053 u32 bitToCheck = 0;
1054 u32 temp;
1055
1056 switch (qdev->mac_index) {
1057 case 0:
1058 bitToCheck = PORT_STATUS_AC0;
1059 break;
1060 case 1:
1061 bitToCheck = PORT_STATUS_AC1;
1062 break;
1063 }
1064
1065 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1066 if (temp & bitToCheck) {
1067 if (netif_msg_link(qdev))
1068 printk(KERN_INFO PFX
1069 "%s: Auto-Negotiate complete.\n",
1070 qdev->ndev->name);
1071 return 1;
1072 } else {
1073 if (netif_msg_link(qdev))
1074 printk(KERN_WARNING PFX
1075 "%s: Auto-Negotiate incomplete.\n",
1076 qdev->ndev->name);
1077 return 0;
1078 }
1079}
1080
1081/*
1082 * ql_is_neg_pause() returns 1 if pause was negotiated to be on
1083 */
1084static int ql_is_neg_pause(struct ql3_adapter *qdev)
1085{
1086 if (ql_is_fiber(qdev))
1087 return ql_is_petbi_neg_pause(qdev);
1088 else
1089 return ql_is_phy_neg_pause(qdev);
1090}
1091
1092static int ql_auto_neg_error(struct ql3_adapter *qdev)
1093{
1094 struct ql3xxx_port_registers __iomem *port_regs =
1095 qdev->mem_map_registers;
1096 u32 bitToCheck = 0;
1097 u32 temp;
1098
1099 switch (qdev->mac_index) {
1100 case 0:
1101 bitToCheck = PORT_STATUS_AE0;
1102 break;
1103 case 1:
1104 bitToCheck = PORT_STATUS_AE1;
1105 break;
1106 }
1107 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1108 return (temp & bitToCheck) != 0;
1109}
1110
1111static u32 ql_get_link_speed(struct ql3_adapter *qdev)
1112{
1113 if (ql_is_fiber(qdev))
1114 return SPEED_1000;
1115 else
1116 return ql_phy_get_speed(qdev);
1117}
1118
1119static int ql_is_link_full_dup(struct ql3_adapter *qdev)
1120{
1121 if (ql_is_fiber(qdev))
1122 return 1;
1123 else
1124 return ql_is_full_dup(qdev);
1125}
1126
1127/*
1128 * Caller holds hw_lock.
1129 */
1130static int ql_link_down_detect(struct ql3_adapter *qdev)
1131{
1132 struct ql3xxx_port_registers __iomem *port_regs =
1133 qdev->mem_map_registers;
1134 u32 bitToCheck = 0;
1135 u32 temp;
1136
1137 switch (qdev->mac_index) {
1138 case 0:
1139 bitToCheck = ISP_CONTROL_LINK_DN_0;
1140 break;
1141 case 1:
1142 bitToCheck = ISP_CONTROL_LINK_DN_1;
1143 break;
1144 }
1145
1146 temp =
1147 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
1148 return (temp & bitToCheck) != 0;
1149}
1150
1151/*
1152 * Caller holds hw_lock.
1153 */
1154static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
1155{
1156 struct ql3xxx_port_registers __iomem *port_regs =
1157 qdev->mem_map_registers;
1158
1159 switch (qdev->mac_index) {
1160 case 0:
1161 ql_write_common_reg(qdev,
1162 &port_regs->CommonRegs.ispControlStatus,
1163 (ISP_CONTROL_LINK_DN_0) |
1164 (ISP_CONTROL_LINK_DN_0 << 16));
1165 break;
1166
1167 case 1:
1168 ql_write_common_reg(qdev,
1169 &port_regs->CommonRegs.ispControlStatus,
1170 (ISP_CONTROL_LINK_DN_1) |
1171 (ISP_CONTROL_LINK_DN_1 << 16));
1172 break;
1173
1174 default:
1175 return 1;
1176 }
1177
1178 return 0;
1179}
1180
1181/*
1182 * Caller holds hw_lock.
1183 */
1184static int ql_this_adapter_controls_port(struct ql3_adapter *qdev,
1185 u32 mac_index)
1186{
1187 struct ql3xxx_port_registers __iomem *port_regs =
1188 qdev->mem_map_registers;
1189 u32 bitToCheck = 0;
1190 u32 temp;
1191
1192 switch (mac_index) {
1193 case 0:
1194 bitToCheck = PORT_STATUS_F1_ENABLED;
1195 break;
1196 case 1:
1197 bitToCheck = PORT_STATUS_F3_ENABLED;
1198 break;
1199 default:
1200 break;
1201 }
1202
1203 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1204 if (temp & bitToCheck) {
1205 if (netif_msg_link(qdev))
1206 printk(KERN_DEBUG PFX
1207 "%s: is not link master.\n", qdev->ndev->name);
1208 return 0;
1209 } else {
1210 if (netif_msg_link(qdev))
1211 printk(KERN_DEBUG PFX
1212 "%s: is link master.\n", qdev->ndev->name);
1213 return 1;
1214 }
1215}
1216
1217static void ql_phy_reset_ex(struct ql3_adapter *qdev, u32 mac_index)
1218{
1219 ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET, mac_index);
1220}
1221
1222static void ql_phy_start_neg_ex(struct ql3_adapter *qdev, u32 mac_index)
1223{
1224 u16 reg;
1225
1226 ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER,
1227 PHY_NEG_PAUSE | PHY_NEG_ADV_SPEED | 1, mac_index);
1228
1229 ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, mac_index);
1230 ql_mii_write_reg_ex(qdev, CONTROL_REG, reg | PHY_CTRL_RESTART_NEG,
1231 mac_index);
1232}
1233
1234static void ql_phy_init_ex(struct ql3_adapter *qdev, u32 mac_index)
1235{
1236 ql_phy_reset_ex(qdev, mac_index);
1237 ql_phy_start_neg_ex(qdev, mac_index);
1238}
1239
1240/*
1241 * Caller holds hw_lock.
1242 */
1243static u32 ql_get_link_state(struct ql3_adapter *qdev)
1244{
1245 struct ql3xxx_port_registers __iomem *port_regs =
1246 qdev->mem_map_registers;
1247 u32 bitToCheck = 0;
1248 u32 temp, linkState;
1249
1250 switch (qdev->mac_index) {
1251 case 0:
1252 bitToCheck = PORT_STATUS_UP0;
1253 break;
1254 case 1:
1255 bitToCheck = PORT_STATUS_UP1;
1256 break;
1257 }
1258 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1259 if (temp & bitToCheck) {
1260 linkState = LS_UP;
1261 } else {
1262 linkState = LS_DOWN;
1263 if (netif_msg_link(qdev))
1264 printk(KERN_WARNING PFX
1265 "%s: Link is down.\n", qdev->ndev->name);
1266 }
1267 return linkState;
1268}
1269
1270static int ql_port_start(struct ql3_adapter *qdev)
1271{
1272 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1273 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1274 2) << 7))
1275 return -1;
1276
1277 if (ql_is_fiber(qdev)) {
1278 ql_petbi_init(qdev);
1279 } else {
1280 /* Copper port */
1281 ql_phy_init_ex(qdev, qdev->mac_index);
1282 }
1283
1284 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1285 return 0;
1286}
1287
1288static int ql_finish_auto_neg(struct ql3_adapter *qdev)
1289{
1290
1291 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1292 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1293 2) << 7))
1294 return -1;
1295
1296 if (!ql_auto_neg_error(qdev)) {
1297 if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
1298 /* configure the MAC */
1299 if (netif_msg_link(qdev))
1300 printk(KERN_DEBUG PFX
1301 "%s: Configuring link.\n",
1302 qdev->ndev->
1303 name);
1304 ql_mac_cfg_soft_reset(qdev, 1);
1305 ql_mac_cfg_gig(qdev,
1306 (ql_get_link_speed
1307 (qdev) ==
1308 SPEED_1000));
1309 ql_mac_cfg_full_dup(qdev,
1310 ql_is_link_full_dup
1311 (qdev));
1312 ql_mac_cfg_pause(qdev,
1313 ql_is_neg_pause
1314 (qdev));
1315 ql_mac_cfg_soft_reset(qdev, 0);
1316
1317 /* enable the MAC */
1318 if (netif_msg_link(qdev))
1319 printk(KERN_DEBUG PFX
1320 "%s: Enabling mac.\n",
1321 qdev->ndev->
1322 name);
1323 ql_mac_enable(qdev, 1);
1324 }
1325
1326 if (netif_msg_link(qdev))
1327 printk(KERN_DEBUG PFX
1328 "%s: Change port_link_state LS_DOWN to LS_UP.\n",
1329 qdev->ndev->name);
1330 qdev->port_link_state = LS_UP;
1331 netif_start_queue(qdev->ndev);
1332 netif_carrier_on(qdev->ndev);
1333 if (netif_msg_link(qdev))
1334 printk(KERN_INFO PFX
1335 "%s: Link is up at %d Mbps, %s duplex.\n",
1336 qdev->ndev->name,
1337 ql_get_link_speed(qdev),
1338 ql_is_link_full_dup(qdev)
1339 ? "full" : "half");
1340
1341 } else { /* Remote error detected */
1342
1343 if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
1344 if (netif_msg_link(qdev))
1345 printk(KERN_DEBUG PFX
1346 "%s: Remote error detected. "
1347 "Calling ql_port_start().\n",
1348 qdev->ndev->
1349 name);
1350 /*
1351 * ql_port_start() is shared code and needs
1352 * to lock the PHY on it's own.
1353 */
1354 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1355 if(ql_port_start(qdev)) {/* Restart port */
1356 return -1;
1357 } else
1358 return 0;
1359 }
1360 }
1361 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1362 return 0;
1363}
1364
1365static void ql_link_state_machine(struct ql3_adapter *qdev)
1366{
1367 u32 curr_link_state;
1368 unsigned long hw_flags;
1369
1370 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1371
1372 curr_link_state = ql_get_link_state(qdev);
1373
1374 if (test_bit(QL_RESET_ACTIVE,&qdev->flags)) {
1375 if (netif_msg_link(qdev))
1376 printk(KERN_INFO PFX
1377 "%s: Reset in progress, skip processing link "
1378 "state.\n", qdev->ndev->name);
1379 return;
1380 }
1381
1382 switch (qdev->port_link_state) {
1383 default:
1384 if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
1385 ql_port_start(qdev);
1386 }
1387 qdev->port_link_state = LS_DOWN;
1388 /* Fall Through */
1389
1390 case LS_DOWN:
1391 if (netif_msg_link(qdev))
1392 printk(KERN_DEBUG PFX
1393 "%s: port_link_state = LS_DOWN.\n",
1394 qdev->ndev->name);
1395 if (curr_link_state == LS_UP) {
1396 if (netif_msg_link(qdev))
1397 printk(KERN_DEBUG PFX
1398 "%s: curr_link_state = LS_UP.\n",
1399 qdev->ndev->name);
1400 if (ql_is_auto_neg_complete(qdev))
1401 ql_finish_auto_neg(qdev);
1402
1403 if (qdev->port_link_state == LS_UP)
1404 ql_link_down_detect_clear(qdev);
1405
1406 }
1407 break;
1408
1409 case LS_UP:
1410 /*
1411 * See if the link is currently down or went down and came
1412 * back up
1413 */
1414 if ((curr_link_state == LS_DOWN) || ql_link_down_detect(qdev)) {
1415 if (netif_msg_link(qdev))
1416 printk(KERN_INFO PFX "%s: Link is down.\n",
1417 qdev->ndev->name);
1418 qdev->port_link_state = LS_DOWN;
1419 }
1420 break;
1421 }
1422 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1423}
1424
1425/*
1426 * Caller must take hw_lock and QL_PHY_GIO_SEM.
1427 */
1428static void ql_get_phy_owner(struct ql3_adapter *qdev)
1429{
1430 if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
1431 set_bit(QL_LINK_MASTER,&qdev->flags);
1432 else
1433 clear_bit(QL_LINK_MASTER,&qdev->flags);
1434}
1435
1436/*
1437 * Caller must take hw_lock and QL_PHY_GIO_SEM.
1438 */
1439static void ql_init_scan_mode(struct ql3_adapter *qdev)
1440{
1441 ql_mii_enable_scan_mode(qdev);
1442
1443 if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
1444 if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
1445 ql_petbi_init_ex(qdev, qdev->mac_index);
1446 } else {
1447 if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
1448 ql_phy_init_ex(qdev, qdev->mac_index);
1449 }
1450}
1451
1452/*
1453 * MII_Setup needs to be called before taking the PHY out of reset so that the
1454 * management interface clock speed can be set properly. It would be better if
1455 * we had a way to disable MDC until after the PHY is out of reset, but we
1456 * don't have that capability.
1457 */
1458static int ql_mii_setup(struct ql3_adapter *qdev)
1459{
1460 u32 reg;
1461 struct ql3xxx_port_registers __iomem *port_regs =
1462 qdev->mem_map_registers;
1463
1464 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1465 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1466 2) << 7))
1467 return -1;
1468
1469 /* Divide 125MHz clock by 28 to meet PHY timing requirements */
1470 reg = MAC_MII_CONTROL_CLK_SEL_DIV28;
1471
1472 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
1473 reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));
1474
1475 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1476 return 0;
1477}
1478
1479static u32 ql_supported_modes(struct ql3_adapter *qdev)
1480{
1481 u32 supported;
1482
1483 if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
1484 supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
1485 | SUPPORTED_Autoneg;
1486 } else {
1487 supported = SUPPORTED_10baseT_Half
1488 | SUPPORTED_10baseT_Full
1489 | SUPPORTED_100baseT_Half
1490 | SUPPORTED_100baseT_Full
1491 | SUPPORTED_1000baseT_Half
1492 | SUPPORTED_1000baseT_Full
1493 | SUPPORTED_Autoneg | SUPPORTED_TP;
1494 }
1495
1496 return supported;
1497}
1498
1499static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
1500{
1501 int status;
1502 unsigned long hw_flags;
1503 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1504 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1505 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1506 2) << 7))
1507 return 0;
1508 status = ql_is_auto_cfg(qdev);
1509 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1510 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1511 return status;
1512}
1513
1514static u32 ql_get_speed(struct ql3_adapter *qdev)
1515{
1516 u32 status;
1517 unsigned long hw_flags;
1518 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1519 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1520 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1521 2) << 7))
1522 return 0;
1523 status = ql_get_link_speed(qdev);
1524 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1525 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1526 return status;
1527}
1528
1529static int ql_get_full_dup(struct ql3_adapter *qdev)
1530{
1531 int status;
1532 unsigned long hw_flags;
1533 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1534 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1535 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1536 2) << 7))
1537 return 0;
1538 status = ql_is_link_full_dup(qdev);
1539 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1540 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1541 return status;
1542}
1543
1544
1545static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
1546{
1547 struct ql3_adapter *qdev = netdev_priv(ndev);
1548
1549 ecmd->transceiver = XCVR_INTERNAL;
1550 ecmd->supported = ql_supported_modes(qdev);
1551
1552 if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
1553 ecmd->port = PORT_FIBRE;
1554 } else {
1555 ecmd->port = PORT_TP;
1556 ecmd->phy_address = qdev->PHYAddr;
1557 }
1558 ecmd->advertising = ql_supported_modes(qdev);
1559 ecmd->autoneg = ql_get_auto_cfg_status(qdev);
1560 ecmd->speed = ql_get_speed(qdev);
1561 ecmd->duplex = ql_get_full_dup(qdev);
1562 return 0;
1563}
1564
1565static void ql_get_drvinfo(struct net_device *ndev,
1566 struct ethtool_drvinfo *drvinfo)
1567{
1568 struct ql3_adapter *qdev = netdev_priv(ndev);
1569 strncpy(drvinfo->driver, ql3xxx_driver_name, 32);
1570 strncpy(drvinfo->version, ql3xxx_driver_version, 32);
1571 strncpy(drvinfo->fw_version, "N/A", 32);
1572 strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
1573 drvinfo->n_stats = 0;
1574 drvinfo->testinfo_len = 0;
1575 drvinfo->regdump_len = 0;
1576 drvinfo->eedump_len = 0;
1577}
1578
1579static u32 ql_get_msglevel(struct net_device *ndev)
1580{
1581 struct ql3_adapter *qdev = netdev_priv(ndev);
1582 return qdev->msg_enable;
1583}
1584
1585static void ql_set_msglevel(struct net_device *ndev, u32 value)
1586{
1587 struct ql3_adapter *qdev = netdev_priv(ndev);
1588 qdev->msg_enable = value;
1589}
1590
1591static struct ethtool_ops ql3xxx_ethtool_ops = {
1592 .get_settings = ql_get_settings,
1593 .get_drvinfo = ql_get_drvinfo,
1594 .get_perm_addr = ethtool_op_get_perm_addr,
1595 .get_link = ethtool_op_get_link,
1596 .get_msglevel = ql_get_msglevel,
1597 .set_msglevel = ql_set_msglevel,
1598};
1599
1600static int ql_populate_free_queue(struct ql3_adapter *qdev)
1601{
1602 struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
1603 u64 map;
1604
1605 while (lrg_buf_cb) {
1606 if (!lrg_buf_cb->skb) {
1607 lrg_buf_cb->skb = dev_alloc_skb(qdev->lrg_buffer_len);
1608 if (unlikely(!lrg_buf_cb->skb)) {
1609 printk(KERN_DEBUG PFX
1610 "%s: Failed dev_alloc_skb().\n",
1611 qdev->ndev->name);
1612 break;
1613 } else {
1614 /*
1615 * We save some space to copy the ethhdr from
1616 * first buffer
1617 */
1618 skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
1619 map = pci_map_single(qdev->pdev,
1620 lrg_buf_cb->skb->data,
1621 qdev->lrg_buffer_len -
1622 QL_HEADER_SPACE,
1623 PCI_DMA_FROMDEVICE);
1624 lrg_buf_cb->buf_phy_addr_low =
1625 cpu_to_le32(LS_64BITS(map));
1626 lrg_buf_cb->buf_phy_addr_high =
1627 cpu_to_le32(MS_64BITS(map));
1628 pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
1629 pci_unmap_len_set(lrg_buf_cb, maplen,
1630 qdev->lrg_buffer_len -
1631 QL_HEADER_SPACE);
1632 --qdev->lrg_buf_skb_check;
1633 if (!qdev->lrg_buf_skb_check)
1634 return 1;
1635 }
1636 }
1637 lrg_buf_cb = lrg_buf_cb->next;
1638 }
1639 return 0;
1640}
1641
1642/*
1643 * Caller holds hw_lock.
1644 */
1645static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
1646{
1647 struct bufq_addr_element *lrg_buf_q_ele;
1648 int i;
1649 struct ql_rcv_buf_cb *lrg_buf_cb;
1650 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
1651
1652 if ((qdev->lrg_buf_free_count >= 8)
1653 && (qdev->lrg_buf_release_cnt >= 16)) {
1654
1655 if (qdev->lrg_buf_skb_check)
1656 if (!ql_populate_free_queue(qdev))
1657 return;
1658
1659 lrg_buf_q_ele = qdev->lrg_buf_next_free;
1660
1661 while ((qdev->lrg_buf_release_cnt >= 16)
1662 && (qdev->lrg_buf_free_count >= 8)) {
1663
1664 for (i = 0; i < 8; i++) {
1665 lrg_buf_cb =
1666 ql_get_from_lrg_buf_free_list(qdev);
1667 lrg_buf_q_ele->addr_high =
1668 lrg_buf_cb->buf_phy_addr_high;
1669 lrg_buf_q_ele->addr_low =
1670 lrg_buf_cb->buf_phy_addr_low;
1671 lrg_buf_q_ele++;
1672
1673 qdev->lrg_buf_release_cnt--;
1674 }
1675
1676 qdev->lrg_buf_q_producer_index++;
1677
1678 if (qdev->lrg_buf_q_producer_index == NUM_LBUFQ_ENTRIES)
1679 qdev->lrg_buf_q_producer_index = 0;
1680
1681 if (qdev->lrg_buf_q_producer_index ==
1682 (NUM_LBUFQ_ENTRIES - 1)) {
1683 lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
1684 }
1685 }
1686
1687 qdev->lrg_buf_next_free = lrg_buf_q_ele;
1688
1689 ql_write_common_reg(qdev,
1690 (u32 *) & port_regs->CommonRegs.
1691 rxLargeQProducerIndex,
1692 qdev->lrg_buf_q_producer_index);
1693 }
1694}
1695
1696static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
1697 struct ob_mac_iocb_rsp *mac_rsp)
1698{
1699 struct ql_tx_buf_cb *tx_cb;
1700
1701 tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
1702 pci_unmap_single(qdev->pdev,
1703 pci_unmap_addr(tx_cb, mapaddr),
1704 pci_unmap_len(tx_cb, maplen), PCI_DMA_TODEVICE);
1705 dev_kfree_skb_irq(tx_cb->skb);
1706 qdev->stats.tx_packets++;
1707 qdev->stats.tx_bytes += tx_cb->skb->len;
1708 tx_cb->skb = NULL;
1709 atomic_inc(&qdev->tx_count);
1710}
1711
1712static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
1713 struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
1714{
1715 long int offset;
1716 u32 lrg_buf_phy_addr_low = 0;
1717 struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
1718 struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
1719 u32 *curr_ial_ptr;
1720 struct sk_buff *skb;
1721 u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);
1722
1723 /*
1724 * Get the inbound address list (small buffer).
1725 */
1726 offset = qdev->small_buf_index * QL_SMALL_BUFFER_SIZE;
1727 if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
1728 qdev->small_buf_index = 0;
1729
1730 curr_ial_ptr = (u32 *) (qdev->small_buf_virt_addr + offset);
1731 qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
1732 qdev->small_buf_release_cnt++;
1733
1734 /* start of first buffer */
1735 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
1736 lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
1737 qdev->lrg_buf_release_cnt++;
1738 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
1739 qdev->lrg_buf_index = 0;
1740 curr_ial_ptr++; /* 64-bit pointers require two incs. */
1741 curr_ial_ptr++;
1742
1743 /* start of second buffer */
1744 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
1745 lrg_buf_cb2 = &qdev->lrg_buf[qdev->lrg_buf_index];
1746
1747 /*
1748 * Second buffer gets sent up the stack.
1749 */
1750 qdev->lrg_buf_release_cnt++;
1751 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
1752 qdev->lrg_buf_index = 0;
1753 skb = lrg_buf_cb2->skb;
1754
1755 qdev->stats.rx_packets++;
1756 qdev->stats.rx_bytes += length;
1757
1758 skb_put(skb, length);
1759 pci_unmap_single(qdev->pdev,
1760 pci_unmap_addr(lrg_buf_cb2, mapaddr),
1761 pci_unmap_len(lrg_buf_cb2, maplen),
1762 PCI_DMA_FROMDEVICE);
1763 prefetch(skb->data);
1764 skb->dev = qdev->ndev;
1765 skb->ip_summed = CHECKSUM_NONE;
1766 skb->protocol = eth_type_trans(skb, qdev->ndev);
1767
1768 netif_receive_skb(skb);
1769 qdev->ndev->last_rx = jiffies;
1770 lrg_buf_cb2->skb = NULL;
1771
1772 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
1773 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
1774}
1775
1776static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
1777 struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
1778{
1779 long int offset;
1780 u32 lrg_buf_phy_addr_low = 0;
1781 struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
1782 struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
1783 u32 *curr_ial_ptr;
1784 struct sk_buff *skb1, *skb2;
1785 struct net_device *ndev = qdev->ndev;
1786 u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
1787 u16 size = 0;
1788
1789 /*
1790 * Get the inbound address list (small buffer).
1791 */
1792
1793 offset = qdev->small_buf_index * QL_SMALL_BUFFER_SIZE;
1794 if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
1795 qdev->small_buf_index = 0;
1796 curr_ial_ptr = (u32 *) (qdev->small_buf_virt_addr + offset);
1797 qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
1798 qdev->small_buf_release_cnt++;
1799
1800 /* start of first buffer */
1801 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
1802 lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
1803
1804 qdev->lrg_buf_release_cnt++;
1805 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
1806 qdev->lrg_buf_index = 0;
1807 skb1 = lrg_buf_cb1->skb;
1808 curr_ial_ptr++; /* 64-bit pointers require two incs. */
1809 curr_ial_ptr++;
1810
1811 /* start of second buffer */
1812 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
1813 lrg_buf_cb2 = &qdev->lrg_buf[qdev->lrg_buf_index];
1814 skb2 = lrg_buf_cb2->skb;
1815 qdev->lrg_buf_release_cnt++;
1816 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
1817 qdev->lrg_buf_index = 0;
1818
1819 qdev->stats.rx_packets++;
1820 qdev->stats.rx_bytes += length;
1821
1822 /*
1823 * Copy the ethhdr from first buffer to second. This
1824 * is necessary for IP completions.
1825 */
1826 if (*((u16 *) skb1->data) != 0xFFFF)
1827 size = VLAN_ETH_HLEN;
1828 else
1829 size = ETH_HLEN;
1830
1831 skb_put(skb2, length); /* Just the second buffer length here. */
1832 pci_unmap_single(qdev->pdev,
1833 pci_unmap_addr(lrg_buf_cb2, mapaddr),
1834 pci_unmap_len(lrg_buf_cb2, maplen),
1835 PCI_DMA_FROMDEVICE);
1836 prefetch(skb2->data);
1837
1838 memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size);
1839 skb2->dev = qdev->ndev;
1840 skb2->ip_summed = CHECKSUM_NONE;
1841 skb2->protocol = eth_type_trans(skb2, qdev->ndev);
1842
1843 netif_receive_skb(skb2);
1844 ndev->last_rx = jiffies;
1845 lrg_buf_cb2->skb = NULL;
1846
1847 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
1848 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
1849}
1850
1851static int ql_tx_rx_clean(struct ql3_adapter *qdev,
1852 int *tx_cleaned, int *rx_cleaned, int work_to_do)
1853{
1854 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
1855 struct net_rsp_iocb *net_rsp;
1856 struct net_device *ndev = qdev->ndev;
1857 unsigned long hw_flags;
1858
1859 /* While there are entries in the completion queue. */
1860 while ((cpu_to_le32(*(qdev->prsp_producer_index)) !=
1861 qdev->rsp_consumer_index) && (*rx_cleaned < work_to_do)) {
1862
1863 net_rsp = qdev->rsp_current;
1864 switch (net_rsp->opcode) {
1865
1866 case OPCODE_OB_MAC_IOCB_FN0:
1867 case OPCODE_OB_MAC_IOCB_FN2:
1868 ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
1869 net_rsp);
1870 (*tx_cleaned)++;
1871 break;
1872
1873 case OPCODE_IB_MAC_IOCB:
1874 ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
1875 net_rsp);
1876 (*rx_cleaned)++;
1877 break;
1878
1879 case OPCODE_IB_IP_IOCB:
1880 ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
1881 net_rsp);
1882 (*rx_cleaned)++;
1883 break;
1884 default:
1885 {
1886 u32 *tmp = (u32 *) net_rsp;
1887 printk(KERN_ERR PFX
1888 "%s: Hit default case, not "
1889 "handled!\n"
1890 " dropping the packet, opcode = "
1891 "%x.\n",
1892 ndev->name, net_rsp->opcode);
1893 printk(KERN_ERR PFX
1894 "0x%08lx 0x%08lx 0x%08lx 0x%08lx \n",
1895 (unsigned long int)tmp[0],
1896 (unsigned long int)tmp[1],
1897 (unsigned long int)tmp[2],
1898 (unsigned long int)tmp[3]);
1899 }
1900 }
1901
1902 qdev->rsp_consumer_index++;
1903
1904 if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
1905 qdev->rsp_consumer_index = 0;
1906 qdev->rsp_current = qdev->rsp_q_virt_addr;
1907 } else {
1908 qdev->rsp_current++;
1909 }
1910 }
1911
1912 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1913
1914 ql_update_lrg_bufq_prod_index(qdev);
1915
1916 if (qdev->small_buf_release_cnt >= 16) {
1917 while (qdev->small_buf_release_cnt >= 16) {
1918 qdev->small_buf_q_producer_index++;
1919
1920 if (qdev->small_buf_q_producer_index ==
1921 NUM_SBUFQ_ENTRIES)
1922 qdev->small_buf_q_producer_index = 0;
1923 qdev->small_buf_release_cnt -= 8;
1924 }
1925
1926 ql_write_common_reg(qdev,
1927 (u32 *) & port_regs->CommonRegs.
1928 rxSmallQProducerIndex,
1929 qdev->small_buf_q_producer_index);
1930 }
1931
1932 ql_write_common_reg(qdev,
1933 (u32 *) & port_regs->CommonRegs.rspQConsumerIndex,
1934 qdev->rsp_consumer_index);
1935 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1936
1937 if (unlikely(netif_queue_stopped(qdev->ndev))) {
1938 if (netif_queue_stopped(qdev->ndev) &&
1939 (atomic_read(&qdev->tx_count) > (NUM_REQ_Q_ENTRIES / 4)))
1940 netif_wake_queue(qdev->ndev);
1941 }
1942
1943 return *tx_cleaned + *rx_cleaned;
1944}
1945
1946static int ql_poll(struct net_device *ndev, int *budget)
1947{
1948 struct ql3_adapter *qdev = netdev_priv(ndev);
1949 int work_to_do = min(*budget, ndev->quota);
1950 int rx_cleaned = 0, tx_cleaned = 0;
1951
1952 if (!netif_carrier_ok(ndev))
1953 goto quit_polling;
1954
1955 ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, work_to_do);
1956 *budget -= rx_cleaned;
1957 ndev->quota -= rx_cleaned;
1958
1959 if ((!tx_cleaned && !rx_cleaned) || !netif_running(ndev)) {
1960quit_polling:
1961 netif_rx_complete(ndev);
1962 ql_enable_interrupts(qdev);
1963 return 0;
1964 }
1965 return 1;
1966}
1967
1968static irqreturn_t ql3xxx_isr(int irq, void *dev_id, struct pt_regs *regs)
1969{
1970
1971 struct net_device *ndev = dev_id;
1972 struct ql3_adapter *qdev = netdev_priv(ndev);
1973 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
1974 u32 value;
1975 int handled = 1;
1976 u32 var;
1977
1978 port_regs = qdev->mem_map_registers;
1979
1980 value =
1981 ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
1982
1983 if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
1984 spin_lock(&qdev->adapter_lock);
1985 netif_stop_queue(qdev->ndev);
1986 netif_carrier_off(qdev->ndev);
1987 ql_disable_interrupts(qdev);
1988 qdev->port_link_state = LS_DOWN;
1989 set_bit(QL_RESET_ACTIVE,&qdev->flags) ;
1990
1991 if (value & ISP_CONTROL_FE) {
1992 /*
1993 * Chip Fatal Error.
1994 */
1995 var =
1996 ql_read_page0_reg_l(qdev,
1997 &port_regs->PortFatalErrStatus);
1998 printk(KERN_WARNING PFX
1999 "%s: Resetting chip. PortFatalErrStatus "
2000 "register = 0x%x\n", ndev->name, var);
2001 set_bit(QL_RESET_START,&qdev->flags) ;
2002 } else {
2003 /*
2004 * Soft Reset Requested.
2005 */
2006 set_bit(QL_RESET_PER_SCSI,&qdev->flags) ;
2007 printk(KERN_ERR PFX
2008 "%s: Another function issued a reset to the "
2009 "chip. ISR value = %x.\n", ndev->name, value);
2010 }
2011 queue_work(qdev->workqueue, &qdev->reset_work);
2012 spin_unlock(&qdev->adapter_lock);
2013 } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
2014 ql_disable_interrupts(qdev);
2015 if (likely(netif_rx_schedule_prep(ndev)))
2016 __netif_rx_schedule(ndev);
2017 else
2018 ql_enable_interrupts(qdev);
2019 } else {
2020 return IRQ_NONE;
2021 }
2022
2023 return IRQ_RETVAL(handled);
2024}
2025
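/*
 * hard_start_xmit: maps the skb for DMA, fills in an outbound MAC IOCB and
 * advances the request queue producer index to hand the frame to the chip.
 */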
2026static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
2027{
2028 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
2029 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2030 struct ql_tx_buf_cb *tx_cb;
2031 struct ob_mac_iocb_req *mac_iocb_ptr;
2032 u64 map;
2033
2034 if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
2035 if (!netif_queue_stopped(ndev))
2036 netif_stop_queue(ndev);
2037 return NETDEV_TX_BUSY;
2038 }
2039 tx_cb = &qdev->tx_buf[qdev->req_producer_index] ;
2040 mac_iocb_ptr = tx_cb->queue_entry;
2041 memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
2042 mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
2043 mac_iocb_ptr->flags |= qdev->mb_bit_mask;
2044 mac_iocb_ptr->transaction_id = qdev->req_producer_index;
2045 mac_iocb_ptr->data_len = cpu_to_le16((u16) skb->len);
2046 tx_cb->skb = skb;
2047 map = pci_map_single(qdev->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
2048 mac_iocb_ptr->buf_addr0_low = cpu_to_le32(LS_64BITS(map));
2049 mac_iocb_ptr->buf_addr0_high = cpu_to_le32(MS_64BITS(map));
2050 mac_iocb_ptr->buf_0_len = cpu_to_le32(skb->len | OB_MAC_IOCB_REQ_E);
2051 pci_unmap_addr_set(tx_cb, mapaddr, map);
2052 pci_unmap_len_set(tx_cb, maplen, skb->len);
2053 atomic_dec(&qdev->tx_count);
2054
2055 qdev->req_producer_index++;
2056 if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
2057 qdev->req_producer_index = 0;
2058 wmb();
2059 ql_write_common_reg_l(qdev,
2060 (u32 *) & port_regs->CommonRegs.reqQProducerIndex,
2061 qdev->req_producer_index);
2062
2063 ndev->trans_start = jiffies;
2064 if (netif_msg_tx_queued(qdev))
2065 printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n",
2066 ndev->name, qdev->req_producer_index, skb->len);
2067
2068 return NETDEV_TX_OK;
2069}
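
/*
 * Allocate the DMA-coherent request (transmit) and response (completion)
 * rings.  Each ring must be naturally aligned to its size or the allocation
 * is rejected.
 */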
2070static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
2071{
2072 qdev->req_q_size =
2073 (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));
2074
2075 qdev->req_q_virt_addr =
2076 pci_alloc_consistent(qdev->pdev,
2077 (size_t) qdev->req_q_size,
2078 &qdev->req_q_phy_addr);
2079
2080 if ((qdev->req_q_virt_addr == NULL) ||
2081 LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
2082	 printk(KERN_ERR PFX "%s: reqQ allocation failed.\n",
2083 qdev->ndev->name);
2084 return -ENOMEM;
2085 }
2086
2087 qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
2088
2089 qdev->rsp_q_virt_addr =
2090 pci_alloc_consistent(qdev->pdev,
2091 (size_t) qdev->rsp_q_size,
2092 &qdev->rsp_q_phy_addr);
2093
2094 if ((qdev->rsp_q_virt_addr == NULL) ||
2095 LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
2096 printk(KERN_ERR PFX
2097 "%s: rspQ allocation failed\n",
2098 qdev->ndev->name);
2099 pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
2100 qdev->req_q_virt_addr,
2101 qdev->req_q_phy_addr);
2102 return -ENOMEM;
2103 }
2104
2105 set_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags);
2106
2107 return 0;
2108}
2109
2110static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
2111{
2112 if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags)) {
2113 printk(KERN_INFO PFX
2114 "%s: Already done.\n", qdev->ndev->name);
2115 return;
2116 }
2117
2118 pci_free_consistent(qdev->pdev,
2119 qdev->req_q_size,
2120 qdev->req_q_virt_addr, qdev->req_q_phy_addr);
2121
2122 qdev->req_q_virt_addr = NULL;
2123
2124 pci_free_consistent(qdev->pdev,
2125 qdev->rsp_q_size,
2126 qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
2127
2128 qdev->rsp_q_virt_addr = NULL;
2129
2130 clear_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags);
2131}
2132
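/*
 * Allocate the large and small receive buffer queues (the rings of buffer
 * addresses handed to the chip), rounding each allocation up to at least
 * a page.
 */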
2133static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
2134{
2135 /* Create Large Buffer Queue */
2136 qdev->lrg_buf_q_size =
2137 NUM_LBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
2138 if (qdev->lrg_buf_q_size < PAGE_SIZE)
2139 qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
2140 else
2141 qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
2142
2143 qdev->lrg_buf_q_alloc_virt_addr =
2144 pci_alloc_consistent(qdev->pdev,
2145 qdev->lrg_buf_q_alloc_size,
2146 &qdev->lrg_buf_q_alloc_phy_addr);
2147
2148 if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
2149 printk(KERN_ERR PFX
2150	 "%s: lBufQ allocation failed.\n", qdev->ndev->name);
2151 return -ENOMEM;
2152 }
2153 qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
2154 qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;
2155
2156 /* Create Small Buffer Queue */
2157 qdev->small_buf_q_size =
2158 NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
2159 if (qdev->small_buf_q_size < PAGE_SIZE)
2160 qdev->small_buf_q_alloc_size = PAGE_SIZE;
2161 else
2162 qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
2163
2164 qdev->small_buf_q_alloc_virt_addr =
2165 pci_alloc_consistent(qdev->pdev,
2166 qdev->small_buf_q_alloc_size,
2167 &qdev->small_buf_q_alloc_phy_addr);
2168
2169 if (qdev->small_buf_q_alloc_virt_addr == NULL) {
2170 printk(KERN_ERR PFX
2171 "%s: Small Buffer Queue allocation failed.\n",
2172 qdev->ndev->name);
2173 pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
2174 qdev->lrg_buf_q_alloc_virt_addr,
2175 qdev->lrg_buf_q_alloc_phy_addr);
2176 return -ENOMEM;
2177 }
2178
2179 qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
2180 qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
2181 set_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags);
2182 return 0;
2183}
2184
2185static void ql_free_buffer_queues(struct ql3_adapter *qdev)
2186{
2187 if (!test_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags)) {
2188 printk(KERN_INFO PFX
2189 "%s: Already done.\n", qdev->ndev->name);
2190 return;
2191 }
2192
2193 pci_free_consistent(qdev->pdev,
2194 qdev->lrg_buf_q_alloc_size,
2195 qdev->lrg_buf_q_alloc_virt_addr,
2196 qdev->lrg_buf_q_alloc_phy_addr);
2197
2198 qdev->lrg_buf_q_virt_addr = NULL;
2199
2200 pci_free_consistent(qdev->pdev,
2201 qdev->small_buf_q_alloc_size,
2202 qdev->small_buf_q_alloc_virt_addr,
2203 qdev->small_buf_q_alloc_phy_addr);
2204
2205 qdev->small_buf_q_virt_addr = NULL;
2206
2207 clear_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags);
2208}
2209
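/*
 * Allocate one contiguous block for all small receive buffers and fill the
 * small buffer queue entries with the address of each buffer.
 */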
2210static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
2211{
2212 int i;
2213 struct bufq_addr_element *small_buf_q_entry;
2214
2215	 /* Currently we allocate one chunk of memory and use it for small buffers */
2216 qdev->small_buf_total_size =
2217 (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
2218 QL_SMALL_BUFFER_SIZE);
2219
2220 qdev->small_buf_virt_addr =
2221 pci_alloc_consistent(qdev->pdev,
2222 qdev->small_buf_total_size,
2223 &qdev->small_buf_phy_addr);
2224
2225 if (qdev->small_buf_virt_addr == NULL) {
2226 printk(KERN_ERR PFX
2227 "%s: Failed to get small buffer memory.\n",
2228 qdev->ndev->name);
2229 return -ENOMEM;
2230 }
2231
2232 qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
2233 qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);
2234
2235 small_buf_q_entry = qdev->small_buf_q_virt_addr;
2236
2237 qdev->last_rsp_offset = qdev->small_buf_phy_addr_low;
2238
2239 /* Initialize the small buffer queue. */
2240 for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
2241 small_buf_q_entry->addr_high =
2242 cpu_to_le32(qdev->small_buf_phy_addr_high);
2243 small_buf_q_entry->addr_low =
2244 cpu_to_le32(qdev->small_buf_phy_addr_low +
2245 (i * QL_SMALL_BUFFER_SIZE));
2246 small_buf_q_entry++;
2247 }
2248 qdev->small_buf_index = 0;
2249 set_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags);
2250 return 0;
2251}
2252
2253static void ql_free_small_buffers(struct ql3_adapter *qdev)
2254{
2255 if (!test_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags)) {
2256 printk(KERN_INFO PFX
2257 "%s: Already done.\n", qdev->ndev->name);
2258 return;
2259 }
2260 if (qdev->small_buf_virt_addr != NULL) {
2261 pci_free_consistent(qdev->pdev,
2262 qdev->small_buf_total_size,
2263 qdev->small_buf_virt_addr,
2264 qdev->small_buf_phy_addr);
2265
2266 qdev->small_buf_virt_addr = NULL;
2267 }
2268}
2269
2270static void ql_free_large_buffers(struct ql3_adapter *qdev)
2271{
2272 int i = 0;
2273 struct ql_rcv_buf_cb *lrg_buf_cb;
2274
2275 for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
2276 lrg_buf_cb = &qdev->lrg_buf[i];
2277 if (lrg_buf_cb->skb) {
2278 dev_kfree_skb(lrg_buf_cb->skb);
2279 pci_unmap_single(qdev->pdev,
2280 pci_unmap_addr(lrg_buf_cb, mapaddr),
2281 pci_unmap_len(lrg_buf_cb, maplen),
2282 PCI_DMA_FROMDEVICE);
2283 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2284 } else {
2285 break;
2286 }
2287 }
2288}
2289
2290static void ql_init_large_buffers(struct ql3_adapter *qdev)
2291{
2292 int i;
2293 struct ql_rcv_buf_cb *lrg_buf_cb;
2294 struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;
2295
2296 for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
2297 lrg_buf_cb = &qdev->lrg_buf[i];
2298 buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
2299 buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
2300 buf_addr_ele++;
2301 }
2302 qdev->lrg_buf_index = 0;
2303 qdev->lrg_buf_skb_check = 0;
2304}
2305
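/*
 * Allocate and DMA-map an skb for every large receive buffer control block,
 * reserving QL_HEADER_SPACE at the head of each buffer.
 */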
2306static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2307{
2308 int i;
2309 struct ql_rcv_buf_cb *lrg_buf_cb;
2310 struct sk_buff *skb;
2311 u64 map;
2312
2313 for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
2314 skb = dev_alloc_skb(qdev->lrg_buffer_len);
2315 if (unlikely(!skb)) {
2316			 /* Allocation failed; free what we have and bail out. */
2317			 printk(KERN_ERR PFX
2318			        "%s: large buffer allocation failed "
2319			        "for %d bytes at index %d.\n",
2320			        qdev->ndev->name,
2321			        qdev->lrg_buffer_len, i);
2322 ql_free_large_buffers(qdev);
2323 return -ENOMEM;
2324 } else {
2325
2326 lrg_buf_cb = &qdev->lrg_buf[i];
2327 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2328 lrg_buf_cb->index = i;
2329 lrg_buf_cb->skb = skb;
2330 /*
2331 * We save some space to copy the ethhdr from first
2332 * buffer
2333 */
2334 skb_reserve(skb, QL_HEADER_SPACE);
2335 map = pci_map_single(qdev->pdev,
2336 skb->data,
2337 qdev->lrg_buffer_len -
2338 QL_HEADER_SPACE,
2339 PCI_DMA_FROMDEVICE);
2340 pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
2341 pci_unmap_len_set(lrg_buf_cb, maplen,
2342 qdev->lrg_buffer_len -
2343 QL_HEADER_SPACE);
2344 lrg_buf_cb->buf_phy_addr_low =
2345 cpu_to_le32(LS_64BITS(map));
2346 lrg_buf_cb->buf_phy_addr_high =
2347 cpu_to_le32(MS_64BITS(map));
2348 }
2349 }
2350 return 0;
2351}
2352
2353static void ql_create_send_free_list(struct ql3_adapter *qdev)
2354{
2355 struct ql_tx_buf_cb *tx_cb;
2356 int i;
2357 struct ob_mac_iocb_req *req_q_curr =
2358 qdev->req_q_virt_addr;
2359
2360 /* Create free list of transmit buffers */
2361 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2362 tx_cb = &qdev->tx_buf[i];
2363 tx_cb->skb = NULL;
2364 tx_cb->queue_entry = req_q_curr;
2365 req_q_curr++;
2366 }
2367}
2368
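/*
 * Top-level memory setup: pick the large buffer size from the MTU, allocate
 * the shadow register page, then the request/response rings and both buffer
 * queues, unwinding everything on failure.
 */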
2369static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
2370{
2371 if (qdev->ndev->mtu == NORMAL_MTU_SIZE)
2372 qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
2373 else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
2374 qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
2375 } else {
2376 printk(KERN_ERR PFX
2377 "%s: Invalid mtu size. Only 1500 and 9000 are accepted.\n",
2378 qdev->ndev->name);
2379 return -ENOMEM;
2380 }
2381 qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
2382 qdev->max_frame_size =
2383 (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
2384
2385 /*
2386 * First allocate a page of shared memory and use it for shadow
2387 * locations of Network Request Queue Consumer Address Register and
2388 * Network Completion Queue Producer Index Register
2389 */
2390 qdev->shadow_reg_virt_addr =
2391 pci_alloc_consistent(qdev->pdev,
2392 PAGE_SIZE, &qdev->shadow_reg_phy_addr);
2393
2394 if (qdev->shadow_reg_virt_addr != NULL) {
2395 qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr;
2396 qdev->req_consumer_index_phy_addr_high =
2397 MS_64BITS(qdev->shadow_reg_phy_addr);
2398 qdev->req_consumer_index_phy_addr_low =
2399 LS_64BITS(qdev->shadow_reg_phy_addr);
2400
2401 qdev->prsp_producer_index =
2402 (u32 *) (((u8 *) qdev->preq_consumer_index) + 8);
2403 qdev->rsp_producer_index_phy_addr_high =
2404 qdev->req_consumer_index_phy_addr_high;
2405 qdev->rsp_producer_index_phy_addr_low =
2406 qdev->req_consumer_index_phy_addr_low + 8;
2407 } else {
2408 printk(KERN_ERR PFX
2409 "%s: shadowReg Alloc failed.\n", qdev->ndev->name);
2410 return -ENOMEM;
2411 }
2412
2413 if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
2414 printk(KERN_ERR PFX
2415 "%s: ql_alloc_net_req_rsp_queues failed.\n",
2416 qdev->ndev->name);
2417 goto err_req_rsp;
2418 }
2419
2420 if (ql_alloc_buffer_queues(qdev) != 0) {
2421 printk(KERN_ERR PFX
2422 "%s: ql_alloc_buffer_queues failed.\n",
2423 qdev->ndev->name);
2424 goto err_buffer_queues;
2425 }
2426
2427 if (ql_alloc_small_buffers(qdev) != 0) {
2428 printk(KERN_ERR PFX
2429 "%s: ql_alloc_small_buffers failed\n", qdev->ndev->name);
2430 goto err_small_buffers;
2431 }
2432
2433 if (ql_alloc_large_buffers(qdev) != 0) {
2434 printk(KERN_ERR PFX
2435 "%s: ql_alloc_large_buffers failed\n", qdev->ndev->name);
2436 goto err_small_buffers;
2437 }
2438
2439 /* Initialize the large buffer queue. */
2440 ql_init_large_buffers(qdev);
2441 ql_create_send_free_list(qdev);
2442
2443 qdev->rsp_current = qdev->rsp_q_virt_addr;
2444
2445 return 0;
2446
2447err_small_buffers:
2448 ql_free_buffer_queues(qdev);
2449err_buffer_queues:
2450 ql_free_net_req_rsp_queues(qdev);
2451err_req_rsp:
2452 pci_free_consistent(qdev->pdev,
2453 PAGE_SIZE,
2454 qdev->shadow_reg_virt_addr,
2455 qdev->shadow_reg_phy_addr);
2456
2457 return -ENOMEM;
2458}
2459
2460static void ql_free_mem_resources(struct ql3_adapter *qdev)
2461{
2462 ql_free_large_buffers(qdev);
2463 ql_free_small_buffers(qdev);
2464 ql_free_buffer_queues(qdev);
2465 ql_free_net_req_rsp_queues(qdev);
2466 if (qdev->shadow_reg_virt_addr != NULL) {
2467 pci_free_consistent(qdev->pdev,
2468 PAGE_SIZE,
2469 qdev->shadow_reg_virt_addr,
2470 qdev->shadow_reg_phy_addr);
2471 qdev->shadow_reg_virt_addr = NULL;
2472 }
2473}
2474
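/*
 * Program the chip's local RAM layout (buflet sizes, IP/TCP hash tables,
 * NCB and DRB tables) from NVRAM values, under the DDR RAM semaphore.
 */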
2475static int ql_init_misc_registers(struct ql3_adapter *qdev)
2476{
2477 struct ql3xxx_local_ram_registers *local_ram =
2478 (struct ql3xxx_local_ram_registers *)qdev->mem_map_registers;
2479
2480 if(ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
2481 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
2482 2) << 4))
2483 return -1;
2484
2485 ql_write_page2_reg(qdev,
2486 &local_ram->bufletSize, qdev->nvram_data.bufletSize);
2487
2488 ql_write_page2_reg(qdev,
2489 &local_ram->maxBufletCount,
2490 qdev->nvram_data.bufletCount);
2491
2492 ql_write_page2_reg(qdev,
2493 &local_ram->freeBufletThresholdLow,
2494 (qdev->nvram_data.tcpWindowThreshold25 << 16) |
2495 (qdev->nvram_data.tcpWindowThreshold0));
2496
2497 ql_write_page2_reg(qdev,
2498 &local_ram->freeBufletThresholdHigh,
2499 qdev->nvram_data.tcpWindowThreshold50);
2500
2501 ql_write_page2_reg(qdev,
2502 &local_ram->ipHashTableBase,
2503 (qdev->nvram_data.ipHashTableBaseHi << 16) |
2504 qdev->nvram_data.ipHashTableBaseLo);
2505 ql_write_page2_reg(qdev,
2506 &local_ram->ipHashTableCount,
2507 qdev->nvram_data.ipHashTableSize);
2508 ql_write_page2_reg(qdev,
2509 &local_ram->tcpHashTableBase,
2510 (qdev->nvram_data.tcpHashTableBaseHi << 16) |
2511 qdev->nvram_data.tcpHashTableBaseLo);
2512 ql_write_page2_reg(qdev,
2513 &local_ram->tcpHashTableCount,
2514 qdev->nvram_data.tcpHashTableSize);
2515 ql_write_page2_reg(qdev,
2516 &local_ram->ncbBase,
2517 (qdev->nvram_data.ncbTableBaseHi << 16) |
2518 qdev->nvram_data.ncbTableBaseLo);
2519 ql_write_page2_reg(qdev,
2520 &local_ram->maxNcbCount,
2521 qdev->nvram_data.ncbTableSize);
2522 ql_write_page2_reg(qdev,
2523 &local_ram->drbBase,
2524 (qdev->nvram_data.drbTableBaseHi << 16) |
2525 qdev->nvram_data.drbTableBaseLo);
2526 ql_write_page2_reg(qdev,
2527 &local_ram->maxDrbCount,
2528 qdev->nvram_data.drbTableSize);
2529 ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
2530 return 0;
2531}
2532
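/*
 * Bring the adapter to an operational state: set up the request, response
 * and buffer queue registers, perform the one-time chip configuration if
 * this function is the first one up, program the MAC address and wait for
 * the chip to report configuration complete.
 */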
2533static int ql_adapter_initialize(struct ql3_adapter *qdev)
2534{
2535 u32 value;
2536 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2537 struct ql3xxx_host_memory_registers __iomem *hmem_regs =
2538 (struct ql3xxx_host_memory_registers *)port_regs;
2539 u32 delay = 10;
2540 int status = 0;
2541
2542 if(ql_mii_setup(qdev))
2543 return -1;
2544
2545	 /* Bring the PHY out of reset */
2546 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
2547 (ISP_SERIAL_PORT_IF_WE |
2548 (ISP_SERIAL_PORT_IF_WE << 16)));
2549
2550 qdev->port_link_state = LS_DOWN;
2551 netif_carrier_off(qdev->ndev);
2552
2553 /* V2 chip fix for ARS-39168. */
2554 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
2555 (ISP_SERIAL_PORT_IF_SDE |
2556 (ISP_SERIAL_PORT_IF_SDE << 16)));
2557
2558 /* Request Queue Registers */
2559 *((u32 *) (qdev->preq_consumer_index)) = 0;
2560 atomic_set(&qdev->tx_count,NUM_REQ_Q_ENTRIES);
2561 qdev->req_producer_index = 0;
2562
2563 ql_write_page1_reg(qdev,
2564 &hmem_regs->reqConsumerIndexAddrHigh,
2565 qdev->req_consumer_index_phy_addr_high);
2566 ql_write_page1_reg(qdev,
2567 &hmem_regs->reqConsumerIndexAddrLow,
2568 qdev->req_consumer_index_phy_addr_low);
2569
2570 ql_write_page1_reg(qdev,
2571 &hmem_regs->reqBaseAddrHigh,
2572 MS_64BITS(qdev->req_q_phy_addr));
2573 ql_write_page1_reg(qdev,
2574 &hmem_regs->reqBaseAddrLow,
2575 LS_64BITS(qdev->req_q_phy_addr));
2576 ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);
2577
2578 /* Response Queue Registers */
2579 *((u16 *) (qdev->prsp_producer_index)) = 0;
2580 qdev->rsp_consumer_index = 0;
2581 qdev->rsp_current = qdev->rsp_q_virt_addr;
2582
2583 ql_write_page1_reg(qdev,
2584 &hmem_regs->rspProducerIndexAddrHigh,
2585 qdev->rsp_producer_index_phy_addr_high);
2586
2587 ql_write_page1_reg(qdev,
2588 &hmem_regs->rspProducerIndexAddrLow,
2589 qdev->rsp_producer_index_phy_addr_low);
2590
2591 ql_write_page1_reg(qdev,
2592 &hmem_regs->rspBaseAddrHigh,
2593 MS_64BITS(qdev->rsp_q_phy_addr));
2594
2595 ql_write_page1_reg(qdev,
2596 &hmem_regs->rspBaseAddrLow,
2597 LS_64BITS(qdev->rsp_q_phy_addr));
2598
2599 ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);
2600
2601 /* Large Buffer Queue */
2602 ql_write_page1_reg(qdev,
2603 &hmem_regs->rxLargeQBaseAddrHigh,
2604 MS_64BITS(qdev->lrg_buf_q_phy_addr));
2605
2606 ql_write_page1_reg(qdev,
2607 &hmem_regs->rxLargeQBaseAddrLow,
2608 LS_64BITS(qdev->lrg_buf_q_phy_addr));
2609
2610 ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, NUM_LBUFQ_ENTRIES);
2611
2612 ql_write_page1_reg(qdev,
2613 &hmem_regs->rxLargeBufferLength,
2614 qdev->lrg_buffer_len);
2615
2616 /* Small Buffer Queue */
2617 ql_write_page1_reg(qdev,
2618 &hmem_regs->rxSmallQBaseAddrHigh,
2619 MS_64BITS(qdev->small_buf_q_phy_addr));
2620
2621 ql_write_page1_reg(qdev,
2622 &hmem_regs->rxSmallQBaseAddrLow,
2623 LS_64BITS(qdev->small_buf_q_phy_addr));
2624
2625 ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
2626 ql_write_page1_reg(qdev,
2627 &hmem_regs->rxSmallBufferLength,
2628 QL_SMALL_BUFFER_SIZE);
2629
2630 qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
2631 qdev->small_buf_release_cnt = 8;
2632 qdev->lrg_buf_q_producer_index = NUM_LBUFQ_ENTRIES - 1;
2633 qdev->lrg_buf_release_cnt = 8;
2634 qdev->lrg_buf_next_free =
2635 (struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr;
2636 qdev->small_buf_index = 0;
2637 qdev->lrg_buf_index = 0;
2638 qdev->lrg_buf_free_count = 0;
2639 qdev->lrg_buf_free_head = NULL;
2640 qdev->lrg_buf_free_tail = NULL;
2641
2642 ql_write_common_reg(qdev,
2643 (u32 *) & port_regs->CommonRegs.
2644 rxSmallQProducerIndex,
2645 qdev->small_buf_q_producer_index);
2646 ql_write_common_reg(qdev,
2647 (u32 *) & port_regs->CommonRegs.
2648 rxLargeQProducerIndex,
2649 qdev->lrg_buf_q_producer_index);
2650
2651 /*
2652 * Find out if the chip has already been initialized. If it has, then
2653 * we skip some of the initialization.
2654 */
2655 clear_bit(QL_LINK_MASTER, &qdev->flags);
2656 value = ql_read_page0_reg(qdev, &port_regs->portStatus);
2657 if ((value & PORT_STATUS_IC) == 0) {
2658
2659 /* Chip has not been configured yet, so let it rip. */
2660 if(ql_init_misc_registers(qdev)) {
2661 status = -1;
2662 goto out;
2663 }
2664
2665 if (qdev->mac_index)
2666 ql_write_page0_reg(qdev,
2667 &port_regs->mac1MaxFrameLengthReg,
2668 qdev->max_frame_size);
2669 else
2670 ql_write_page0_reg(qdev,
2671 &port_regs->mac0MaxFrameLengthReg,
2672 qdev->max_frame_size);
2673
2674 value = qdev->nvram_data.tcpMaxWindowSize;
2675 ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);
2676
2677 value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;
2678
2679 if(ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
2680 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
2681 * 2) << 13)) {
2682 status = -1;
2683 goto out;
2684 }
2685 ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
2686 ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
2687 (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
2688 16) | (INTERNAL_CHIP_SD |
2689 INTERNAL_CHIP_WE)));
2690 ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
2691 }
2692
2693
2694 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
2695 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
2696 2) << 7)) {
2697 status = -1;
2698 goto out;
2699 }
2700
2701 ql_init_scan_mode(qdev);
2702 ql_get_phy_owner(qdev);
2703
2704 /* Load the MAC Configuration */
2705
2706 /* Program lower 32 bits of the MAC address */
2707 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
2708 (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
2709 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
2710 ((qdev->ndev->dev_addr[2] << 24)
2711 | (qdev->ndev->dev_addr[3] << 16)
2712 | (qdev->ndev->dev_addr[4] << 8)
2713 | qdev->ndev->dev_addr[5]));
2714
2715 /* Program top 16 bits of the MAC address */
2716 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
2717 ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
2718 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
2719 ((qdev->ndev->dev_addr[0] << 8)
2720 | qdev->ndev->dev_addr[1]));
2721
2722 /* Enable Primary MAC */
2723 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
2724 ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
2725 MAC_ADDR_INDIRECT_PTR_REG_PE));
2726
2727 /* Clear Primary and Secondary IP addresses */
2728 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
2729 ((IP_ADDR_INDEX_REG_MASK << 16) |
2730 (qdev->mac_index << 2)));
2731 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
2732
2733 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
2734 ((IP_ADDR_INDEX_REG_MASK << 16) |
2735 ((qdev->mac_index << 2) + 1)));
2736 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
2737
2738 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
2739
2740 /* Indicate Configuration Complete */
2741 ql_write_page0_reg(qdev,
2742 &port_regs->portControl,
2743 ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));
2744
2745 do {
2746 value = ql_read_page0_reg(qdev, &port_regs->portStatus);
2747 if (value & PORT_STATUS_IC)
2748 break;
2749 msleep(500);
2750 } while (--delay);
2751
2752 if (delay == 0) {
2753 printk(KERN_ERR PFX
2754	 "%s: HW initialization timed out.\n", qdev->ndev->name);
2755 status = -1;
2756 goto out;
2757 }
2758
2759 /* Enable Ethernet Function */
2760 value =
2761 (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
2762 PORT_CONTROL_HH);
2763 ql_write_page0_reg(qdev, &port_regs->portControl,
2764 ((value << 16) | value));
2765
2766out:
2767 return status;
2768}
2769
2770/*
2771 * Caller holds hw_lock.
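 * Issues a soft reset to the chip (escalating to a forced soft reset if the
 * first one times out) and waits for the firmware to report completion.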
2772 */
2773static int ql_adapter_reset(struct ql3_adapter *qdev)
2774{
2775 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2776 int status = 0;
2777 u16 value;
2778 int max_wait_time;
2779
2780 set_bit(QL_RESET_ACTIVE, &qdev->flags);
2781 clear_bit(QL_RESET_DONE, &qdev->flags);
2782
2783 /*
2784 * Issue soft reset to chip.
2785 */
2786 printk(KERN_DEBUG PFX
2787 "%s: Issue soft reset to chip.\n",
2788 qdev->ndev->name);
2789 ql_write_common_reg(qdev,
2790 (u32 *) & port_regs->CommonRegs.ispControlStatus,
2791 ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));
2792
2793	 /* Wait up to 5 seconds for the reset to complete. */
2794	 printk(KERN_DEBUG PFX
2795	        "%s: Waiting up to 5 seconds for reset to complete.\n",
2796 qdev->ndev->name);
2797
2798 /* Wait until the firmware tells us the Soft Reset is done */
2799 max_wait_time = 5;
2800 do {
2801 value =
2802 ql_read_common_reg(qdev,
2803 &port_regs->CommonRegs.ispControlStatus);
2804 if ((value & ISP_CONTROL_SR) == 0)
2805 break;
2806
2807 ssleep(1);
2808 } while ((--max_wait_time));
2809
2810 /*
2811 * Also, make sure that the Network Reset Interrupt bit has been
2812 * cleared after the soft reset has taken place.
2813 */
2814 value =
2815 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
2816 if (value & ISP_CONTROL_RI) {
2817 printk(KERN_DEBUG PFX
2818 "ql_adapter_reset: clearing RI after reset.\n");
2819 ql_write_common_reg(qdev,
2820 (u32 *) & port_regs->CommonRegs.
2821 ispControlStatus,
2822 ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
2823 }
2824
2825 if (max_wait_time == 0) {
2826 /* Issue Force Soft Reset */
2827 ql_write_common_reg(qdev,
2828 (u32 *) & port_regs->CommonRegs.
2829 ispControlStatus,
2830 ((ISP_CONTROL_FSR << 16) |
2831 ISP_CONTROL_FSR));
2832 /*
2833 * Wait until the firmware tells us the Force Soft Reset is
2834 * done
2835 */
2836 max_wait_time = 5;
2837 do {
2838 value =
2839 ql_read_common_reg(qdev,
2840 &port_regs->CommonRegs.
2841 ispControlStatus);
2842 if ((value & ISP_CONTROL_FSR) == 0) {
2843 break;
2844 }
2845 ssleep(1);
2846 } while ((--max_wait_time));
2847 }
2848 if (max_wait_time == 0)
2849 status = 1;
2850
2851 clear_bit(QL_RESET_ACTIVE, &qdev->flags);
2852 set_bit(QL_RESET_DONE, &qdev->flags);
2853 return status;
2854}
2855
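/*
 * Determine which network function this device is (port 0 or 1) from the
 * ISP control/status register and set the IOCB opcodes, PHY address and
 * optical/copper flag accordingly.
 */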
2856static void ql_set_mac_info(struct ql3_adapter *qdev)
2857{
2858 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2859 u32 value, port_status;
2860 u8 func_number;
2861
2862 /* Get the function number */
2863 value =
2864 ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
2865 func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
2866 port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
2867 switch (value & ISP_CONTROL_FN_MASK) {
2868 case ISP_CONTROL_FN0_NET:
2869 qdev->mac_index = 0;
2870 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
2871 qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
2872 qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
2873 qdev->mb_bit_mask = FN0_MA_BITS_MASK;
2874 qdev->PHYAddr = PORT0_PHY_ADDRESS;
2875 if (port_status & PORT_STATUS_SM0)
2876 set_bit(QL_LINK_OPTICAL,&qdev->flags);
2877 else
2878 clear_bit(QL_LINK_OPTICAL,&qdev->flags);
2879 break;
2880
2881 case ISP_CONTROL_FN1_NET:
2882 qdev->mac_index = 1;
2883 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
2884 qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
2885 qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
2886 qdev->mb_bit_mask = FN1_MA_BITS_MASK;
2887 qdev->PHYAddr = PORT1_PHY_ADDRESS;
2888 if (port_status & PORT_STATUS_SM1)
2889 set_bit(QL_LINK_OPTICAL,&qdev->flags);
2890 else
2891 clear_bit(QL_LINK_OPTICAL,&qdev->flags);
2892 break;
2893
2894 case ISP_CONTROL_FN0_SCSI:
2895 case ISP_CONTROL_FN1_SCSI:
2896 default:
2897 printk(KERN_DEBUG PFX
2898 "%s: Invalid function number, ispControlStatus = 0x%x\n",
2899 qdev->ndev->name,value);
2900 break;
2901 }
2902 qdev->numPorts = qdev->nvram_data.numPorts;
2903}
2904
2905static void ql_display_dev_info(struct net_device *ndev)
2906{
2907 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
2908 struct pci_dev *pdev = qdev->pdev;
2909
2910 printk(KERN_INFO PFX
2911 "\n%s Adapter %d RevisionID %d found on PCI slot %d.\n",
2912 DRV_NAME, qdev->index, qdev->chip_rev_id, qdev->pci_slot);
2913 printk(KERN_INFO PFX
2914 "%s Interface.\n",
2915 test_bit(QL_LINK_OPTICAL,&qdev->flags) ? "OPTICAL" : "COPPER");
2916
2917 /*
2918 * Print PCI bus width/type.
2919 */
2920 printk(KERN_INFO PFX
2921 "Bus interface is %s %s.\n",
2922 ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
2923 ((qdev->pci_x) ? "PCI-X" : "PCI"));
2924
2925 printk(KERN_INFO PFX
2926 "mem IO base address adjusted = 0x%p\n",
2927 qdev->mem_map_registers);
2928 printk(KERN_INFO PFX "Interrupt number = %d\n", pdev->irq);
2929
2930 if (netif_msg_probe(qdev))
2931 printk(KERN_INFO PFX
2932 "%s: MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
2933 ndev->name, ndev->dev_addr[0], ndev->dev_addr[1],
2934 ndev->dev_addr[2], ndev->dev_addr[3], ndev->dev_addr[4],
2935 ndev->dev_addr[5]);
2936}
2937
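/*
 * Take the interface down: stop the queue, free the IRQ (disabling MSI if it
 * was enabled), stop the adapter timer, optionally reset the chip under the
 * driver lock, and free all memory resources.
 */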
2938static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
2939{
2940 struct net_device *ndev = qdev->ndev;
2941 int retval = 0;
2942
2943 netif_stop_queue(ndev);
2944 netif_carrier_off(ndev);
2945
2946 clear_bit(QL_ADAPTER_UP,&qdev->flags);
2947 clear_bit(QL_LINK_MASTER,&qdev->flags);
2948
2949 ql_disable_interrupts(qdev);
2950
2951 free_irq(qdev->pdev->irq, ndev);
2952
2953 if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) {
2954 printk(KERN_INFO PFX
2955 "%s: calling pci_disable_msi().\n", qdev->ndev->name);
2956 clear_bit(QL_MSI_ENABLED,&qdev->flags);
2957 pci_disable_msi(qdev->pdev);
2958 }
2959
2960 del_timer_sync(&qdev->adapter_timer);
2961
2962 netif_poll_disable(ndev);
2963
2964 if (do_reset) {
2965 int soft_reset;
2966 unsigned long hw_flags;
2967
2968 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
2969 if (ql_wait_for_drvr_lock(qdev)) {
2970 if ((soft_reset = ql_adapter_reset(qdev))) {
2971 printk(KERN_ERR PFX
2972 "%s: ql_adapter_reset(%d) FAILED!\n",
2973 ndev->name, qdev->index);
2974 }
2975 printk(KERN_ERR PFX
2976			 "%s: Releasing driver lock via chip reset.\n", ndev->name);
2977 } else {
2978 printk(KERN_ERR PFX
2979 "%s: Could not acquire driver lock to do "
2980 "reset!\n", ndev->name);
2981 retval = -1;
2982 }
2983 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
2984 }
2985 ql_free_mem_resources(qdev);
2986 return retval;
2987}
2988
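/*
 * Bring the interface up: allocate memory resources, enable MSI if requested,
 * request the IRQ, initialize the adapter under the driver lock, then start
 * the adapter timer and enable interrupts.
 */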
2989static int ql_adapter_up(struct ql3_adapter *qdev)
2990{
2991 struct net_device *ndev = qdev->ndev;
2992 int err;
2993 unsigned long irq_flags = SA_SAMPLE_RANDOM | SA_SHIRQ;
2994 unsigned long hw_flags;
2995
2996 if (ql_alloc_mem_resources(qdev)) {
2997 printk(KERN_ERR PFX
2998	 "%s: Unable to allocate buffers.\n", ndev->name);
2999 return -ENOMEM;
3000 }
3001
3002 if (qdev->msi) {
3003 if (pci_enable_msi(qdev->pdev)) {
3004 printk(KERN_ERR PFX
3005 "%s: User requested MSI, but MSI failed to "
3006 "initialize. Continuing without MSI.\n",
3007 qdev->ndev->name);
3008 qdev->msi = 0;
3009 } else {
3010 printk(KERN_INFO PFX "%s: MSI Enabled...\n", qdev->ndev->name);
3011 set_bit(QL_MSI_ENABLED,&qdev->flags);
3012 irq_flags &= ~SA_SHIRQ;
3013 }
3014 }
3015
3016 if ((err = request_irq(qdev->pdev->irq,
3017 ql3xxx_isr,
3018 irq_flags, ndev->name, ndev))) {
3019 printk(KERN_ERR PFX
3020		 "%s: Failed to reserve interrupt %d - already in use.\n",
3021 ndev->name, qdev->pdev->irq);
3022 goto err_irq;
3023 }
3024
3025 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3026
3027 if ((err = ql_wait_for_drvr_lock(qdev))) {
3028 if ((err = ql_adapter_initialize(qdev))) {
3029 printk(KERN_ERR PFX
3030 "%s: Unable to initialize adapter.\n",
3031 ndev->name);
3032 goto err_init;
3033 }
3034 printk(KERN_ERR PFX
3035		 "%s: Releasing driver lock.\n", ndev->name);
3036 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3037 } else {
3038		 printk(KERN_ERR PFX
3039		        "%s: Could not acquire driver lock.\n", ndev->name);
3040		 err = -ENODEV;
3041		 goto err_lock;
3042 }
3043
3044 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3045
3046 set_bit(QL_ADAPTER_UP,&qdev->flags);
3047
3048 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
3049
3050 netif_poll_enable(ndev);
3051 ql_enable_interrupts(qdev);
3052 return 0;
3053
3054err_init:
3055 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3056err_lock:
3057 free_irq(qdev->pdev->irq, ndev);
3058err_irq:
3059 if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) {
3060 printk(KERN_INFO PFX
3061 "%s: calling pci_disable_msi().\n",
3062 qdev->ndev->name);
3063 clear_bit(QL_MSI_ENABLED,&qdev->flags);
3064 pci_disable_msi(qdev->pdev);
3065 }
3066 return err;
3067}
3068
3069static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
3070{
3071 if( ql_adapter_down(qdev,reset) || ql_adapter_up(qdev)) {
3072 printk(KERN_ERR PFX
3073 "%s: Driver up/down cycle failed, "
3074 "closing device\n",qdev->ndev->name);
3075 dev_close(qdev->ndev);
3076 return -1;
3077 }
3078 return 0;
3079}
3080
3081static int ql3xxx_close(struct net_device *ndev)
3082{
3083 struct ql3_adapter *qdev = netdev_priv(ndev);
3084
3085 /*
3086 * Wait for device to recover from a reset.
3087 * (Rarely happens, but possible.)
3088 */
3089 while (!test_bit(QL_ADAPTER_UP,&qdev->flags))
3090 msleep(50);
3091
3092 ql_adapter_down(qdev,QL_DO_RESET);
3093 return 0;
3094}
3095
3096static int ql3xxx_open(struct net_device *ndev)
3097{
3098 struct ql3_adapter *qdev = netdev_priv(ndev);
3099 return (ql_adapter_up(qdev));
3100}
3101
3102static struct net_device_stats *ql3xxx_get_stats(struct net_device *dev)
3103{
3104 struct ql3_adapter *qdev = (struct ql3_adapter *)dev->priv;
3105 return &qdev->stats;
3106}
3107
3108static int ql3xxx_change_mtu(struct net_device *ndev, int new_mtu)
3109{
3110 struct ql3_adapter *qdev = netdev_priv(ndev);
3111 printk(KERN_ERR PFX "%s: new mtu size = %d.\n", ndev->name, new_mtu);
3112 if (new_mtu != NORMAL_MTU_SIZE && new_mtu != JUMBO_MTU_SIZE) {
3113 printk(KERN_ERR PFX
3114 "%s: mtu size of %d is not valid. Use exactly %d or "
3115 "%d.\n", ndev->name, new_mtu, NORMAL_MTU_SIZE,
3116 JUMBO_MTU_SIZE);
3117 return -EINVAL;
3118 }
3119
3120 if (!netif_running(ndev)) {
3121 ndev->mtu = new_mtu;
3122 return 0;
3123 }
3124
3125 ndev->mtu = new_mtu;
3126 return ql_cycle_adapter(qdev,QL_DO_RESET);
3127}
3128
3129static void ql3xxx_set_multicast_list(struct net_device *ndev)
3130{
3131 /*
3132	 * Multicast is not supported (IFF_MULTICAST is cleared in probe), so this is a no-op.
3133 */
3134 return;
3135}
3136
3137static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
3138{
3139 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3140 struct ql3xxx_port_registers __iomem *port_regs =
3141 qdev->mem_map_registers;
3142 struct sockaddr *addr = p;
3143 unsigned long hw_flags;
3144
3145 if (netif_running(ndev))
3146 return -EBUSY;
3147
3148 if (!is_valid_ether_addr(addr->sa_data))
3149 return -EADDRNOTAVAIL;
3150
3151 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3152
3153 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3154 /* Program lower 32 bits of the MAC address */
3155 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3156 (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
3157 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3158 ((ndev->dev_addr[2] << 24) | (ndev->
3159 dev_addr[3] << 16) |
3160 (ndev->dev_addr[4] << 8) | ndev->dev_addr[5]));
3161
3162 /* Program top 16 bits of the MAC address */
3163 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3164 ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
3165 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3166 ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
3167 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3168
3169 return 0;
3170}
3171
3172static void ql3xxx_tx_timeout(struct net_device *ndev)
3173{
3174 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3175
3176 printk(KERN_ERR PFX "%s: Resetting...\n", ndev->name);
3177 /*
3178 * Stop the queues, we've got a problem.
3179 */
3180 netif_stop_queue(ndev);
3181
3182 /*
3183 * Wake up the worker to process this event.
3184 */
3185 queue_work(qdev->workqueue, &qdev->tx_timeout_work);
3186}
3187
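/*
 * Reset worker, queued from the interrupt handler.  Frees any in-flight TX
 * skbs, clears the reset interrupt, waits for the soft reset to finish and
 * then cycles the adapter (doing a full reset if the wait timed out).
 */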
3188static void ql_reset_work(struct ql3_adapter *qdev)
3189{
3190 struct net_device *ndev = qdev->ndev;
3191 u32 value;
3192 struct ql_tx_buf_cb *tx_cb;
3193 int max_wait_time, i;
3194 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3195 unsigned long hw_flags;
3196
3197	 if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) || test_bit(QL_RESET_START, &qdev->flags)) {
3198 clear_bit(QL_LINK_MASTER,&qdev->flags);
3199
3200 /*
3201 * Loop through the active list and return the skb.
3202 */
3203 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
3204 tx_cb = &qdev->tx_buf[i];
3205 if (tx_cb->skb) {
3206
3207 printk(KERN_DEBUG PFX
3208 "%s: Freeing lost SKB.\n",
3209 qdev->ndev->name);
3210 pci_unmap_single(qdev->pdev,
3211 pci_unmap_addr(tx_cb, mapaddr),
3212 pci_unmap_len(tx_cb, maplen), PCI_DMA_TODEVICE);
3213 dev_kfree_skb(tx_cb->skb);
3214 tx_cb->skb = NULL;
3215 }
3216 }
3217
3218 printk(KERN_ERR PFX
3219 "%s: Clearing NRI after reset.\n", qdev->ndev->name);
3220 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3221 ql_write_common_reg(qdev,
3222 &port_regs->CommonRegs.
3223 ispControlStatus,
3224 ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
3225 /*
3226		 * Wait for the Soft Reset to complete.
3227 */
3228 max_wait_time = 10;
3229 do {
3230			 value =
3231			     ql_read_common_reg(qdev,
3232			                        &port_regs->CommonRegs.
3233			                        ispControlStatus);
3234 if ((value & ISP_CONTROL_SR) == 0) {
3235 printk(KERN_DEBUG PFX
3236 "%s: reset completed.\n",
3237 qdev->ndev->name);
3238 break;
3239 }
3240
3241 if (value & ISP_CONTROL_RI) {
3242 printk(KERN_DEBUG PFX
3243 "%s: clearing NRI after reset.\n",
3244 qdev->ndev->name);
3245 ql_write_common_reg(qdev,
3246 (u32 *) &
3247 port_regs->
3248 CommonRegs.
3249 ispControlStatus,
3250 ((ISP_CONTROL_RI <<
3251 16) | ISP_CONTROL_RI));
3252 }
3253
3254 ssleep(1);
3255 } while (--max_wait_time);
3256 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3257
3258 if (value & ISP_CONTROL_SR) {
3259
3260 /*
3261			 * Clear the reset flags and cycle the adapter.
3262			 * Nothing else we can do...
3263 */
3264 printk(KERN_ERR PFX
3265 "%s: Timed out waiting for reset to "
3266 "complete.\n", ndev->name);
3267 printk(KERN_ERR PFX
3268			        "%s: Doing a full adapter reset.\n", ndev->name);
3269 clear_bit(QL_RESET_PER_SCSI,&qdev->flags);
3270 clear_bit(QL_RESET_START,&qdev->flags);
3271 ql_cycle_adapter(qdev,QL_DO_RESET);
3272 return;
3273 }
3274
3275 clear_bit(QL_RESET_ACTIVE,&qdev->flags);
3276 clear_bit(QL_RESET_PER_SCSI,&qdev->flags);
3277 clear_bit(QL_RESET_START,&qdev->flags);
3278 ql_cycle_adapter(qdev,QL_NO_RESET);
3279 }
3280}
3281
3282static void ql_tx_timeout_work(struct ql3_adapter *qdev)
3283{
3284 ql_cycle_adapter(qdev,QL_DO_RESET);
3285}
3286
3287static void ql_get_board_info(struct ql3_adapter *qdev)
3288{
3289 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3290 u32 value;
3291
3292 value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);
3293
3294 qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
3295 if (value & PORT_STATUS_64)
3296 qdev->pci_width = 64;
3297 else
3298 qdev->pci_width = 32;
3299 if (value & PORT_STATUS_X)
3300 qdev->pci_x = 1;
3301 else
3302 qdev->pci_x = 0;
3303 qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
3304}
3305
3306static void ql3xxx_timer(unsigned long ptr)
3307{
3308 struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;
3309
3310 if (test_bit(QL_RESET_ACTIVE,&qdev->flags)) {
3311 printk(KERN_DEBUG PFX
3312 "%s: Reset in progress.\n",
3313 qdev->ndev->name);
3314 goto end;
3315 }
3316
3317 ql_link_state_machine(qdev);
3318
3319 /* Restart timer on 2 second interval. */
3320end:
3321 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
3322}
3323
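/*
 * PCI probe: enable the device, set up the DMA masks, map the register BAR,
 * read the MAC address and port configuration from NVRAM, register the
 * net_device and set up the reset/tx-timeout work and the adapter timer.
 */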
3324static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3325 const struct pci_device_id *pci_entry)
3326{
3327 struct net_device *ndev = NULL;
3328 struct ql3_adapter *qdev = NULL;
3329 static int cards_found = 0;
3330 int pci_using_dac, err;
3331
3332 err = pci_enable_device(pdev);
3333 if (err) {
3334 printk(KERN_ERR PFX "%s cannot enable PCI device\n",
3335 pci_name(pdev));
3336 goto err_out;
3337 }
3338
3339 err = pci_request_regions(pdev, DRV_NAME);
3340 if (err) {
3341 printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
3342 pci_name(pdev));
3343 goto err_out_disable_pdev;
3344 }
3345
3346 pci_set_master(pdev);
3347
3348 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
3349 pci_using_dac = 1;
3350 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3351 } else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
3352 pci_using_dac = 0;
3353 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3354 }
3355
3356 if (err) {
3357 printk(KERN_ERR PFX "%s no usable DMA configuration\n",
3358 pci_name(pdev));
3359 goto err_out_free_regions;
3360 }
3361
3362 ndev = alloc_etherdev(sizeof(struct ql3_adapter));
3363 if (!ndev)
3364 goto err_out_free_regions;
3365
3366 SET_MODULE_OWNER(ndev);
3367 SET_NETDEV_DEV(ndev, &pdev->dev);
3368
3369 ndev->features = NETIF_F_LLTX;
3370 if (pci_using_dac)
3371 ndev->features |= NETIF_F_HIGHDMA;
3372
3373 pci_set_drvdata(pdev, ndev);
3374
3375 qdev = netdev_priv(ndev);
3376 qdev->index = cards_found;
3377 qdev->ndev = ndev;
3378 qdev->pdev = pdev;
3379 qdev->port_link_state = LS_DOWN;
3380 if (msi)
3381 qdev->msi = 1;
3382
3383 qdev->msg_enable = netif_msg_init(debug, default_msg);
3384
3385 qdev->mem_map_registers =
3386 ioremap_nocache(pci_resource_start(pdev, 1),
3387 pci_resource_len(qdev->pdev, 1));
3388 if (!qdev->mem_map_registers) {
3389 printk(KERN_ERR PFX "%s: cannot map device registers\n",
3390 pci_name(pdev));
3391 goto err_out_free_ndev;
3392 }
3393
3394 spin_lock_init(&qdev->adapter_lock);
3395 spin_lock_init(&qdev->hw_lock);
3396
3397 /* Set driver entry points */
3398 ndev->open = ql3xxx_open;
3399 ndev->hard_start_xmit = ql3xxx_send;
3400 ndev->stop = ql3xxx_close;
3401 ndev->get_stats = ql3xxx_get_stats;
3402 ndev->change_mtu = ql3xxx_change_mtu;
3403 ndev->set_multicast_list = ql3xxx_set_multicast_list;
3404 SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
3405 ndev->set_mac_address = ql3xxx_set_mac_address;
3406 ndev->tx_timeout = ql3xxx_tx_timeout;
3407 ndev->watchdog_timeo = 5 * HZ;
3408
3409 ndev->poll = &ql_poll;
3410 ndev->weight = 64;
3411
3412 ndev->irq = pdev->irq;
3413
3414 /* make sure the EEPROM is good */
3415 if (ql_get_nvram_params(qdev)) {
3416 printk(KERN_ALERT PFX
3417 "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n",
3418 qdev->index);
3419 goto err_out_iounmap;
3420 }
3421
3422 ql_set_mac_info(qdev);
3423
3424 /* Validate and set parameters */
3425 if (qdev->mac_index) {
3426 memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn2.macAddress,
3427 ETH_ALEN);
3428 } else {
3429 memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn0.macAddress,
3430 ETH_ALEN);
3431 }
3432 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
3433
3434 ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;
3435
3436 /* Turn off support for multicasting */
3437 ndev->flags &= ~IFF_MULTICAST;
3438
3439 /* Record PCI bus information. */
3440 ql_get_board_info(qdev);
3441
3442 /*
3443 * Set the Maximum Memory Read Byte Count value. We do this to handle
3444 * jumbo frames.
3445 */
3446 if (qdev->pci_x) {
3447 pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036);
3448 }
3449
3450 err = register_netdev(ndev);
3451 if (err) {
3452 printk(KERN_ERR PFX "%s: cannot register net device\n",
3453 pci_name(pdev));
3454 goto err_out_iounmap;
3455 }
3456
3457 /* we're going to reset, so assume we have no link for now */
3458
3459 netif_carrier_off(ndev);
3460 netif_stop_queue(ndev);
3461
3462 qdev->workqueue = create_singlethread_workqueue(ndev->name);
3463 INIT_WORK(&qdev->reset_work, (void (*)(void *))ql_reset_work, qdev);
3464 INIT_WORK(&qdev->tx_timeout_work,
3465 (void (*)(void *))ql_tx_timeout_work, qdev);
3466
3467 init_timer(&qdev->adapter_timer);
3468 qdev->adapter_timer.function = ql3xxx_timer;
3469 qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */
3470 qdev->adapter_timer.data = (unsigned long)qdev;
3471
3472 if(!cards_found) {
3473 printk(KERN_ALERT PFX "%s\n", DRV_STRING);
3474 printk(KERN_ALERT PFX "Driver name: %s, Version: %s.\n",
3475 DRV_NAME, DRV_VERSION);
3476 }
3477 ql_display_dev_info(ndev);
3478
3479 cards_found++;
3480 return 0;
3481
3482err_out_iounmap:
3483 iounmap(qdev->mem_map_registers);
3484err_out_free_ndev:
3485 free_netdev(ndev);
3486err_out_free_regions:
3487 pci_release_regions(pdev);
3488err_out_disable_pdev:
3489 pci_disable_device(pdev);
3490 pci_set_drvdata(pdev, NULL);
3491err_out:
3492 return err;
3493}
3494
3495static void __devexit ql3xxx_remove(struct pci_dev *pdev)
3496{
3497 struct net_device *ndev = pci_get_drvdata(pdev);
3498 struct ql3_adapter *qdev = netdev_priv(ndev);
3499
3500 unregister_netdev(ndev);
3501
3502
3503 ql_disable_interrupts(qdev);
3504
3505 if (qdev->workqueue) {
3506 cancel_delayed_work(&qdev->reset_work);
3507 cancel_delayed_work(&qdev->tx_timeout_work);
3508 destroy_workqueue(qdev->workqueue);
3509 qdev->workqueue = NULL;
3510 }
3511
3512 iounmap((void *)qdev->mmap_virt_base);
3513 pci_release_regions(pdev);
3514 pci_set_drvdata(pdev, NULL);
3515 free_netdev(ndev);
3516}
3517
3518static struct pci_driver ql3xxx_driver = {
3519
3520 .name = DRV_NAME,
3521 .id_table = ql3xxx_pci_tbl,
3522 .probe = ql3xxx_probe,
3523 .remove = __devexit_p(ql3xxx_remove),
3524};
3525
3526static int __init ql3xxx_init_module(void)
3527{
3528 return pci_register_driver(&ql3xxx_driver);
3529}
3530
3531static void __exit ql3xxx_exit(void)
3532{
3533 pci_unregister_driver(&ql3xxx_driver);
3534}
3535
3536module_init(ql3xxx_init_module);
3537module_exit(ql3xxx_exit);