Diffstat (limited to 'drivers/net/ethernet/qlogic/qla3xxx.c')
-rw-r--r-- | drivers/net/ethernet/qlogic/qla3xxx.c | 3970
1 file changed, 3970 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
new file mode 100644
index 000000000000..ccde8061afa8
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -0,0 +1,3970 @@
1 | /* | ||
2 | * QLogic QLA3xxx NIC HBA Driver | ||
3 | * Copyright (c) 2003-2006 QLogic Corporation | ||
4 | * | ||
5 | * See LICENSE.qla3xxx for copyright and licensing details. | ||
6 | */ | ||
7 | |||
8 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/types.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/list.h> | ||
15 | #include <linux/pci.h> | ||
16 | #include <linux/dma-mapping.h> | ||
17 | #include <linux/sched.h> | ||
18 | #include <linux/slab.h> | ||
19 | #include <linux/dmapool.h> | ||
20 | #include <linux/mempool.h> | ||
21 | #include <linux/spinlock.h> | ||
22 | #include <linux/kthread.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/errno.h> | ||
25 | #include <linux/ioport.h> | ||
26 | #include <linux/ip.h> | ||
27 | #include <linux/in.h> | ||
28 | #include <linux/if_arp.h> | ||
29 | #include <linux/if_ether.h> | ||
30 | #include <linux/netdevice.h> | ||
31 | #include <linux/etherdevice.h> | ||
32 | #include <linux/ethtool.h> | ||
33 | #include <linux/skbuff.h> | ||
34 | #include <linux/rtnetlink.h> | ||
35 | #include <linux/if_vlan.h> | ||
36 | #include <linux/delay.h> | ||
37 | #include <linux/mm.h> | ||
38 | #include <linux/prefetch.h> | ||
39 | |||
40 | #include "qla3xxx.h" | ||
41 | |||
42 | #define DRV_NAME "qla3xxx" | ||
43 | #define DRV_STRING "QLogic ISP3XXX Network Driver" | ||
44 | #define DRV_VERSION "v2.03.00-k5" | ||
45 | |||
46 | static const char ql3xxx_driver_name[] = DRV_NAME; | ||
47 | static const char ql3xxx_driver_version[] = DRV_VERSION; | ||
48 | |||
49 | #define TIMED_OUT_MSG \ | ||
50 | "Timed out waiting for management port to get free before issuing command\n" | ||
51 | |||
52 | MODULE_AUTHOR("QLogic Corporation"); | ||
53 | MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " "); | ||
54 | MODULE_LICENSE("GPL"); | ||
55 | MODULE_VERSION(DRV_VERSION); | ||
56 | |||
57 | static const u32 default_msg | ||
58 | = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | ||
59 | | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN; | ||
60 | |||
61 | static int debug = -1; /* defaults above */ | ||
62 | module_param(debug, int, 0); | ||
63 | MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); | ||
64 | |||
65 | static int msi; | ||
66 | module_param(msi, int, 0); | ||
67 | MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts."); | ||
68 | |||
69 | static DEFINE_PCI_DEVICE_TABLE(ql3xxx_pci_tbl) = { | ||
70 | {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)}, | ||
71 | {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)}, | ||
72 | /* required last entry */ | ||
73 | {0,} | ||
74 | }; | ||
75 | |||
76 | MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl); | ||
77 | |||
78 | /* | ||
79 |  * These are the known PHYs that are used | ||
80 | */ | ||
81 | enum PHY_DEVICE_TYPE { | ||
82 | PHY_TYPE_UNKNOWN = 0, | ||
83 | PHY_VITESSE_VSC8211, | ||
84 | PHY_AGERE_ET1011C, | ||
85 | MAX_PHY_DEV_TYPES | ||
86 | }; | ||
87 | |||
88 | struct PHY_DEVICE_INFO { | ||
89 | const enum PHY_DEVICE_TYPE phyDevice; | ||
90 | const u32 phyIdOUI; | ||
91 | const u16 phyIdModel; | ||
92 | const char *name; | ||
93 | }; | ||
94 | |||
95 | static const struct PHY_DEVICE_INFO PHY_DEVICES[] = { | ||
96 | {PHY_TYPE_UNKNOWN, 0x000000, 0x0, "PHY_TYPE_UNKNOWN"}, | ||
97 | {PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"}, | ||
98 | {PHY_AGERE_ET1011C, 0x00a0bc, 0x1, "PHY_AGERE_ET1011C"}, | ||
99 | }; | ||
100 | |||
101 | |||
102 | /* | ||
103 | * Caller must take hw_lock. | ||
104 | */ | ||
105 | static int ql_sem_spinlock(struct ql3_adapter *qdev, | ||
106 | u32 sem_mask, u32 sem_bits) | ||
107 | { | ||
108 | struct ql3xxx_port_registers __iomem *port_regs = | ||
109 | qdev->mem_map_registers; | ||
110 | u32 value; | ||
111 | unsigned int seconds = 3; | ||
112 | |||
113 | do { | ||
114 | writel((sem_mask | sem_bits), | ||
115 | &port_regs->CommonRegs.semaphoreReg); | ||
116 | value = readl(&port_regs->CommonRegs.semaphoreReg); | ||
117 | if ((value & (sem_mask >> 16)) == sem_bits) | ||
118 | return 0; | ||
119 | ssleep(1); | ||
120 | } while (--seconds); | ||
121 | return -1; | ||
122 | } | ||
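/*
 * Note on the semaphore register protocol used above (illustrative
 * summary, not a spec): the upper 16 bits of the value written act as
 * a write-enable mask for the lower 16, so writing (sem_mask |
 * sem_bits) changes only the bits selected by sem_mask.  Ownership is
 * confirmed by reading the register back and checking that our bits
 * stuck:
 *
 *	writel(sem_mask | sem_bits, &semaphoreReg);
 *	owned = (readl(&semaphoreReg) & (sem_mask >> 16)) == sem_bits;
 */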
123 | |||
124 | static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask) | ||
125 | { | ||
126 | struct ql3xxx_port_registers __iomem *port_regs = | ||
127 | qdev->mem_map_registers; | ||
128 | writel(sem_mask, &port_regs->CommonRegs.semaphoreReg); | ||
129 | readl(&port_regs->CommonRegs.semaphoreReg); | ||
130 | } | ||
131 | |||
132 | static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits) | ||
133 | { | ||
134 | struct ql3xxx_port_registers __iomem *port_regs = | ||
135 | qdev->mem_map_registers; | ||
136 | u32 value; | ||
137 | |||
138 | writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg); | ||
139 | value = readl(&port_regs->CommonRegs.semaphoreReg); | ||
140 | return ((value & (sem_mask >> 16)) == sem_bits); | ||
141 | } | ||
142 | |||
143 | /* | ||
144 | * Caller holds hw_lock. | ||
145 | */ | ||
146 | static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev) | ||
147 | { | ||
148 | int i = 0; | ||
149 | |||
150 | while (i < 10) { | ||
151 | if (i) | ||
152 | ssleep(1); | ||
153 | |||
154 | if (ql_sem_lock(qdev, | ||
155 | QL_DRVR_SEM_MASK, | ||
156 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) | ||
157 | * 2) << 1)) { | ||
158 | netdev_printk(KERN_DEBUG, qdev->ndev, | ||
159 | "driver lock acquired\n"); | ||
160 | return 1; | ||
161 | } | ||
162 | } | ||
163 | |||
164 | netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n"); | ||
165 | return 0; | ||
166 | } | ||
167 | |||
168 | static void ql_set_register_page(struct ql3_adapter *qdev, u32 page) | ||
169 | { | ||
170 | struct ql3xxx_port_registers __iomem *port_regs = | ||
171 | qdev->mem_map_registers; | ||
172 | |||
173 | writel(((ISP_CONTROL_NP_MASK << 16) | page), | ||
174 | &port_regs->CommonRegs.ispControlStatus); | ||
175 | readl(&port_regs->CommonRegs.ispControlStatus); | ||
176 | qdev->current_page = page; | ||
177 | } | ||
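/*
 * The ISP registers are paged: the page select lives in the low bits
 * of ispControlStatus, written with (ISP_CONTROL_NP_MASK << 16) as the
 * write-enable mask so only the page-select bits change.  The trailing
 * readl() flushes the posted write before any access that depends on
 * the new page.
 */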
178 | |||
179 | static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg) | ||
180 | { | ||
181 | u32 value; | ||
182 | unsigned long hw_flags; | ||
183 | |||
184 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | ||
185 | value = readl(reg); | ||
186 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | ||
187 | |||
188 | return value; | ||
189 | } | ||
190 | |||
191 | static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg) | ||
192 | { | ||
193 | return readl(reg); | ||
194 | } | ||
195 | |||
196 | static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg) | ||
197 | { | ||
198 | u32 value; | ||
199 | unsigned long hw_flags; | ||
200 | |||
201 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | ||
202 | |||
203 | if (qdev->current_page != 0) | ||
204 | ql_set_register_page(qdev, 0); | ||
205 | value = readl(reg); | ||
206 | |||
207 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | ||
208 | return value; | ||
209 | } | ||
210 | |||
211 | static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg) | ||
212 | { | ||
213 | if (qdev->current_page != 0) | ||
214 | ql_set_register_page(qdev, 0); | ||
215 | return readl(reg); | ||
216 | } | ||
217 | |||
218 | static void ql_write_common_reg_l(struct ql3_adapter *qdev, | ||
219 | u32 __iomem *reg, u32 value) | ||
220 | { | ||
221 | unsigned long hw_flags; | ||
222 | |||
223 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | ||
224 | writel(value, reg); | ||
225 | readl(reg); | ||
226 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | ||
227 | } | ||
228 | |||
229 | static void ql_write_common_reg(struct ql3_adapter *qdev, | ||
230 | u32 __iomem *reg, u32 value) | ||
231 | { | ||
232 | writel(value, reg); | ||
233 | readl(reg); | ||
234 | } | ||
235 | |||
236 | static void ql_write_nvram_reg(struct ql3_adapter *qdev, | ||
237 | u32 __iomem *reg, u32 value) | ||
238 | { | ||
239 | writel(value, reg); | ||
240 | readl(reg); | ||
241 | udelay(1); | ||
242 | } | ||
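/*
 * For NVRAM accesses the readl() flushes the posted write, and the
 * udelay(1) paces the bit-banged serial EEPROM interface, which needs
 * on the order of a microsecond between line transitions.
 */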
243 | |||
244 | static void ql_write_page0_reg(struct ql3_adapter *qdev, | ||
245 | u32 __iomem *reg, u32 value) | ||
246 | { | ||
247 | if (qdev->current_page != 0) | ||
248 | ql_set_register_page(qdev, 0); | ||
249 | writel(value, reg); | ||
250 | readl(reg); | ||
251 | } | ||
252 | |||
253 | /* | ||
254 | * Caller holds hw_lock. Only called during init. | ||
255 | */ | ||
256 | static void ql_write_page1_reg(struct ql3_adapter *qdev, | ||
257 | u32 __iomem *reg, u32 value) | ||
258 | { | ||
259 | if (qdev->current_page != 1) | ||
260 | ql_set_register_page(qdev, 1); | ||
261 | writel(value, reg); | ||
262 | readl(reg); | ||
263 | } | ||
264 | |||
265 | /* | ||
266 | * Caller holds hw_lock. Only called during init. | ||
267 | */ | ||
268 | static void ql_write_page2_reg(struct ql3_adapter *qdev, | ||
269 | u32 __iomem *reg, u32 value) | ||
270 | { | ||
271 | if (qdev->current_page != 2) | ||
272 | ql_set_register_page(qdev, 2); | ||
273 | writel(value, reg); | ||
274 | readl(reg); | ||
275 | } | ||
276 | |||
277 | static void ql_disable_interrupts(struct ql3_adapter *qdev) | ||
278 | { | ||
279 | struct ql3xxx_port_registers __iomem *port_regs = | ||
280 | qdev->mem_map_registers; | ||
281 | |||
282 | ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, | ||
283 | (ISP_IMR_ENABLE_INT << 16)); | ||
284 | |||
285 | } | ||
286 | |||
287 | static void ql_enable_interrupts(struct ql3_adapter *qdev) | ||
288 | { | ||
289 | struct ql3xxx_port_registers __iomem *port_regs = | ||
290 | qdev->mem_map_registers; | ||
291 | |||
292 | ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, | ||
293 | ((0xff << 16) | ISP_IMR_ENABLE_INT)); | ||
294 | |||
295 | } | ||
296 | |||
297 | static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev, | ||
298 | struct ql_rcv_buf_cb *lrg_buf_cb) | ||
299 | { | ||
300 | dma_addr_t map; | ||
301 | int err; | ||
302 | lrg_buf_cb->next = NULL; | ||
303 | |||
304 | if (qdev->lrg_buf_free_tail == NULL) { /* The list is empty */ | ||
305 | qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb; | ||
306 | } else { | ||
307 | qdev->lrg_buf_free_tail->next = lrg_buf_cb; | ||
308 | qdev->lrg_buf_free_tail = lrg_buf_cb; | ||
309 | } | ||
310 | |||
311 | if (!lrg_buf_cb->skb) { | ||
312 | lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev, | ||
313 | qdev->lrg_buffer_len); | ||
314 | if (unlikely(!lrg_buf_cb->skb)) { | ||
315 | netdev_err(qdev->ndev, "failed netdev_alloc_skb()\n"); | ||
316 | qdev->lrg_buf_skb_check++; | ||
317 | } else { | ||
318 | /* | ||
319 | * We save some space to copy the ethhdr from first | ||
320 | * buffer | ||
321 | */ | ||
322 | skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE); | ||
323 | map = pci_map_single(qdev->pdev, | ||
324 | lrg_buf_cb->skb->data, | ||
325 | qdev->lrg_buffer_len - | ||
326 | QL_HEADER_SPACE, | ||
327 | PCI_DMA_FROMDEVICE); | ||
328 | err = pci_dma_mapping_error(qdev->pdev, map); | ||
329 | if (err) { | ||
330 | netdev_err(qdev->ndev, | ||
331 | "PCI mapping failed with error: %d\n", | ||
332 | err); | ||
333 | dev_kfree_skb(lrg_buf_cb->skb); | ||
334 | lrg_buf_cb->skb = NULL; | ||
335 | |||
336 | qdev->lrg_buf_skb_check++; | ||
337 | return; | ||
338 | } | ||
339 | |||
340 | lrg_buf_cb->buf_phy_addr_low = | ||
341 | cpu_to_le32(LS_64BITS(map)); | ||
342 | lrg_buf_cb->buf_phy_addr_high = | ||
343 | cpu_to_le32(MS_64BITS(map)); | ||
344 | dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); | ||
345 | dma_unmap_len_set(lrg_buf_cb, maplen, | ||
346 | qdev->lrg_buffer_len - | ||
347 | QL_HEADER_SPACE); | ||
348 | } | ||
349 | } | ||
350 | |||
351 | qdev->lrg_buf_free_count++; | ||
352 | } | ||
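/*
 * The large-buffer free list is a singly linked list tracked with
 * head/tail pointers.  When an skb cannot be allocated or DMA-mapped,
 * the buffer still goes on the list and lrg_buf_skb_check is bumped so
 * the missing skb can be retried later instead of being lost.
 */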
353 | |||
354 | static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter | ||
355 | *qdev) | ||
356 | { | ||
357 | struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head; | ||
358 | |||
359 | if (lrg_buf_cb != NULL) { | ||
360 | qdev->lrg_buf_free_head = lrg_buf_cb->next; | ||
361 | if (qdev->lrg_buf_free_head == NULL) | ||
362 | qdev->lrg_buf_free_tail = NULL; | ||
363 | qdev->lrg_buf_free_count--; | ||
364 | } | ||
365 | |||
366 | return lrg_buf_cb; | ||
367 | } | ||
368 | |||
369 | static u32 addrBits = EEPROM_NO_ADDR_BITS; | ||
370 | static u32 dataBits = EEPROM_NO_DATA_BITS; | ||
371 | |||
372 | static void fm93c56a_deselect(struct ql3_adapter *qdev); | ||
373 | static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr, | ||
374 | unsigned short *value); | ||
375 | |||
376 | /* | ||
377 | * Caller holds hw_lock. | ||
378 | */ | ||
379 | static void fm93c56a_select(struct ql3_adapter *qdev) | ||
380 | { | ||
381 | struct ql3xxx_port_registers __iomem *port_regs = | ||
382 | qdev->mem_map_registers; | ||
383 | __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; | ||
384 | |||
385 | qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1; | ||
386 | ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); | ||
387 | ql_write_nvram_reg(qdev, spir, | ||
388 | ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data)); | ||
389 | } | ||
390 | |||
391 | /* | ||
392 | * Caller holds hw_lock. | ||
393 | */ | ||
394 | static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr) | ||
395 | { | ||
396 | int i; | ||
397 | u32 mask; | ||
398 | u32 dataBit; | ||
399 | u32 previousBit; | ||
400 | struct ql3xxx_port_registers __iomem *port_regs = | ||
401 | qdev->mem_map_registers; | ||
402 | __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; | ||
403 | |||
404 | /* Clock in a zero, then do the start bit */ | ||
405 | ql_write_nvram_reg(qdev, spir, | ||
406 | (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | | ||
407 | AUBURN_EEPROM_DO_1)); | ||
408 | ql_write_nvram_reg(qdev, spir, | ||
409 | (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | | ||
410 | AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE)); | ||
411 | ql_write_nvram_reg(qdev, spir, | ||
412 | (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | | ||
413 | AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL)); | ||
414 | |||
415 | mask = 1 << (FM93C56A_CMD_BITS - 1); | ||
416 | /* Force the previous data bit to be different */ | ||
417 | previousBit = 0xffff; | ||
418 | for (i = 0; i < FM93C56A_CMD_BITS; i++) { | ||
419 | dataBit = (cmd & mask) | ||
420 | ? AUBURN_EEPROM_DO_1 | ||
421 | : AUBURN_EEPROM_DO_0; | ||
422 | if (previousBit != dataBit) { | ||
423 | /* If the bit changed, change the DO state to match */ | ||
424 | ql_write_nvram_reg(qdev, spir, | ||
425 | (ISP_NVRAM_MASK | | ||
426 | qdev->eeprom_cmd_data | dataBit)); | ||
427 | previousBit = dataBit; | ||
428 | } | ||
429 | ql_write_nvram_reg(qdev, spir, | ||
430 | (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | | ||
431 | dataBit | AUBURN_EEPROM_CLK_RISE)); | ||
432 | ql_write_nvram_reg(qdev, spir, | ||
433 | (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | | ||
434 | dataBit | AUBURN_EEPROM_CLK_FALL)); | ||
435 | cmd = cmd << 1; | ||
436 | } | ||
437 | |||
438 | mask = 1 << (addrBits - 1); | ||
439 | /* Force the previous data bit to be different */ | ||
440 | previousBit = 0xffff; | ||
441 | for (i = 0; i < addrBits; i++) { | ||
442 | dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 | ||
443 | : AUBURN_EEPROM_DO_0; | ||
444 | if (previousBit != dataBit) { | ||
445 | /* | ||
446 | * If the bit changed, then change the DO state to | ||
447 | * match | ||
448 | */ | ||
449 | ql_write_nvram_reg(qdev, spir, | ||
450 | (ISP_NVRAM_MASK | | ||
451 | qdev->eeprom_cmd_data | dataBit)); | ||
452 | previousBit = dataBit; | ||
453 | } | ||
454 | ql_write_nvram_reg(qdev, spir, | ||
455 | (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | | ||
456 | dataBit | AUBURN_EEPROM_CLK_RISE)); | ||
457 | ql_write_nvram_reg(qdev, spir, | ||
458 | (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | | ||
459 | dataBit | AUBURN_EEPROM_CLK_FALL)); | ||
460 | eepromAddr = eepromAddr << 1; | ||
461 | } | ||
462 | } | ||
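/*
 * Each command/address bit above is clocked out in three steps: set DO
 * to the data value, raise the clock, drop the clock, MSB first.  A
 * whole read transaction (see eeprom_readword() below) is framed like
 * this (illustrative only):
 *
 *	fm93c56a_select(qdev);				<- chip select
 *	fm93c56a_cmd(qdev, FM93C56A_READ, addr);	<- opcode + address
 *	fm93c56a_datain(qdev, &word);			<- clock in 16 data bits
 *	fm93c56a_deselect(qdev);
 */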
463 | |||
464 | /* | ||
465 | * Caller holds hw_lock. | ||
466 | */ | ||
467 | static void fm93c56a_deselect(struct ql3_adapter *qdev) | ||
468 | { | ||
469 | struct ql3xxx_port_registers __iomem *port_regs = | ||
470 | qdev->mem_map_registers; | ||
471 | __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; | ||
472 | |||
473 | qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0; | ||
474 | ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); | ||
475 | } | ||
476 | |||
477 | /* | ||
478 | * Caller holds hw_lock. | ||
479 | */ | ||
480 | static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value) | ||
481 | { | ||
482 | int i; | ||
483 | u32 data = 0; | ||
484 | u32 dataBit; | ||
485 | struct ql3xxx_port_registers __iomem *port_regs = | ||
486 | qdev->mem_map_registers; | ||
487 | __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; | ||
488 | |||
489 | /* Read the data bits */ | ||
490 | /* The first bit is a dummy. Clock right over it. */ | ||
491 | for (i = 0; i < dataBits; i++) { | ||
492 | ql_write_nvram_reg(qdev, spir, | ||
493 | ISP_NVRAM_MASK | qdev->eeprom_cmd_data | | ||
494 | AUBURN_EEPROM_CLK_RISE); | ||
495 | ql_write_nvram_reg(qdev, spir, | ||
496 | ISP_NVRAM_MASK | qdev->eeprom_cmd_data | | ||
497 | AUBURN_EEPROM_CLK_FALL); | ||
498 | dataBit = (ql_read_common_reg(qdev, spir) & | ||
499 | AUBURN_EEPROM_DI_1) ? 1 : 0; | ||
500 | data = (data << 1) | dataBit; | ||
501 | } | ||
502 | *value = (u16)data; | ||
503 | } | ||
504 | |||
505 | /* | ||
506 | * Caller holds hw_lock. | ||
507 | */ | ||
508 | static void eeprom_readword(struct ql3_adapter *qdev, | ||
509 | u32 eepromAddr, unsigned short *value) | ||
510 | { | ||
511 | fm93c56a_select(qdev); | ||
512 | fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr); | ||
513 | fm93c56a_datain(qdev, value); | ||
514 | fm93c56a_deselect(qdev); | ||
515 | } | ||
516 | |||
517 | static void ql_set_mac_addr(struct net_device *ndev, u16 *addr) | ||
518 | { | ||
519 | __le16 *p = (__le16 *)ndev->dev_addr; | ||
520 | p[0] = cpu_to_le16(addr[0]); | ||
521 | p[1] = cpu_to_le16(addr[1]); | ||
522 | p[2] = cpu_to_le16(addr[2]); | ||
523 | } | ||
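/*
 * The caller hands the MAC address in as three 16-bit words (the
 * granularity at which it is read out of the EEPROM), so it is stored
 * into dev_addr as little-endian u16s rather than six bytes.
 */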
524 | |||
525 | static int ql_get_nvram_params(struct ql3_adapter *qdev) | ||
526 | { | ||
527 | u16 *pEEPROMData; | ||
528 | u16 checksum = 0; | ||
529 | u32 index; | ||
530 | unsigned long hw_flags; | ||
531 | |||
532 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | ||
533 | |||
534 | pEEPROMData = (u16 *)&qdev->nvram_data; | ||
535 | qdev->eeprom_cmd_data = 0; | ||
536 | if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK, | ||
537 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * | ||
538 | 2) << 10)) { | ||
539 | pr_err("%s: Failed ql_sem_spinlock()\n", __func__); | ||
540 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | ||
541 | return -1; | ||
542 | } | ||
543 | |||
544 | for (index = 0; index < EEPROM_SIZE; index++) { | ||
545 | eeprom_readword(qdev, index, pEEPROMData); | ||
546 | checksum += *pEEPROMData; | ||
547 | pEEPROMData++; | ||
548 | } | ||
549 | ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK); | ||
550 | |||
551 | if (checksum != 0) { | ||
552 | netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n", | ||
553 | checksum); | ||
554 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | ||
555 | return -1; | ||
556 | } | ||
557 | |||
558 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | ||
559 | return checksum; | ||
560 | } | ||
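/*
 * The NVRAM image is considered valid when all of its 16-bit words sum
 * to zero, i.e. the stored checksum word is the two's complement of
 * the sum of the remaining words.
 */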
561 | |||
562 | static const u32 PHYAddr[2] = { | ||
563 | PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS | ||
564 | }; | ||
565 | |||
566 | static int ql_wait_for_mii_ready(struct ql3_adapter *qdev) | ||
567 | { | ||
568 | struct ql3xxx_port_registers __iomem *port_regs = | ||
569 | qdev->mem_map_registers; | ||
570 | u32 temp; | ||
571 | int count = 1000; | ||
572 | |||
573 | while (count) { | ||
574 | temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg); | ||
575 | if (!(temp & MAC_MII_STATUS_BSY)) | ||
576 | return 0; | ||
577 | udelay(10); | ||
578 | count--; | ||
579 | } | ||
580 | return -1; | ||
581 | } | ||
582 | |||
583 | static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev) | ||
584 | { | ||
585 | struct ql3xxx_port_registers __iomem *port_regs = | ||
586 | qdev->mem_map_registers; | ||
587 | u32 scanControl; | ||
588 | |||
589 | if (qdev->numPorts > 1) { | ||
590 | /* Auto scan will cycle through multiple ports */ | ||
591 | scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC; | ||
592 | } else { | ||
593 | scanControl = MAC_MII_CONTROL_SC; | ||
594 | } | ||
595 | |||
596 | /* | ||
597 | * Scan register 1 of PHY/PETBI, | ||
598 | * Set up to scan both devices | ||
599 | * The autoscan starts from the first register, completes | ||
600 | * the last one before rolling over to the first | ||
601 | */ | ||
602 | ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, | ||
603 | PHYAddr[0] | MII_SCAN_REGISTER); | ||
604 | |||
605 | ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, | ||
606 | (scanControl) | | ||
607 | ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16)); | ||
608 | } | ||
609 | |||
610 | static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev) | ||
611 | { | ||
612 | u8 ret; | ||
613 | struct ql3xxx_port_registers __iomem *port_regs = | ||
614 | qdev->mem_map_registers; | ||
615 | |||
616 | /* See if scan mode is enabled before we turn it off */ | ||
617 | if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) & | ||
618 | (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) { | ||
619 | /* Scan is enabled */ | ||
620 | ret = 1; | ||
621 | } else { | ||
622 | /* Scan is disabled */ | ||
623 | ret = 0; | ||
624 | } | ||
625 | |||
626 | /* | ||
627 | * When disabling scan mode you must first change the MII register | ||
628 | * address | ||
629 | */ | ||
630 | ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, | ||
631 | PHYAddr[0] | MII_SCAN_REGISTER); | ||
632 | |||
633 | ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, | ||
634 | ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS | | ||
635 | MAC_MII_CONTROL_RC) << 16)); | ||
636 | |||
637 | return ret; | ||
638 | } | ||
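/*
 * Stopping scan mode relies on the same write-enable convention:
 * writing (SC | AS | RC) << 16 with the data bits clear forces all
 * three control bits off without disturbing anything else.
 */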
639 | |||
640 | static int ql_mii_write_reg_ex(struct ql3_adapter *qdev, | ||
641 | u16 regAddr, u16 value, u32 phyAddr) | ||
642 | { | ||
643 | struct ql3xxx_port_registers __iomem *port_regs = | ||
644 | qdev->mem_map_registers; | ||
645 | u8 scanWasEnabled; | ||
646 | |||
647 | scanWasEnabled = ql_mii_disable_scan_mode(qdev); | ||
648 | |||
649 | if (ql_wait_for_mii_ready(qdev)) { | ||
650 | netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); | ||
651 | return -1; | ||
652 | } | ||
653 | |||
654 | ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, | ||
655 | phyAddr | regAddr); | ||
656 | |||
657 | ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value); | ||
658 | |||
659 | /* Wait for write to complete 9/10/04 SJP */ | ||
660 | if (ql_wait_for_mii_ready(qdev)) { | ||
661 | netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); | ||
662 | return -1; | ||
663 | } | ||
664 | |||
665 | if (scanWasEnabled) | ||
666 | ql_mii_enable_scan_mode(qdev); | ||
667 | |||
668 | return 0; | ||
669 | } | ||
670 | |||
671 | static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr, | ||
672 | u16 *value, u32 phyAddr) | ||
673 | { | ||
674 | struct ql3xxx_port_registers __iomem *port_regs = | ||
675 | qdev->mem_map_registers; | ||
676 | u8 scanWasEnabled; | ||
677 | u32 temp; | ||
678 | |||
679 | scanWasEnabled = ql_mii_disable_scan_mode(qdev); | ||
680 | |||
681 | if (ql_wait_for_mii_ready(qdev)) { | ||
682 | netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); | ||
683 | return -1; | ||
684 | } | ||
685 | |||
686 | ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, | ||
687 | phyAddr | regAddr); | ||
688 | |||
689 | ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, | ||
690 | (MAC_MII_CONTROL_RC << 16)); | ||
691 | |||
692 | ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, | ||
693 | (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC); | ||
694 | |||
695 | /* Wait for the read to complete */ | ||
696 | if (ql_wait_for_mii_ready(qdev)) { | ||
697 | netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); | ||
698 | return -1; | ||
699 | } | ||
700 | |||
701 | temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg); | ||
702 | *value = (u16) temp; | ||
703 | |||
704 | if (scanWasEnabled) | ||
705 | ql_mii_enable_scan_mode(qdev); | ||
706 | |||
707 | return 0; | ||
708 | } | ||
709 | |||
710 | static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value) | ||
711 | { | ||
712 | struct ql3xxx_port_registers __iomem *port_regs = | ||
713 | qdev->mem_map_registers; | ||
714 | |||
715 | ql_mii_disable_scan_mode(qdev); | ||
716 | |||
717 | if (ql_wait_for_mii_ready(qdev)) { | ||
718 | netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); | ||
719 | return -1; | ||
720 | } | ||
721 | |||
722 | ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, | ||
723 | qdev->PHYAddr | regAddr); | ||
724 | |||
725 | ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value); | ||
726 | |||
727 | /* Wait for write to complete. */ | ||
728 | if (ql_wait_for_mii_ready(qdev)) { | ||
729 | netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); | ||
730 | return -1; | ||
731 | } | ||
732 | |||
733 | ql_mii_enable_scan_mode(qdev); | ||
734 | |||
735 | return 0; | ||
736 | } | ||
737 | |||
738 | static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value) | ||
739 | { | ||
740 | u32 temp; | ||
741 | struct ql3xxx_port_registers __iomem *port_regs = | ||
742 | qdev->mem_map_registers; | ||
743 | |||
744 | ql_mii_disable_scan_mode(qdev); | ||
745 | |||
746 | if (ql_wait_for_mii_ready(qdev)) { | ||
747 | netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); | ||
748 | return -1; | ||
749 | } | ||
750 | |||
751 | ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, | ||
752 | qdev->PHYAddr | regAddr); | ||
753 | |||
754 | ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, | ||
755 | (MAC_MII_CONTROL_RC << 16)); | ||
756 | |||
757 | ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, | ||
758 | (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC); | ||
759 | |||
760 | /* Wait for the read to complete */ | ||
761 | if (ql_wait_for_mii_ready(qdev)) { | ||
762 | netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); | ||
763 | return -1; | ||
764 | } | ||
765 | |||
766 | temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg); | ||
767 | *value = (u16) temp; | ||
768 | |||
769 | ql_mii_enable_scan_mode(qdev); | ||
770 | |||
771 | return 0; | ||
772 | } | ||
773 | |||
774 | static void ql_petbi_reset(struct ql3_adapter *qdev) | ||
775 | { | ||
776 | ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET); | ||
777 | } | ||
778 | |||
779 | static void ql_petbi_start_neg(struct ql3_adapter *qdev) | ||
780 | { | ||
781 | u16 reg; | ||
782 | |||
783 | /* Enable Auto-negotiation sense */ | ||
784 | ql_mii_read_reg(qdev, PETBI_TBI_CTRL, ®); | ||
785 | reg |= PETBI_TBI_AUTO_SENSE; | ||
786 | ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg); | ||
787 | |||
788 | ql_mii_write_reg(qdev, PETBI_NEG_ADVER, | ||
789 | PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX); | ||
790 | |||
791 | ql_mii_write_reg(qdev, PETBI_CONTROL_REG, | ||
792 | PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG | | ||
793 | PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000); | ||
794 | |||
795 | } | ||
796 | |||
797 | static void ql_petbi_reset_ex(struct ql3_adapter *qdev) | ||
798 | { | ||
799 | ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET, | ||
800 | PHYAddr[qdev->mac_index]); | ||
801 | } | ||
802 | |||
803 | static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev) | ||
804 | { | ||
805 | u16 reg; | ||
806 | |||
807 | /* Enable Auto-negotiation sense */ | ||
808 | ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, ®, | ||
809 | PHYAddr[qdev->mac_index]); | ||
810 | reg |= PETBI_TBI_AUTO_SENSE; | ||
811 | ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg, | ||
812 | PHYAddr[qdev->mac_index]); | ||
813 | |||
814 | ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER, | ||
815 | PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX, | ||
816 | PHYAddr[qdev->mac_index]); | ||
817 | |||
818 | ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, | ||
819 | PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG | | ||
820 | PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000, | ||
821 | PHYAddr[qdev->mac_index]); | ||
822 | } | ||
823 | |||
824 | static void ql_petbi_init(struct ql3_adapter *qdev) | ||
825 | { | ||
826 | ql_petbi_reset(qdev); | ||
827 | ql_petbi_start_neg(qdev); | ||
828 | } | ||
829 | |||
830 | static void ql_petbi_init_ex(struct ql3_adapter *qdev) | ||
831 | { | ||
832 | ql_petbi_reset_ex(qdev); | ||
833 | ql_petbi_start_neg_ex(qdev); | ||
834 | } | ||
835 | |||
836 | static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev) | ||
837 | { | ||
838 | u16 reg; | ||
839 | |||
840 | if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, ®) < 0) | ||
841 | return 0; | ||
842 | |||
843 | return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE; | ||
844 | } | ||
845 | |||
846 | static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr) | ||
847 | { | ||
848 | netdev_info(qdev->ndev, "enabling Agere specific PHY\n"); | ||
849 | /* power down device bit 11 = 1 */ | ||
850 | ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr); | ||
851 | /* enable diagnostic mode bit 2 = 1 */ | ||
852 | ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr); | ||
853 | /* 1000MB amplitude adjust (see Agere errata) */ | ||
854 | ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr); | ||
855 | /* 1000MB amplitude adjust (see Agere errata) */ | ||
856 | ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr); | ||
857 | /* 100MB amplitude adjust (see Agere errata) */ | ||
858 | ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr); | ||
859 | /* 100MB amplitude adjust (see Agere errata) */ | ||
860 | ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr); | ||
861 | /* 10MB amplitude adjust (see Agere errata) */ | ||
862 | ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr); | ||
863 | /* 10MB amplitude adjust (see Agere errata) */ | ||
864 | ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr); | ||
865 | /* point to hidden reg 0x2806 */ | ||
866 | ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr); | ||
867 | /* Write new PHYAD w/bit 5 set */ | ||
868 | ql_mii_write_reg_ex(qdev, 0x11, | ||
869 | 0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr); | ||
870 | /* | ||
871 | * Disable diagnostic mode bit 2 = 0 | ||
872 | * Power up device bit 11 = 0 | ||
873 | * Link up (on) and activity (blink) | ||
874 | */ | ||
875 | ql_mii_write_reg(qdev, 0x12, 0x840a); | ||
876 | ql_mii_write_reg(qdev, 0x00, 0x1140); | ||
877 | ql_mii_write_reg(qdev, 0x1c, 0xfaf0); | ||
878 | } | ||
879 | |||
880 | static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev, | ||
881 | u16 phyIdReg0, u16 phyIdReg1) | ||
882 | { | ||
883 | enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN; | ||
884 | u32 oui; | ||
885 | u16 model; | ||
886 | int i; | ||
887 | |||
888 | if (phyIdReg0 == 0xffff) | ||
889 | return result; | ||
890 | |||
891 | if (phyIdReg1 == 0xffff) | ||
892 | return result; | ||
893 | |||
894 | /* oui is split between two registers */ | ||
895 | oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10); | ||
896 | |||
897 | model = (phyIdReg1 & PHY_MODEL_MASK) >> 4; | ||
898 | |||
899 | /* Scan table for this PHY */ | ||
900 | for (i = 0; i < MAX_PHY_DEV_TYPES; i++) { | ||
901 | if ((oui == PHY_DEVICES[i].phyIdOUI) && | ||
902 | (model == PHY_DEVICES[i].phyIdModel)) { | ||
903 | netdev_info(qdev->ndev, "Phy: %s\n", | ||
904 | PHY_DEVICES[i].name); | ||
905 | result = PHY_DEVICES[i].phyDevice; | ||
906 | break; | ||
907 | } | ||
908 | } | ||
909 | |||
910 | return result; | ||
911 | } | ||
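/*
 * PHY ID layout assumed above: ID register 0 carries the upper OUI
 * bits, while register 1 packs the remaining 6 OUI bits in its top
 * bits, the model number in bits 9:4 and the silicon revision in bits
 * 3:0 (the revision is ignored here).
 */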
912 | |||
913 | static int ql_phy_get_speed(struct ql3_adapter *qdev) | ||
914 | { | ||
915 | u16 reg; | ||
916 | |||
917 | switch (qdev->phyType) { | ||
918 | case PHY_AGERE_ET1011C: { | ||
919 | if (ql_mii_read_reg(qdev, 0x1A, ®) < 0) | ||
920 | return 0; | ||
921 | |||
922 | reg = (reg >> 8) & 3; | ||
923 | break; | ||
924 | } | ||
925 | default: | ||
926 | if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, ®) < 0) | ||
927 | return 0; | ||
928 | |||
929 | reg = (((reg & 0x18) >> 3) & 3); | ||
930 | } | ||
931 | |||
932 | switch (reg) { | ||
933 | case 2: | ||
934 | return SPEED_1000; | ||
935 | case 1: | ||
936 | return SPEED_100; | ||
937 | case 0: | ||
938 | return SPEED_10; | ||
939 | default: | ||
940 | return -1; | ||
941 | } | ||
942 | } | ||
943 | |||
944 | static int ql_is_full_dup(struct ql3_adapter *qdev) | ||
945 | { | ||
946 | u16 reg; | ||
947 | |||
948 | switch (qdev->phyType) { | ||
949 | case PHY_AGERE_ET1011C: { | ||
950 | if (ql_mii_read_reg(qdev, 0x1A, ®)) | ||
951 | return 0; | ||
952 | |||
953 | return ((reg & 0x0080) && (reg & 0x1000)) != 0; | ||
954 | } | ||
955 | case PHY_VITESSE_VSC8211: | ||
956 | default: { | ||
957 | if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, ®) < 0) | ||
958 | return 0; | ||
959 | return (reg & PHY_AUX_DUPLEX_STAT) != 0; | ||
960 | } | ||
961 | } | ||
962 | } | ||
963 | |||
964 | static int ql_is_phy_neg_pause(struct ql3_adapter *qdev) | ||
965 | { | ||
966 | u16 reg; | ||
967 | |||
968 | if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, ®) < 0) | ||
969 | return 0; | ||
970 | |||
971 | return (reg & PHY_NEG_PAUSE) != 0; | ||
972 | } | ||
973 | |||
974 | static int PHY_Setup(struct ql3_adapter *qdev) | ||
975 | { | ||
976 | u16 reg1; | ||
977 | u16 reg2; | ||
978 | bool agereAddrChangeNeeded = false; | ||
979 | u32 miiAddr = 0; | ||
980 | int err; | ||
981 | |||
982 | /* Determine the PHY we are using by reading the ID's */ | ||
983 | err = ql_mii_read_reg(qdev, PHY_ID_0_REG, ®1); | ||
984 | if (err != 0) { | ||
985 | netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n"); | ||
986 | return err; | ||
987 | } | ||
988 | |||
989 | err = ql_mii_read_reg(qdev, PHY_ID_1_REG, ®2); | ||
990 | if (err != 0) { | ||
991 | netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n"); | ||
992 | return err; | ||
993 | } | ||
994 | |||
995 | /* Check if we have an Agere PHY */ | ||
996 | if ((reg1 == 0xffff) || (reg2 == 0xffff)) { | ||
997 | |||
998 | /* Determine which MII address we should be using, | ||
999 | based on the index of the card */ | ||
1000 | if (qdev->mac_index == 0) | ||
1001 | miiAddr = MII_AGERE_ADDR_1; | ||
1002 | else | ||
1003 | miiAddr = MII_AGERE_ADDR_2; | ||
1004 | |||
1005 | err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, ®1, miiAddr); | ||
1006 | if (err != 0) { | ||
1007 | netdev_err(qdev->ndev, | ||
1008 | "Could not read from reg PHY_ID_0_REG after Agere detected\n"); | ||
1009 | return err; | ||
1010 | } | ||
1011 | |||
1012 | err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, ®2, miiAddr); | ||
1013 | if (err != 0) { | ||
1014 | netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n"); | ||
1015 | return err; | ||
1016 | } | ||
1017 | |||
1018 | /* We need to remember to initialize the Agere PHY */ | ||
1019 | agereAddrChangeNeeded = true; | ||
1020 | } | ||
1021 | |||
1022 | /* Determine the particular PHY we have on board so we can apply | ||
1023 | PHY-specific initializations */ | ||
1024 | qdev->phyType = getPhyType(qdev, reg1, reg2); | ||
1025 | |||
1026 | if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) { | ||
1027 | /* need this here so address gets changed */ | ||
1028 | phyAgereSpecificInit(qdev, miiAddr); | ||
1029 | } else if (qdev->phyType == PHY_TYPE_UNKNOWN) { | ||
1030 | netdev_err(qdev->ndev, "PHY is unknown\n"); | ||
1031 | return -EIO; | ||
1032 | } | ||
1033 | |||
1034 | return 0; | ||
1035 | } | ||
1036 | |||
1037 | /* | ||
1038 | * Caller holds hw_lock. | ||
1039 | */ | ||
1040 | static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable) | ||
1041 | { | ||
1042 | struct ql3xxx_port_registers __iomem *port_regs = | ||
1043 | qdev->mem_map_registers; | ||
1044 | u32 value; | ||
1045 | |||
1046 | if (enable) | ||
1047 | value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16)); | ||
1048 | else | ||
1049 | value = (MAC_CONFIG_REG_PE << 16); | ||
1050 | |||
1051 | if (qdev->mac_index) | ||
1052 | ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); | ||
1053 | else | ||
1054 | ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); | ||
1055 | } | ||
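/*
 * The MAC config registers follow the same upper-16-bit write-enable
 * convention as the semaphore register: (bit | bit << 16) sets a bit,
 * (bit << 16) alone clears it, and all other bits are untouched, so no
 * read-modify-write is needed.  The helpers below use the same pattern
 * for soft reset, gigabit mode, duplex and pause.
 */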
1056 | |||
1057 | /* | ||
1058 | * Caller holds hw_lock. | ||
1059 | */ | ||
1060 | static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable) | ||
1061 | { | ||
1062 | struct ql3xxx_port_registers __iomem *port_regs = | ||
1063 | qdev->mem_map_registers; | ||
1064 | u32 value; | ||
1065 | |||
1066 | if (enable) | ||
1067 | value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16)); | ||
1068 | else | ||
1069 | value = (MAC_CONFIG_REG_SR << 16); | ||
1070 | |||
1071 | if (qdev->mac_index) | ||
1072 | ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); | ||
1073 | else | ||
1074 | ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); | ||
1075 | } | ||
1076 | |||
1077 | /* | ||
1078 | * Caller holds hw_lock. | ||
1079 | */ | ||
1080 | static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable) | ||
1081 | { | ||
1082 | struct ql3xxx_port_registers __iomem *port_regs = | ||
1083 | qdev->mem_map_registers; | ||
1084 | u32 value; | ||
1085 | |||
1086 | if (enable) | ||
1087 | value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16)); | ||
1088 | else | ||
1089 | value = (MAC_CONFIG_REG_GM << 16); | ||
1090 | |||
1091 | if (qdev->mac_index) | ||
1092 | ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); | ||
1093 | else | ||
1094 | ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); | ||
1095 | } | ||
1096 | |||
1097 | /* | ||
1098 | * Caller holds hw_lock. | ||
1099 | */ | ||
1100 | static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable) | ||
1101 | { | ||
1102 | struct ql3xxx_port_registers __iomem *port_regs = | ||
1103 | qdev->mem_map_registers; | ||
1104 | u32 value; | ||
1105 | |||
1106 | if (enable) | ||
1107 | value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16)); | ||
1108 | else | ||
1109 | value = (MAC_CONFIG_REG_FD << 16); | ||
1110 | |||
1111 | if (qdev->mac_index) | ||
1112 | ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); | ||
1113 | else | ||
1114 | ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); | ||
1115 | } | ||
1116 | |||
1117 | /* | ||
1118 | * Caller holds hw_lock. | ||
1119 | */ | ||
1120 | static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable) | ||
1121 | { | ||
1122 | struct ql3xxx_port_registers __iomem *port_regs = | ||
1123 | qdev->mem_map_registers; | ||
1124 | u32 value; | ||
1125 | |||
1126 | if (enable) | ||
1127 | value = | ||
1128 | ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) | | ||
1129 | ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16)); | ||
1130 | else | ||
1131 | value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16); | ||
1132 | |||
1133 | if (qdev->mac_index) | ||
1134 | ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); | ||
1135 | else | ||
1136 | ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); | ||
1137 | } | ||
1138 | |||
1139 | /* | ||
1140 | * Caller holds hw_lock. | ||
1141 | */ | ||
1142 | static int ql_is_fiber(struct ql3_adapter *qdev) | ||
1143 | { | ||
1144 | struct ql3xxx_port_registers __iomem *port_regs = | ||
1145 | qdev->mem_map_registers; | ||
1146 | u32 bitToCheck = 0; | ||
1147 | u32 temp; | ||
1148 | |||
1149 | switch (qdev->mac_index) { | ||
1150 | case 0: | ||
1151 | bitToCheck = PORT_STATUS_SM0; | ||
1152 | break; | ||
1153 | case 1: | ||
1154 | bitToCheck = PORT_STATUS_SM1; | ||
1155 | break; | ||
1156 | } | ||
1157 | |||
1158 | temp = ql_read_page0_reg(qdev, &port_regs->portStatus); | ||
1159 | return (temp & bitToCheck) != 0; | ||
1160 | } | ||
1161 | |||
1162 | static int ql_is_auto_cfg(struct ql3_adapter *qdev) | ||
1163 | { | ||
1164 | u16 reg; | ||
1165 | ql_mii_read_reg(qdev, 0x00, ®); | ||
1166 | return (reg & 0x1000) != 0; | ||
1167 | } | ||
1168 | |||
1169 | /* | ||
1170 | * Caller holds hw_lock. | ||
1171 | */ | ||
1172 | static int ql_is_auto_neg_complete(struct ql3_adapter *qdev) | ||
1173 | { | ||
1174 | struct ql3xxx_port_registers __iomem *port_regs = | ||
1175 | qdev->mem_map_registers; | ||
1176 | u32 bitToCheck = 0; | ||
1177 | u32 temp; | ||
1178 | |||
1179 | switch (qdev->mac_index) { | ||
1180 | case 0: | ||
1181 | bitToCheck = PORT_STATUS_AC0; | ||
1182 | break; | ||
1183 | case 1: | ||
1184 | bitToCheck = PORT_STATUS_AC1; | ||
1185 | break; | ||
1186 | } | ||
1187 | |||
1188 | temp = ql_read_page0_reg(qdev, &port_regs->portStatus); | ||
1189 | if (temp & bitToCheck) { | ||
1190 | netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n"); | ||
1191 | return 1; | ||
1192 | } | ||
1193 | netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n"); | ||
1194 | return 0; | ||
1195 | } | ||
1196 | |||
1197 | /* | ||
1198 | * ql_is_neg_pause() returns 1 if pause was negotiated to be on | ||
1199 | */ | ||
1200 | static int ql_is_neg_pause(struct ql3_adapter *qdev) | ||
1201 | { | ||
1202 | if (ql_is_fiber(qdev)) | ||
1203 | return ql_is_petbi_neg_pause(qdev); | ||
1204 | else | ||
1205 | return ql_is_phy_neg_pause(qdev); | ||
1206 | } | ||
1207 | |||
1208 | static int ql_auto_neg_error(struct ql3_adapter *qdev) | ||
1209 | { | ||
1210 | struct ql3xxx_port_registers __iomem *port_regs = | ||
1211 | qdev->mem_map_registers; | ||
1212 | u32 bitToCheck = 0; | ||
1213 | u32 temp; | ||
1214 | |||
1215 | switch (qdev->mac_index) { | ||
1216 | case 0: | ||
1217 | bitToCheck = PORT_STATUS_AE0; | ||
1218 | break; | ||
1219 | case 1: | ||
1220 | bitToCheck = PORT_STATUS_AE1; | ||
1221 | break; | ||
1222 | } | ||
1223 | temp = ql_read_page0_reg(qdev, &port_regs->portStatus); | ||
1224 | return (temp & bitToCheck) != 0; | ||
1225 | } | ||
1226 | |||
1227 | static u32 ql_get_link_speed(struct ql3_adapter *qdev) | ||
1228 | { | ||
1229 | if (ql_is_fiber(qdev)) | ||
1230 | return SPEED_1000; | ||
1231 | else | ||
1232 | return ql_phy_get_speed(qdev); | ||
1233 | } | ||
1234 | |||
1235 | static int ql_is_link_full_dup(struct ql3_adapter *qdev) | ||
1236 | { | ||
1237 | if (ql_is_fiber(qdev)) | ||
1238 | return 1; | ||
1239 | else | ||
1240 | return ql_is_full_dup(qdev); | ||
1241 | } | ||
1242 | |||
1243 | /* | ||
1244 | * Caller holds hw_lock. | ||
1245 | */ | ||
1246 | static int ql_link_down_detect(struct ql3_adapter *qdev) | ||
1247 | { | ||
1248 | struct ql3xxx_port_registers __iomem *port_regs = | ||
1249 | qdev->mem_map_registers; | ||
1250 | u32 bitToCheck = 0; | ||
1251 | u32 temp; | ||
1252 | |||
1253 | switch (qdev->mac_index) { | ||
1254 | case 0: | ||
1255 | bitToCheck = ISP_CONTROL_LINK_DN_0; | ||
1256 | break; | ||
1257 | case 1: | ||
1258 | bitToCheck = ISP_CONTROL_LINK_DN_1; | ||
1259 | break; | ||
1260 | } | ||
1261 | |||
1262 | temp = | ||
1263 | ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus); | ||
1264 | return (temp & bitToCheck) != 0; | ||
1265 | } | ||
1266 | |||
1267 | /* | ||
1268 | * Caller holds hw_lock. | ||
1269 | */ | ||
1270 | static int ql_link_down_detect_clear(struct ql3_adapter *qdev) | ||
1271 | { | ||
1272 | struct ql3xxx_port_registers __iomem *port_regs = | ||
1273 | qdev->mem_map_registers; | ||
1274 | |||
1275 | switch (qdev->mac_index) { | ||
1276 | case 0: | ||
1277 | ql_write_common_reg(qdev, | ||
1278 | &port_regs->CommonRegs.ispControlStatus, | ||
1279 | (ISP_CONTROL_LINK_DN_0) | | ||
1280 | (ISP_CONTROL_LINK_DN_0 << 16)); | ||
1281 | break; | ||
1282 | |||
1283 | case 1: | ||
1284 | ql_write_common_reg(qdev, | ||
1285 | &port_regs->CommonRegs.ispControlStatus, | ||
1286 | (ISP_CONTROL_LINK_DN_1) | | ||
1287 | (ISP_CONTROL_LINK_DN_1 << 16)); | ||
1288 | break; | ||
1289 | |||
1290 | default: | ||
1291 | return 1; | ||
1292 | } | ||
1293 | |||
1294 | return 0; | ||
1295 | } | ||
1296 | |||
1297 | /* | ||
1298 | * Caller holds hw_lock. | ||
1299 | */ | ||
1300 | static int ql_this_adapter_controls_port(struct ql3_adapter *qdev) | ||
1301 | { | ||
1302 | struct ql3xxx_port_registers __iomem *port_regs = | ||
1303 | qdev->mem_map_registers; | ||
1304 | u32 bitToCheck = 0; | ||
1305 | u32 temp; | ||
1306 | |||
1307 | switch (qdev->mac_index) { | ||
1308 | case 0: | ||
1309 | bitToCheck = PORT_STATUS_F1_ENABLED; | ||
1310 | break; | ||
1311 | case 1: | ||
1312 | bitToCheck = PORT_STATUS_F3_ENABLED; | ||
1313 | break; | ||
1314 | default: | ||
1315 | break; | ||
1316 | } | ||
1317 | |||
1318 | temp = ql_read_page0_reg(qdev, &port_regs->portStatus); | ||
1319 | if (temp & bitToCheck) { | ||
1320 | netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, | ||
1321 | "not link master\n"); | ||
1322 | return 0; | ||
1323 | } | ||
1324 | |||
1325 | netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n"); | ||
1326 | return 1; | ||
1327 | } | ||
1328 | |||
1329 | static void ql_phy_reset_ex(struct ql3_adapter *qdev) | ||
1330 | { | ||
1331 | ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET, | ||
1332 | PHYAddr[qdev->mac_index]); | ||
1333 | } | ||
1334 | |||
1335 | static void ql_phy_start_neg_ex(struct ql3_adapter *qdev) | ||
1336 | { | ||
1337 | u16 reg; | ||
1338 | u16 portConfiguration; | ||
1339 | |||
1340 | if (qdev->phyType == PHY_AGERE_ET1011C) | ||
1341 | ql_mii_write_reg(qdev, 0x13, 0x0000); | ||
1342 | /* turn off external loopback */ | ||
1343 | |||
1344 | if (qdev->mac_index == 0) | ||
1345 | portConfiguration = | ||
1346 | qdev->nvram_data.macCfg_port0.portConfiguration; | ||
1347 | else | ||
1348 | portConfiguration = | ||
1349 | qdev->nvram_data.macCfg_port1.portConfiguration; | ||
1350 | |||
1351 | /* Some HBAs in the field have this set to 0 and it needs to | ||
1352 | be reinterpreted with a default value */ | ||
1353 | if (portConfiguration == 0) | ||
1354 | portConfiguration = PORT_CONFIG_DEFAULT; | ||
1355 | |||
1356 | /* Set the 1000 advertisements */ | ||
1357 | ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, ®, | ||
1358 | PHYAddr[qdev->mac_index]); | ||
1359 | reg &= ~PHY_GIG_ALL_PARAMS; | ||
1360 | |||
1361 | if (portConfiguration & PORT_CONFIG_1000MB_SPEED) { | ||
1362 | if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) | ||
1363 | reg |= PHY_GIG_ADV_1000F; | ||
1364 | else | ||
1365 | reg |= PHY_GIG_ADV_1000H; | ||
1366 | } | ||
1367 | |||
1368 | ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg, | ||
1369 | PHYAddr[qdev->mac_index]); | ||
1370 | |||
1371 | /* Set the 10/100 & pause negotiation advertisements */ | ||
1372 | ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, ®, | ||
1373 | PHYAddr[qdev->mac_index]); | ||
1374 | reg &= ~PHY_NEG_ALL_PARAMS; | ||
1375 | |||
1376 | if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED) | ||
1377 | reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE; | ||
1378 | |||
1379 | if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) { | ||
1380 | if (portConfiguration & PORT_CONFIG_100MB_SPEED) | ||
1381 | reg |= PHY_NEG_ADV_100F; | ||
1382 | |||
1383 | if (portConfiguration & PORT_CONFIG_10MB_SPEED) | ||
1384 | reg |= PHY_NEG_ADV_10F; | ||
1385 | } | ||
1386 | |||
1387 | if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) { | ||
1388 | if (portConfiguration & PORT_CONFIG_100MB_SPEED) | ||
1389 | reg |= PHY_NEG_ADV_100H; | ||
1390 | |||
1391 | if (portConfiguration & PORT_CONFIG_10MB_SPEED) | ||
1392 | reg |= PHY_NEG_ADV_10H; | ||
1393 | } | ||
1394 | |||
1395 | if (portConfiguration & PORT_CONFIG_1000MB_SPEED) | ||
1396 | reg |= 1; | ||
1397 | |||
1398 | ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg, | ||
1399 | PHYAddr[qdev->mac_index]); | ||
1400 | |||
1401 | ql_mii_read_reg_ex(qdev, CONTROL_REG, ®, PHYAddr[qdev->mac_index]); | ||
1402 | |||
1403 | ql_mii_write_reg_ex(qdev, CONTROL_REG, | ||
1404 | reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG, | ||
1405 | PHYAddr[qdev->mac_index]); | ||
1406 | } | ||
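/*
 * Autonegotiation advertisement is driven entirely by the NVRAM
 * portConfiguration word: gigabit ability is programmed through
 * PHY_GIG_CONTROL, 10/100 and pause ability through PHY_NEG_ADVER, and
 * setting PHY_CTRL_RESTART_NEG in CONTROL_REG kicks off the exchange.
 */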
1407 | |||
1408 | static void ql_phy_init_ex(struct ql3_adapter *qdev) | ||
1409 | { | ||
1410 | ql_phy_reset_ex(qdev); | ||
1411 | PHY_Setup(qdev); | ||
1412 | ql_phy_start_neg_ex(qdev); | ||
1413 | } | ||
1414 | |||
1415 | /* | ||
1416 | * Caller holds hw_lock. | ||
1417 | */ | ||
1418 | static u32 ql_get_link_state(struct ql3_adapter *qdev) | ||
1419 | { | ||
1420 | struct ql3xxx_port_registers __iomem *port_regs = | ||
1421 | qdev->mem_map_registers; | ||
1422 | u32 bitToCheck = 0; | ||
1423 | u32 temp, linkState; | ||
1424 | |||
1425 | switch (qdev->mac_index) { | ||
1426 | case 0: | ||
1427 | bitToCheck = PORT_STATUS_UP0; | ||
1428 | break; | ||
1429 | case 1: | ||
1430 | bitToCheck = PORT_STATUS_UP1; | ||
1431 | break; | ||
1432 | } | ||
1433 | |||
1434 | temp = ql_read_page0_reg(qdev, &port_regs->portStatus); | ||
1435 | if (temp & bitToCheck) | ||
1436 | linkState = LS_UP; | ||
1437 | else | ||
1438 | linkState = LS_DOWN; | ||
1439 | |||
1440 | return linkState; | ||
1441 | } | ||
1442 | |||
1443 | static int ql_port_start(struct ql3_adapter *qdev) | ||
1444 | { | ||
1445 | if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, | ||
1446 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * | ||
1447 | 2) << 7)) { | ||
1448 | netdev_err(qdev->ndev, "Could not get hw lock for GIO\n"); | ||
1449 | return -1; | ||
1450 | } | ||
1451 | |||
1452 | if (ql_is_fiber(qdev)) { | ||
1453 | ql_petbi_init(qdev); | ||
1454 | } else { | ||
1455 | /* Copper port */ | ||
1456 | ql_phy_init_ex(qdev); | ||
1457 | } | ||
1458 | |||
1459 | ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); | ||
1460 | return 0; | ||
1461 | } | ||
1462 | |||
1463 | static int ql_finish_auto_neg(struct ql3_adapter *qdev) | ||
1464 | { | ||
1465 | |||
1466 | if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, | ||
1467 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * | ||
1468 | 2) << 7)) | ||
1469 | return -1; | ||
1470 | |||
1471 | if (!ql_auto_neg_error(qdev)) { | ||
1472 | if (test_bit(QL_LINK_MASTER, &qdev->flags)) { | ||
1473 | /* configure the MAC */ | ||
1474 | netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, | ||
1475 | "Configuring link\n"); | ||
1476 | ql_mac_cfg_soft_reset(qdev, 1); | ||
1477 | ql_mac_cfg_gig(qdev, | ||
1478 | (ql_get_link_speed | ||
1479 | (qdev) == | ||
1480 | SPEED_1000)); | ||
1481 | ql_mac_cfg_full_dup(qdev, | ||
1482 | ql_is_link_full_dup | ||
1483 | (qdev)); | ||
1484 | ql_mac_cfg_pause(qdev, | ||
1485 | ql_is_neg_pause | ||
1486 | (qdev)); | ||
1487 | ql_mac_cfg_soft_reset(qdev, 0); | ||
1488 | |||
1489 | /* enable the MAC */ | ||
1490 | netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, | ||
1491 | "Enabling mac\n"); | ||
1492 | ql_mac_enable(qdev, 1); | ||
1493 | } | ||
1494 | |||
1495 | qdev->port_link_state = LS_UP; | ||
1496 | netif_start_queue(qdev->ndev); | ||
1497 | netif_carrier_on(qdev->ndev); | ||
1498 | netif_info(qdev, link, qdev->ndev, | ||
1499 | "Link is up at %d Mbps, %s duplex\n", | ||
1500 | ql_get_link_speed(qdev), | ||
1501 | ql_is_link_full_dup(qdev) ? "full" : "half"); | ||
1502 | |||
1503 | } else { /* Remote error detected */ | ||
1504 | |||
1505 | if (test_bit(QL_LINK_MASTER, &qdev->flags)) { | ||
1506 | netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, | ||
1507 | "Remote error detected. Calling ql_port_start()\n"); | ||
1508 | /* | ||
1509 | * ql_port_start() is shared code and needs | ||
1510 |  * to lock the PHY on its own. | ||
1511 | */ | ||
1512 | ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); | ||
1513 | if (ql_port_start(qdev)) /* Restart port */ | ||
1514 | return -1; | ||
1515 | return 0; | ||
1516 | } | ||
1517 | } | ||
1518 | ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); | ||
1519 | return 0; | ||
1520 | } | ||
1521 | |||
1522 | static void ql_link_state_machine_work(struct work_struct *work) | ||
1523 | { | ||
1524 | struct ql3_adapter *qdev = | ||
1525 | container_of(work, struct ql3_adapter, link_state_work.work); | ||
1526 | |||
1527 | u32 curr_link_state; | ||
1528 | unsigned long hw_flags; | ||
1529 | |||
1530 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | ||
1531 | |||
1532 | curr_link_state = ql_get_link_state(qdev); | ||
1533 | |||
1534 | if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) { | ||
1535 | netif_info(qdev, link, qdev->ndev, | ||
1536 | "Reset in progress, skip processing link state\n"); | ||
1537 | |||
1538 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | ||
1539 | |||
1540 | /* Restart timer on 1 second interval. */ | ||
1541 | mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); | ||
1542 | |||
1543 | return; | ||
1544 | } | ||
1545 | |||
1546 | switch (qdev->port_link_state) { | ||
1547 | default: | ||
1548 | if (test_bit(QL_LINK_MASTER, &qdev->flags)) | ||
1549 | ql_port_start(qdev); | ||
1550 | qdev->port_link_state = LS_DOWN; | ||
1551 | /* Fall Through */ | ||
1552 | |||
1553 | case LS_DOWN: | ||
1554 | if (curr_link_state == LS_UP) { | ||
1555 | netif_info(qdev, link, qdev->ndev, "Link is up\n"); | ||
1556 | if (ql_is_auto_neg_complete(qdev)) | ||
1557 | ql_finish_auto_neg(qdev); | ||
1558 | |||
1559 | if (qdev->port_link_state == LS_UP) | ||
1560 | ql_link_down_detect_clear(qdev); | ||
1561 | |||
1562 | qdev->port_link_state = LS_UP; | ||
1563 | } | ||
1564 | break; | ||
1565 | |||
1566 | case LS_UP: | ||
1567 | /* | ||
1568 | * See if the link is currently down or went down and came | ||
1569 | * back up | ||
1570 | */ | ||
1571 | if (curr_link_state == LS_DOWN) { | ||
1572 | netif_info(qdev, link, qdev->ndev, "Link is down\n"); | ||
1573 | qdev->port_link_state = LS_DOWN; | ||
1574 | } | ||
1575 | if (ql_link_down_detect(qdev)) | ||
1576 | qdev->port_link_state = LS_DOWN; | ||
1577 | break; | ||
1578 | } | ||
1579 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | ||
1580 | |||
1581 | /* Restart timer on 1 second interval. */ | ||
1582 | mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); | ||
1583 | } | ||
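/*
 * Link state machine in brief: from LS_DOWN (or an unknown state) a
 * transition to LS_UP completes autonegotiation and clears the latched
 * link-down indication; from LS_UP, a drop in the current link state
 * or a latched link-down event returns the port to LS_DOWN.  Either
 * way the work item rearms the adapter timer to poll again.
 */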
1584 | |||
1585 | /* | ||
1586 | * Caller must take hw_lock and QL_PHY_GIO_SEM. | ||
1587 | */ | ||
1588 | static void ql_get_phy_owner(struct ql3_adapter *qdev) | ||
1589 | { | ||
1590 | if (ql_this_adapter_controls_port(qdev)) | ||
1591 | set_bit(QL_LINK_MASTER, &qdev->flags); | ||
1592 | else | ||
1593 | clear_bit(QL_LINK_MASTER, &qdev->flags); | ||
1594 | } | ||
1595 | |||
1596 | /* | ||
1597 | * Caller must take hw_lock and QL_PHY_GIO_SEM. | ||
1598 | */ | ||
1599 | static void ql_init_scan_mode(struct ql3_adapter *qdev) | ||
1600 | { | ||
1601 | ql_mii_enable_scan_mode(qdev); | ||
1602 | |||
1603 | if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { | ||
1604 | if (ql_this_adapter_controls_port(qdev)) | ||
1605 | ql_petbi_init_ex(qdev); | ||
1606 | } else { | ||
1607 | if (ql_this_adapter_controls_port(qdev)) | ||
1608 | ql_phy_init_ex(qdev); | ||
1609 | } | ||
1610 | } | ||
1611 | |||
1612 | /* | ||
1613 | * MII_Setup needs to be called before taking the PHY out of reset | ||
1614 | * so that the management interface clock speed can be set properly. | ||
1615 | * It would be better if we had a way to disable MDC until after the | ||
1616 | * PHY is out of reset, but we don't have that capability. | ||
1617 | */ | ||
1618 | static int ql_mii_setup(struct ql3_adapter *qdev) | ||
1619 | { | ||
1620 | u32 reg; | ||
1621 | struct ql3xxx_port_registers __iomem *port_regs = | ||
1622 | qdev->mem_map_registers; | ||
1623 | |||
1624 | if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, | ||
1625 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * | ||
1626 | 2) << 7)) | ||
1627 | return -1; | ||
1628 | |||
1629 | if (qdev->device_id == QL3032_DEVICE_ID) | ||
1630 | ql_write_page0_reg(qdev, | ||
1631 | &port_regs->macMIIMgmtControlReg, 0x0f00000); | ||
1632 | |||
1633 | /* Divide 125MHz clock by 28 to meet PHY timing requirements */ | ||
1634 | reg = MAC_MII_CONTROL_CLK_SEL_DIV28; | ||
1635 | |||
1636 | ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, | ||
1637 | reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16)); | ||
1638 | |||
1639 | ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); | ||
1640 | return 0; | ||
1641 | } | ||
1642 | |||
1643 | #define SUPPORTED_OPTICAL_MODES (SUPPORTED_1000baseT_Full | \ | ||
1644 | SUPPORTED_FIBRE | \ | ||
1645 | SUPPORTED_Autoneg) | ||
1646 | #define SUPPORTED_TP_MODES (SUPPORTED_10baseT_Half | \ | ||
1647 | SUPPORTED_10baseT_Full | \ | ||
1648 | SUPPORTED_100baseT_Half | \ | ||
1649 | SUPPORTED_100baseT_Full | \ | ||
1650 | SUPPORTED_1000baseT_Half | \ | ||
1651 | SUPPORTED_1000baseT_Full | \ | ||
1652 | SUPPORTED_Autoneg | \ | ||
1653 | SUPPORTED_TP) | ||
1654 | |||
1655 | static u32 ql_supported_modes(struct ql3_adapter *qdev) | ||
1656 | { | ||
1657 | if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) | ||
1658 | return SUPPORTED_OPTICAL_MODES; | ||
1659 | |||
1660 | return SUPPORTED_TP_MODES; | ||
1661 | } | ||
1662 | |||
1663 | static int ql_get_auto_cfg_status(struct ql3_adapter *qdev) | ||
1664 | { | ||
1665 | int status; | ||
1666 | unsigned long hw_flags; | ||
1667 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | ||
1668 | if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, | ||
1669 | (QL_RESOURCE_BITS_BASE_CODE | | ||
1670 | (qdev->mac_index) * 2) << 7)) { | ||
1671 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | ||
1672 | return 0; | ||
1673 | } | ||
1674 | status = ql_is_auto_cfg(qdev); | ||
1675 | ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); | ||
1676 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | ||
1677 | return status; | ||
1678 | } | ||
1679 | |||
1680 | static u32 ql_get_speed(struct ql3_adapter *qdev) | ||
1681 | { | ||
1682 | u32 status; | ||
1683 | unsigned long hw_flags; | ||
1684 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | ||
1685 | if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, | ||
1686 | (QL_RESOURCE_BITS_BASE_CODE | | ||
1687 | (qdev->mac_index) * 2) << 7)) { | ||
1688 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | ||
1689 | return 0; | ||
1690 | } | ||
1691 | status = ql_get_link_speed(qdev); | ||
1692 | ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); | ||
1693 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | ||
1694 | return status; | ||
1695 | } | ||
1696 | |||
1697 | static int ql_get_full_dup(struct ql3_adapter *qdev) | ||
1698 | { | ||
1699 | int status; | ||
1700 | unsigned long hw_flags; | ||
1701 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | ||
1702 | if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, | ||
1703 | (QL_RESOURCE_BITS_BASE_CODE | | ||
1704 | (qdev->mac_index) * 2) << 7)) { | ||
1705 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | ||
1706 | return 0; | ||
1707 | } | ||
1708 | status = ql_is_link_full_dup(qdev); | ||
1709 | ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); | ||
1710 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | ||
1711 | return status; | ||
1712 | } | ||
1713 | |||
1714 | static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) | ||
1715 | { | ||
1716 | struct ql3_adapter *qdev = netdev_priv(ndev); | ||
1717 | |||
1718 | ecmd->transceiver = XCVR_INTERNAL; | ||
1719 | ecmd->supported = ql_supported_modes(qdev); | ||
1720 | |||
1721 | if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { | ||
1722 | ecmd->port = PORT_FIBRE; | ||
1723 | } else { | ||
1724 | ecmd->port = PORT_TP; | ||
1725 | ecmd->phy_address = qdev->PHYAddr; | ||
1726 | } | ||
1727 | ecmd->advertising = ql_supported_modes(qdev); | ||
1728 | ecmd->autoneg = ql_get_auto_cfg_status(qdev); | ||
1729 | ethtool_cmd_speed_set(ecmd, ql_get_speed(qdev)); | ||
1730 | ecmd->duplex = ql_get_full_dup(qdev); | ||
1731 | return 0; | ||
1732 | } | ||
1733 | |||
1734 | static void ql_get_drvinfo(struct net_device *ndev, | ||
1735 | struct ethtool_drvinfo *drvinfo) | ||
1736 | { | ||
1737 | struct ql3_adapter *qdev = netdev_priv(ndev); | ||
1738 | strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver)); | ||
1739 | strlcpy(drvinfo->version, ql3xxx_driver_version, sizeof(drvinfo->version)); | ||
1740 | strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); | ||
1741 | strlcpy(drvinfo->bus_info, pci_name(qdev->pdev), sizeof(drvinfo->bus_info)); | ||
1742 | drvinfo->regdump_len = 0; | ||
1743 | drvinfo->eedump_len = 0; | ||
1744 | } | ||
1745 | |||
1746 | static u32 ql_get_msglevel(struct net_device *ndev) | ||
1747 | { | ||
1748 | struct ql3_adapter *qdev = netdev_priv(ndev); | ||
1749 | return qdev->msg_enable; | ||
1750 | } | ||
1751 | |||
1752 | static void ql_set_msglevel(struct net_device *ndev, u32 value) | ||
1753 | { | ||
1754 | struct ql3_adapter *qdev = netdev_priv(ndev); | ||
1755 | qdev->msg_enable = value; | ||
1756 | } | ||
1757 | |||
1758 | static void ql_get_pauseparam(struct net_device *ndev, | ||
1759 | struct ethtool_pauseparam *pause) | ||
1760 | { | ||
1761 | struct ql3_adapter *qdev = netdev_priv(ndev); | ||
1762 | struct ql3xxx_port_registers __iomem *port_regs = | ||
1763 | qdev->mem_map_registers; | ||
1764 | |||
1765 | u32 reg; | ||
1766 | if (qdev->mac_index == 0) | ||
1767 | reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg); | ||
1768 | else | ||
1769 | reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg); | ||
1770 | |||
1771 | pause->autoneg = ql_get_auto_cfg_status(qdev); | ||
1772 | pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2; | ||
1773 | pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1; | ||
1774 | } | ||
1775 | |||
1776 | static const struct ethtool_ops ql3xxx_ethtool_ops = { | ||
1777 | .get_settings = ql_get_settings, | ||
1778 | .get_drvinfo = ql_get_drvinfo, | ||
1779 | .get_link = ethtool_op_get_link, | ||
1780 | .get_msglevel = ql_get_msglevel, | ||
1781 | .set_msglevel = ql_set_msglevel, | ||
1782 | .get_pauseparam = ql_get_pauseparam, | ||
1783 | }; | ||
1784 | |||
1785 | static int ql_populate_free_queue(struct ql3_adapter *qdev) | ||
1786 | { | ||
1787 | struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head; | ||
1788 | dma_addr_t map; | ||
1789 | int err; | ||
1790 | |||
1791 | while (lrg_buf_cb) { | ||
1792 | if (!lrg_buf_cb->skb) { | ||
1793 | lrg_buf_cb->skb = | ||
1794 | netdev_alloc_skb(qdev->ndev, | ||
1795 | qdev->lrg_buffer_len); | ||
1796 | if (unlikely(!lrg_buf_cb->skb)) { | ||
1797 | netdev_printk(KERN_DEBUG, qdev->ndev, | ||
1798 | "Failed netdev_alloc_skb()\n"); | ||
1799 | break; | ||
1800 | } else { | ||
1801 | /* | ||
1802 | * We save some space to copy the ethhdr from | ||
1803 | * the first buffer. | ||
1804 | */ | ||
1805 | skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE); | ||
1806 | map = pci_map_single(qdev->pdev, | ||
1807 | lrg_buf_cb->skb->data, | ||
1808 | qdev->lrg_buffer_len - | ||
1809 | QL_HEADER_SPACE, | ||
1810 | PCI_DMA_FROMDEVICE); | ||
1811 | |||
1812 | err = pci_dma_mapping_error(qdev->pdev, map); | ||
1813 | if (err) { | ||
1814 | netdev_err(qdev->ndev, | ||
1815 | "PCI mapping failed with error: %d\n", | ||
1816 | err); | ||
1817 | dev_kfree_skb(lrg_buf_cb->skb); | ||
1818 | lrg_buf_cb->skb = NULL; | ||
1819 | break; | ||
1820 | } | ||
1821 | |||
1822 | |||
1823 | lrg_buf_cb->buf_phy_addr_low = | ||
1824 | cpu_to_le32(LS_64BITS(map)); | ||
1825 | lrg_buf_cb->buf_phy_addr_high = | ||
1826 | cpu_to_le32(MS_64BITS(map)); | ||
1827 | dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); | ||
1828 | dma_unmap_len_set(lrg_buf_cb, maplen, | ||
1829 | qdev->lrg_buffer_len - | ||
1830 | QL_HEADER_SPACE); | ||
1831 | --qdev->lrg_buf_skb_check; | ||
1832 | if (!qdev->lrg_buf_skb_check) | ||
1833 | return 1; | ||
1834 | } | ||
1835 | } | ||
1836 | lrg_buf_cb = lrg_buf_cb->next; | ||
1837 | } | ||
1838 | return 0; | ||
1839 | } | ||
1840 | |||
1841 | /* | ||
1842 | * Caller holds hw_lock. | ||
1843 | */ | ||
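 | /* | ||
 | * Note on the arithmetic below (inferred from ql_alloc_small_buffers(), | ||
 | * which lays out QL_ADDR_ELE_PER_BUFQ_ENTRY buffer addresses per queue | ||
 | * entry): advancing the producer index by one republishes eight buffers, | ||
 | * hence the release count drops by 8 per step, and the >= 16 threshold | ||
 | * batches the MMIO write to the producer register. | ||
 | */ | ||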
1844 | static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev) | ||
1845 | { | ||
1846 | struct ql3xxx_port_registers __iomem *port_regs = | ||
1847 | qdev->mem_map_registers; | ||
1848 | |||
1849 | if (qdev->small_buf_release_cnt >= 16) { | ||
1850 | while (qdev->small_buf_release_cnt >= 16) { | ||
1851 | qdev->small_buf_q_producer_index++; | ||
1852 | |||
1853 | if (qdev->small_buf_q_producer_index == | ||
1854 | NUM_SBUFQ_ENTRIES) | ||
1855 | qdev->small_buf_q_producer_index = 0; | ||
1856 | qdev->small_buf_release_cnt -= 8; | ||
1857 | } | ||
1858 | wmb(); | ||
1859 | writel(qdev->small_buf_q_producer_index, | ||
1860 | &port_regs->CommonRegs.rxSmallQProducerIndex); | ||
1861 | } | ||
1862 | } | ||
1863 | |||
1864 | /* | ||
1865 | * Caller holds hw_lock. | ||
1866 | */ | ||
1867 | static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev) | ||
1868 | { | ||
1869 | struct bufq_addr_element *lrg_buf_q_ele; | ||
1870 | int i; | ||
1871 | struct ql_rcv_buf_cb *lrg_buf_cb; | ||
1872 | struct ql3xxx_port_registers __iomem *port_regs = | ||
1873 | qdev->mem_map_registers; | ||
1874 | |||
1875 | if ((qdev->lrg_buf_free_count >= 8) && | ||
1876 | (qdev->lrg_buf_release_cnt >= 16)) { | ||
1877 | |||
1878 | if (qdev->lrg_buf_skb_check) | ||
1879 | if (!ql_populate_free_queue(qdev)) | ||
1880 | return; | ||
1881 | |||
1882 | lrg_buf_q_ele = qdev->lrg_buf_next_free; | ||
1883 | |||
1884 | while ((qdev->lrg_buf_release_cnt >= 16) && | ||
1885 | (qdev->lrg_buf_free_count >= 8)) { | ||
1886 | |||
1887 | for (i = 0; i < 8; i++) { | ||
1888 | lrg_buf_cb = | ||
1889 | ql_get_from_lrg_buf_free_list(qdev); | ||
1890 | lrg_buf_q_ele->addr_high = | ||
1891 | lrg_buf_cb->buf_phy_addr_high; | ||
1892 | lrg_buf_q_ele->addr_low = | ||
1893 | lrg_buf_cb->buf_phy_addr_low; | ||
1894 | lrg_buf_q_ele++; | ||
1895 | |||
1896 | qdev->lrg_buf_release_cnt--; | ||
1897 | } | ||
1898 | |||
1899 | qdev->lrg_buf_q_producer_index++; | ||
1900 | |||
1901 | if (qdev->lrg_buf_q_producer_index == | ||
1902 | qdev->num_lbufq_entries) | ||
1903 | qdev->lrg_buf_q_producer_index = 0; | ||
1904 | |||
1905 | if (qdev->lrg_buf_q_producer_index == | ||
1906 | (qdev->num_lbufq_entries - 1)) { | ||
1907 | lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr; | ||
1908 | } | ||
1909 | } | ||
1910 | wmb(); | ||
1911 | qdev->lrg_buf_next_free = lrg_buf_q_ele; | ||
1912 | writel(qdev->lrg_buf_q_producer_index, | ||
1913 | &port_regs->CommonRegs.rxLargeQProducerIndex); | ||
1914 | } | ||
1915 | } | ||
1916 | |||
1917 | static void ql_process_mac_tx_intr(struct ql3_adapter *qdev, | ||
1918 | struct ob_mac_iocb_rsp *mac_rsp) | ||
1919 | { | ||
1920 | struct ql_tx_buf_cb *tx_cb; | ||
1921 | int i; | ||
1922 | int retval = 0; | ||
1923 | |||
1924 | if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) { | ||
1925 | netdev_warn(qdev->ndev, | ||
1926 | "Frame too short but it was padded and sent\n"); | ||
1927 | } | ||
1928 | |||
1929 | tx_cb = &qdev->tx_buf[mac_rsp->transaction_id]; | ||
1930 | |||
1931 | /* Check the transmit response flags for any errors */ | ||
1932 | if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) { | ||
1933 | netdev_err(qdev->ndev, | ||
1934 | "Frame too short to be legal, frame not sent\n"); | ||
1935 | |||
1936 | qdev->ndev->stats.tx_errors++; | ||
1937 | retval = -EIO; | ||
1938 | goto frame_not_sent; | ||
1939 | } | ||
1940 | |||
1941 | if (tx_cb->seg_count == 0) { | ||
1942 | netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n", | ||
1943 | mac_rsp->transaction_id); | ||
1944 | |||
1945 | qdev->ndev->stats.tx_errors++; | ||
1946 | retval = -EIO; | ||
1947 | goto invalid_seg_count; | ||
1948 | } | ||
1949 | |||
1950 | pci_unmap_single(qdev->pdev, | ||
1951 | dma_unmap_addr(&tx_cb->map[0], mapaddr), | ||
1952 | dma_unmap_len(&tx_cb->map[0], maplen), | ||
1953 | PCI_DMA_TODEVICE); | ||
1954 | tx_cb->seg_count--; | ||
1955 | if (tx_cb->seg_count) { | ||
1956 | for (i = 1; i < tx_cb->seg_count; i++) { | ||
1957 | pci_unmap_page(qdev->pdev, | ||
1958 | dma_unmap_addr(&tx_cb->map[i], | ||
1959 | mapaddr), | ||
1960 | dma_unmap_len(&tx_cb->map[i], maplen), | ||
1961 | PCI_DMA_TODEVICE); | ||
1962 | } | ||
1963 | } | ||
1964 | qdev->ndev->stats.tx_packets++; | ||
1965 | qdev->ndev->stats.tx_bytes += tx_cb->skb->len; | ||
1966 | |||
1967 | frame_not_sent: | ||
1968 | dev_kfree_skb_irq(tx_cb->skb); | ||
1969 | tx_cb->skb = NULL; | ||
1970 | |||
1971 | invalid_seg_count: | ||
1972 | atomic_inc(&qdev->tx_count); | ||
1973 | } | ||
1974 | |||
1975 | static void ql_get_sbuf(struct ql3_adapter *qdev) | ||
1976 | { | ||
1977 | if (++qdev->small_buf_index == NUM_SMALL_BUFFERS) | ||
1978 | qdev->small_buf_index = 0; | ||
1979 | qdev->small_buf_release_cnt++; | ||
1980 | } | ||
1981 | |||
1982 | static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev) | ||
1983 | { | ||
1984 | struct ql_rcv_buf_cb *lrg_buf_cb = NULL; | ||
1985 | lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index]; | ||
1986 | qdev->lrg_buf_release_cnt++; | ||
1987 | if (++qdev->lrg_buf_index == qdev->num_large_buffers) | ||
1988 | qdev->lrg_buf_index = 0; | ||
1989 | return lrg_buf_cb; | ||
1990 | } | ||
1991 | |||
1992 | /* | ||
1993 | * The difference between 3022 and 3032 for inbound completions: | ||
1994 | * 3022 uses two buffers per completion. The first buffer contains | ||
1995 | * (some) header info, the second the remainder of the headers plus | ||
1996 | * the data. For this chip we reserve some space at the top of the | ||
1997 | * receive buffer so that the header info in buffer one can be | ||
1998 | * prepended to buffer two.  Buffer two is then sent up while | ||
1999 | * buffer one is returned to the hardware to be reused. | ||
2000 | * 3032 receives all of its data and headers in one buffer for a | ||
2001 | * simpler process. 3032 also supports checksum verification as | ||
2002 | * can be seen in ql_process_macip_rx_intr(). | ||
2003 | */ | ||
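 | /* | ||
 | * Sketch of the 3022 two-buffer scheme described above (layout inferred | ||
 | * from ql_process_macip_rx_intr(); sizes illustrative): | ||
 | * | ||
 | *   buffer 1: [ MAC header ... ]            -> returned to hardware | ||
 | *   buffer 2: [ gap ][ rest of the frame ]  -> header copied into the | ||
 | *              ^ QL_HEADER_SPACE               gap, skb sent up | ||
 | */ | ||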
2004 | static void ql_process_mac_rx_intr(struct ql3_adapter *qdev, | ||
2005 | struct ib_mac_iocb_rsp *ib_mac_rsp_ptr) | ||
2006 | { | ||
2007 | struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL; | ||
2008 | struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL; | ||
2009 | struct sk_buff *skb; | ||
2010 | u16 length = le16_to_cpu(ib_mac_rsp_ptr->length); | ||
2011 | |||
2012 | /* | ||
2013 | * Get the inbound address list (small buffer). | ||
2014 | */ | ||
2015 | ql_get_sbuf(qdev); | ||
2016 | |||
2017 | if (qdev->device_id == QL3022_DEVICE_ID) | ||
2018 | lrg_buf_cb1 = ql_get_lbuf(qdev); | ||
2019 | |||
2020 | /* start of second buffer */ | ||
2021 | lrg_buf_cb2 = ql_get_lbuf(qdev); | ||
2022 | skb = lrg_buf_cb2->skb; | ||
2023 | |||
2024 | qdev->ndev->stats.rx_packets++; | ||
2025 | qdev->ndev->stats.rx_bytes += length; | ||
2026 | |||
2027 | skb_put(skb, length); | ||
2028 | pci_unmap_single(qdev->pdev, | ||
2029 | dma_unmap_addr(lrg_buf_cb2, mapaddr), | ||
2030 | dma_unmap_len(lrg_buf_cb2, maplen), | ||
2031 | PCI_DMA_FROMDEVICE); | ||
2032 | prefetch(skb->data); | ||
2033 | skb_checksum_none_assert(skb); | ||
2034 | skb->protocol = eth_type_trans(skb, qdev->ndev); | ||
2035 | |||
2036 | netif_receive_skb(skb); | ||
2037 | lrg_buf_cb2->skb = NULL; | ||
2038 | |||
2039 | if (qdev->device_id == QL3022_DEVICE_ID) | ||
2040 | ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); | ||
2041 | ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); | ||
2042 | } | ||
2043 | |||
2044 | static void ql_process_macip_rx_intr(struct ql3_adapter *qdev, | ||
2045 | struct ib_ip_iocb_rsp *ib_ip_rsp_ptr) | ||
2046 | { | ||
2047 | struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL; | ||
2048 | struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL; | ||
2049 | struct sk_buff *skb1 = NULL, *skb2; | ||
2050 | struct net_device *ndev = qdev->ndev; | ||
2051 | u16 length = le16_to_cpu(ib_ip_rsp_ptr->length); | ||
2052 | u16 size = 0; | ||
2053 | |||
2054 | /* | ||
2055 | * Get the inbound address list (small buffer). | ||
2056 | */ | ||
2057 | |||
2058 | ql_get_sbuf(qdev); | ||
2059 | |||
2060 | if (qdev->device_id == QL3022_DEVICE_ID) { | ||
2061 | /* start of first buffer on 3022 */ | ||
2062 | lrg_buf_cb1 = ql_get_lbuf(qdev); | ||
2063 | skb1 = lrg_buf_cb1->skb; | ||
2064 | size = ETH_HLEN; | ||
2065 | if (*((u16 *) skb1->data) != 0xFFFF) | ||
2066 | size += VLAN_ETH_HLEN - ETH_HLEN; | ||
2067 | } | ||
2068 | |||
2069 | /* start of second buffer */ | ||
2070 | lrg_buf_cb2 = ql_get_lbuf(qdev); | ||
2071 | skb2 = lrg_buf_cb2->skb; | ||
2072 | |||
2073 | skb_put(skb2, length); /* Just the second buffer length here. */ | ||
2074 | pci_unmap_single(qdev->pdev, | ||
2075 | dma_unmap_addr(lrg_buf_cb2, mapaddr), | ||
2076 | dma_unmap_len(lrg_buf_cb2, maplen), | ||
2077 | PCI_DMA_FROMDEVICE); | ||
2078 | prefetch(skb2->data); | ||
2079 | |||
2080 | skb_checksum_none_assert(skb2); | ||
2081 | if (qdev->device_id == QL3022_DEVICE_ID) { | ||
2082 | /* | ||
2083 | * Copy the ethhdr from first buffer to second. This | ||
2084 | * is necessary for 3022 IP completions. | ||
2085 | */ | ||
2086 | skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN, | ||
2087 | skb_push(skb2, size), size); | ||
2088 | } else { | ||
2089 | u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum); | ||
2090 | if (checksum & | ||
2091 | (IB_IP_IOCB_RSP_3032_ICE | | ||
2092 | IB_IP_IOCB_RSP_3032_CE)) { | ||
2093 | netdev_err(ndev, | ||
2094 | "%s: Bad checksum for this %s packet, checksum = %x\n", | ||
2095 | __func__, | ||
2096 | ((checksum & IB_IP_IOCB_RSP_3032_TCP) ? | ||
2097 | "TCP" : "UDP"), checksum); | ||
2098 | } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) || | ||
2099 | (checksum & IB_IP_IOCB_RSP_3032_UDP && | ||
2100 | !(checksum & IB_IP_IOCB_RSP_3032_NUC))) { | ||
2101 | skb2->ip_summed = CHECKSUM_UNNECESSARY; | ||
2102 | } | ||
2103 | } | ||
2104 | skb2->protocol = eth_type_trans(skb2, qdev->ndev); | ||
2105 | |||
2106 | netif_receive_skb(skb2); | ||
2107 | ndev->stats.rx_packets++; | ||
2108 | ndev->stats.rx_bytes += length; | ||
2109 | lrg_buf_cb2->skb = NULL; | ||
2110 | |||
2111 | if (qdev->device_id == QL3022_DEVICE_ID) | ||
2112 | ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); | ||
2113 | ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); | ||
2114 | } | ||
2115 | |||
2116 | static int ql_tx_rx_clean(struct ql3_adapter *qdev, | ||
2117 | int *tx_cleaned, int *rx_cleaned, int work_to_do) | ||
2118 | { | ||
2119 | struct net_rsp_iocb *net_rsp; | ||
2120 | struct net_device *ndev = qdev->ndev; | ||
2121 | int work_done = 0; | ||
2122 | |||
2123 | /* While there are entries in the completion queue. */ | ||
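 | /* | ||
 | * The producer index lives in the shadow page the hardware DMAs into | ||
 | * (set up in ql_alloc_mem_resources()); the consumer index is driver | ||
 | * state and is written back to the chip from ql_poll(). | ||
 | */ | ||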
2124 | while ((le32_to_cpu(*(qdev->prsp_producer_index)) != | ||
2125 | qdev->rsp_consumer_index) && (work_done < work_to_do)) { | ||
2126 | |||
2127 | net_rsp = qdev->rsp_current; | ||
2128 | rmb(); | ||
2129 | /* | ||
2130 | * Fix 3032 chip's undocumented "feature" where bit-8 is set | ||
2131 | * if the inbound completion is for a VLAN. | ||
2132 | */ | ||
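 | /* | ||
 | * e.g. (tag value illustrative): a VLAN inbound completion would show | ||
 | * up as (0x80 | OPCODE_IB_3032_MAC_IOCB); masking with 0x7f clears | ||
 | * bit-8 so the switch below matches the plain opcode. | ||
 | */ | ||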
2133 | if (qdev->device_id == QL3032_DEVICE_ID) | ||
2134 | net_rsp->opcode &= 0x7f; | ||
2135 | switch (net_rsp->opcode) { | ||
2136 | |||
2137 | case OPCODE_OB_MAC_IOCB_FN0: | ||
2138 | case OPCODE_OB_MAC_IOCB_FN2: | ||
2139 | ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *) | ||
2140 | net_rsp); | ||
2141 | (*tx_cleaned)++; | ||
2142 | break; | ||
2143 | |||
2144 | case OPCODE_IB_MAC_IOCB: | ||
2145 | case OPCODE_IB_3032_MAC_IOCB: | ||
2146 | ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *) | ||
2147 | net_rsp); | ||
2148 | (*rx_cleaned)++; | ||
2149 | break; | ||
2150 | |||
2151 | case OPCODE_IB_IP_IOCB: | ||
2152 | case OPCODE_IB_3032_IP_IOCB: | ||
2153 | ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *) | ||
2154 | net_rsp); | ||
2155 | (*rx_cleaned)++; | ||
2156 | break; | ||
2157 | default: { | ||
2158 | u32 *tmp = (u32 *)net_rsp; | ||
2159 | netdev_err(ndev, | ||
2160 | "Hit default case, not handled!\n" | ||
2161 | " dropping the packet, opcode = %x\n" | ||
2162 | "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", | ||
2163 | net_rsp->opcode, | ||
2164 | (unsigned long int)tmp[0], | ||
2165 | (unsigned long int)tmp[1], | ||
2166 | (unsigned long int)tmp[2], | ||
2167 | (unsigned long int)tmp[3]); | ||
2168 | } | ||
2169 | } | ||
2170 | |||
2171 | qdev->rsp_consumer_index++; | ||
2172 | |||
2173 | if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) { | ||
2174 | qdev->rsp_consumer_index = 0; | ||
2175 | qdev->rsp_current = qdev->rsp_q_virt_addr; | ||
2176 | } else { | ||
2177 | qdev->rsp_current++; | ||
2178 | } | ||
2179 | |||
2180 | work_done = *tx_cleaned + *rx_cleaned; | ||
2181 | } | ||
2182 | |||
2183 | return work_done; | ||
2184 | } | ||
2185 | |||
2186 | static int ql_poll(struct napi_struct *napi, int budget) | ||
2187 | { | ||
2188 | struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi); | ||
2189 | int rx_cleaned = 0, tx_cleaned = 0; | ||
2190 | unsigned long hw_flags; | ||
2191 | struct ql3xxx_port_registers __iomem *port_regs = | ||
2192 | qdev->mem_map_registers; | ||
2193 | |||
2194 | ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget); | ||
2195 | |||
2196 | if (tx_cleaned + rx_cleaned != budget) { | ||
2197 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | ||
2198 | __napi_complete(napi); | ||
2199 | ql_update_small_bufq_prod_index(qdev); | ||
2200 | ql_update_lrg_bufq_prod_index(qdev); | ||
2201 | writel(qdev->rsp_consumer_index, | ||
2202 | &port_regs->CommonRegs.rspQConsumerIndex); | ||
2203 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | ||
2204 | |||
2205 | ql_enable_interrupts(qdev); | ||
2206 | } | ||
2207 | return tx_cleaned + rx_cleaned; | ||
2208 | } | ||
2209 | |||
2210 | static irqreturn_t ql3xxx_isr(int irq, void *dev_id) | ||
2211 | { | ||
2212 | |||
2213 | struct net_device *ndev = dev_id; | ||
2214 | struct ql3_adapter *qdev = netdev_priv(ndev); | ||
2215 | struct ql3xxx_port_registers __iomem *port_regs = | ||
2216 | qdev->mem_map_registers; | ||
2217 | u32 value; | ||
2218 | int handled = 1; | ||
2219 | u32 var; | ||
2220 | |||
2221 | value = ql_read_common_reg_l(qdev, | ||
2222 | &port_regs->CommonRegs.ispControlStatus); | ||
2223 | |||
2224 | if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) { | ||
2225 | spin_lock(&qdev->adapter_lock); | ||
2226 | netif_stop_queue(qdev->ndev); | ||
2227 | netif_carrier_off(qdev->ndev); | ||
2228 | ql_disable_interrupts(qdev); | ||
2229 | qdev->port_link_state = LS_DOWN; | ||
2230 | set_bit(QL_RESET_ACTIVE, &qdev->flags); | ||
2231 | |||
2232 | if (value & ISP_CONTROL_FE) { | ||
2233 | /* | ||
2234 | * Chip Fatal Error. | ||
2235 | */ | ||
2236 | var = | ||
2237 | ql_read_page0_reg_l(qdev, | ||
2238 | &port_regs->PortFatalErrStatus); | ||
2239 | netdev_warn(ndev, | ||
2240 | "Resetting chip. PortFatalErrStatus register = 0x%x\n", | ||
2241 | var); | ||
2242 | set_bit(QL_RESET_START, &qdev->flags); | ||
2243 | } else { | ||
2244 | /* | ||
2245 | * Soft Reset Requested. | ||
2246 | */ | ||
2247 | set_bit(QL_RESET_PER_SCSI, &qdev->flags); | ||
2248 | netdev_err(ndev, | ||
2249 | "Another function issued a reset to the chip. ISR value = %x\n", | ||
2250 | value); | ||
2251 | } | ||
2252 | queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0); | ||
2253 | spin_unlock(&qdev->adapter_lock); | ||
2254 | } else if (value & ISP_IMR_DISABLE_CMPL_INT) { | ||
2255 | ql_disable_interrupts(qdev); | ||
2256 | if (likely(napi_schedule_prep(&qdev->napi))) | ||
2257 | __napi_schedule(&qdev->napi); | ||
2258 | } else | ||
2259 | return IRQ_NONE; | ||
2260 | |||
2261 | return IRQ_RETVAL(handled); | ||
2262 | } | ||
2263 | |||
2264 | /* | ||
2265 | * Get the total number of segments needed for the given number of fragments. | ||
2266 | * This is necessary because outbound address lists (OAL) will be used when | ||
2267 | * more than two frags are given. Each address list has 5 addr/len pairs. | ||
2268 | * The 5th pair in each OAL is used to point to the next OAL if more frags | ||
2269 | * are coming. That is why the frags:segment count ratio is not linear. | ||
2270 | */ | ||
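 | /* | ||
 | * Worked example, derived from the thresholds below: 3 frags on the | ||
 | * 3032 give 1 head + 3 frags + 1 continuation = 5 segments (frags + 2); | ||
 | * 7 frags spill into a second OAL, hence frags + 3. | ||
 | */ | ||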
2271 | static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags) | ||
2272 | { | ||
2273 | if (qdev->device_id == QL3022_DEVICE_ID) | ||
2274 | return 1; | ||
2275 | |||
2276 | if (frags <= 2) | ||
2277 | return frags + 1; | ||
2278 | else if (frags <= 6) | ||
2279 | return frags + 2; | ||
2280 | else if (frags <= 10) | ||
2281 | return frags + 3; | ||
2282 | else if (frags <= 14) | ||
2283 | return frags + 4; | ||
2284 | else if (frags <= 18) | ||
2285 | return frags + 5; | ||
2286 | return -1; | ||
2287 | } | ||
2288 | |||
2289 | static void ql_hw_csum_setup(const struct sk_buff *skb, | ||
2290 | struct ob_mac_iocb_req *mac_iocb_ptr) | ||
2291 | { | ||
2292 | const struct iphdr *ip = ip_hdr(skb); | ||
2293 | |||
2294 | mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb); | ||
2295 | mac_iocb_ptr->ip_hdr_len = ip->ihl; | ||
2296 | |||
2297 | if (ip->protocol == IPPROTO_TCP) { | ||
2298 | mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC | | ||
2299 | OB_3032MAC_IOCB_REQ_IC; | ||
2300 | } else { | ||
2301 | mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC | | ||
2302 | OB_3032MAC_IOCB_REQ_IC; | ||
2303 | } | ||
2304 | |||
2305 | } | ||
2306 | |||
2307 | /* | ||
2308 | * Map the buffers for this transmit. | ||
2309 | * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success. | ||
2310 | */ | ||
2311 | static int ql_send_map(struct ql3_adapter *qdev, | ||
2312 | struct ob_mac_iocb_req *mac_iocb_ptr, | ||
2313 | struct ql_tx_buf_cb *tx_cb, | ||
2314 | struct sk_buff *skb) | ||
2315 | { | ||
2316 | struct oal *oal; | ||
2317 | struct oal_entry *oal_entry; | ||
2318 | int len = skb_headlen(skb); | ||
2319 | dma_addr_t map; | ||
2320 | int err; | ||
2321 | int completed_segs, i; | ||
2322 | int seg_cnt, seg = 0; | ||
2323 | int frag_cnt = (int)skb_shinfo(skb)->nr_frags; | ||
2324 | |||
2325 | seg_cnt = tx_cb->seg_count; | ||
2326 | /* | ||
2327 | * Map the skb buffer first. | ||
2328 | */ | ||
2329 | map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); | ||
2330 | |||
2331 | err = pci_dma_mapping_error(qdev->pdev, map); | ||
2332 | if (err) { | ||
2333 | netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n", | ||
2334 | err); | ||
2335 | |||
2336 | return NETDEV_TX_BUSY; | ||
2337 | } | ||
2338 | |||
2339 | oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; | ||
2340 | oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); | ||
2341 | oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); | ||
2342 | oal_entry->len = cpu_to_le32(len); | ||
2343 | dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); | ||
2344 | dma_unmap_len_set(&tx_cb->map[seg], maplen, len); | ||
2345 | seg++; | ||
2346 | |||
2347 | if (seg_cnt == 1) { | ||
2348 | /* Terminate the last segment. */ | ||
2349 | oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); | ||
2350 | return NETDEV_TX_OK; | ||
2351 | } | ||
2352 | oal = tx_cb->oal; | ||
2353 | for (completed_segs = 0; | ||
2354 | completed_segs < frag_cnt; | ||
2355 | completed_segs++, seg++) { | ||
2356 | skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs]; | ||
2357 | oal_entry++; | ||
2358 | /* | ||
2359 | * Check whether a continuation entry is needed: the last ALP | ||
2360 | * slot of the IOCB (or of the current OAL) must point to the | ||
2361 | * next outbound address list. | ||
2362 | */ | ||
2363 | if ((seg == 2 && seg_cnt > 3) || | ||
2364 | (seg == 7 && seg_cnt > 8) || | ||
2365 | (seg == 12 && seg_cnt > 13) || | ||
2366 | (seg == 17 && seg_cnt > 18)) { | ||
2367 | map = pci_map_single(qdev->pdev, oal, | ||
2368 | sizeof(struct oal), | ||
2369 | PCI_DMA_TODEVICE); | ||
2370 | |||
2371 | err = pci_dma_mapping_error(qdev->pdev, map); | ||
2372 | if (err) { | ||
2373 | netdev_err(qdev->ndev, | ||
2374 | "PCI mapping outbound address list with error: %d\n", | ||
2375 | err); | ||
2376 | goto map_error; | ||
2377 | } | ||
2378 | |||
2379 | oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); | ||
2380 | oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); | ||
2381 | oal_entry->len = cpu_to_le32(sizeof(struct oal) | | ||
2382 | OAL_CONT_ENTRY); | ||
2383 | dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); | ||
2384 | dma_unmap_len_set(&tx_cb->map[seg], maplen, | ||
2385 | sizeof(struct oal)); | ||
2386 | oal_entry = (struct oal_entry *)oal; | ||
2387 | oal++; | ||
2388 | seg++; | ||
2389 | } | ||
2390 | |||
2391 | map = pci_map_page(qdev->pdev, frag->page, | ||
2392 | frag->page_offset, frag->size, | ||
2393 | PCI_DMA_TODEVICE); | ||
2394 | |||
2395 | err = pci_dma_mapping_error(qdev->pdev, map); | ||
2396 | if (err) { | ||
2397 | netdev_err(qdev->ndev, | ||
2398 | "PCI mapping frags failed with error: %d\n", | ||
2399 | err); | ||
2400 | goto map_error; | ||
2401 | } | ||
2402 | |||
2403 | oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); | ||
2404 | oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); | ||
2405 | oal_entry->len = cpu_to_le32(frag->size); | ||
2406 | dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); | ||
2407 | dma_unmap_len_set(&tx_cb->map[seg], maplen, frag->size); | ||
2408 | } | ||
2409 | /* Terminate the last segment. */ | ||
2410 | oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); | ||
2411 | return NETDEV_TX_OK; | ||
2412 | |||
2413 | map_error: | ||
2414 | /* A PCI mapping failed, so back out: traverse the OALs and the | ||
2415 | * pages that have already been mapped and unmap them to clean up | ||
2416 | * properly. | ||
2417 | */ | ||
2418 | |||
2419 | seg = 1; | ||
2420 | oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; | ||
2421 | oal = tx_cb->oal; | ||
2422 | for (i = 0; i < completed_segs; i++, seg++) { | ||
2423 | oal_entry++; | ||
2424 | |||
2425 | /* | ||
2426 | * A continuation entry was mapped at this slot on the way in, | ||
2427 | * so unmap it here as well. | ||
2428 | */ | ||
2429 | |||
2430 | if ((seg == 2 && seg_cnt > 3) || | ||
2431 | (seg == 7 && seg_cnt > 8) || | ||
2432 | (seg == 12 && seg_cnt > 13) || | ||
2433 | (seg == 17 && seg_cnt > 18)) { | ||
2434 | pci_unmap_single(qdev->pdev, | ||
2435 | dma_unmap_addr(&tx_cb->map[seg], mapaddr), | ||
2436 | dma_unmap_len(&tx_cb->map[seg], maplen), | ||
2437 | PCI_DMA_TODEVICE); | ||
2438 | oal++; | ||
2439 | seg++; | ||
2440 | } | ||
2441 | |||
2442 | pci_unmap_page(qdev->pdev, | ||
2443 | dma_unmap_addr(&tx_cb->map[seg], mapaddr), | ||
2444 | dma_unmap_len(&tx_cb->map[seg], maplen), | ||
2445 | PCI_DMA_TODEVICE); | ||
2446 | } | ||
2447 | |||
2448 | pci_unmap_single(qdev->pdev, | ||
2449 | dma_unmap_addr(&tx_cb->map[0], mapaddr), | ||
2450 | dma_unmap_len(&tx_cb->map[0], maplen), | ||
2451 | PCI_DMA_TODEVICE); | ||
2452 | |||
2453 | return NETDEV_TX_BUSY; | ||
2454 | |||
2455 | } | ||
2456 | |||
2457 | /* | ||
2458 | * The difference between 3022 and 3032 sends: | ||
2459 | * 3022 only supports a simple single segment transmission. | ||
2460 | * 3032 supports checksumming and scatter/gather lists (fragments). | ||
2461 | * The 3032 supports sglists by using the 3 addr/len pairs (ALP) | ||
2462 | * in the IOCB plus a chain of outbound address lists (OAL) that | ||
2463 | * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th) | ||
2464 | * will be used to point to an OAL when more ALP entries are required. | ||
2465 | * The IOCB is always the top of the chain followed by one or more | ||
2466 | * OALs (when necessary). | ||
2467 | */ | ||
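 | /* | ||
 | * Illustratively, per the description above: | ||
 | * | ||
 | *   IOCB: [ALP][ALP][ALP -> OAL]   OAL: [ALP][ALP][ALP][ALP][ALP -> OAL] ... | ||
 | * | ||
 | * The last data ALP carries OAL_LAST_ENTRY; each continuation ALP | ||
 | * carries OAL_CONT_ENTRY (see ql_send_map()). | ||
 | */ | ||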
2468 | static netdev_tx_t ql3xxx_send(struct sk_buff *skb, | ||
2469 | struct net_device *ndev) | ||
2470 | { | ||
2471 | struct ql3_adapter *qdev = netdev_priv(ndev); | ||
2472 | struct ql3xxx_port_registers __iomem *port_regs = | ||
2473 | qdev->mem_map_registers; | ||
2474 | struct ql_tx_buf_cb *tx_cb; | ||
2475 | u32 tot_len = skb->len; | ||
2476 | struct ob_mac_iocb_req *mac_iocb_ptr; | ||
2477 | |||
2478 | if (unlikely(atomic_read(&qdev->tx_count) < 2)) | ||
2479 | return NETDEV_TX_BUSY; | ||
2480 | |||
2481 | tx_cb = &qdev->tx_buf[qdev->req_producer_index]; | ||
2482 | tx_cb->seg_count = ql_get_seg_count(qdev, | ||
2483 | skb_shinfo(skb)->nr_frags); | ||
2484 | if (tx_cb->seg_count == -1) { | ||
2485 | netdev_err(ndev, "%s: invalid segment count!\n", __func__); | ||
2486 | return NETDEV_TX_OK; | ||
2487 | } | ||
2488 | |||
2489 | mac_iocb_ptr = tx_cb->queue_entry; | ||
2490 | memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req)); | ||
2491 | mac_iocb_ptr->opcode = qdev->mac_ob_opcode; | ||
2492 | mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X; | ||
2493 | mac_iocb_ptr->flags |= qdev->mb_bit_mask; | ||
2494 | mac_iocb_ptr->transaction_id = qdev->req_producer_index; | ||
2495 | mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len); | ||
2496 | tx_cb->skb = skb; | ||
2497 | if (qdev->device_id == QL3032_DEVICE_ID && | ||
2498 | skb->ip_summed == CHECKSUM_PARTIAL) | ||
2499 | ql_hw_csum_setup(skb, mac_iocb_ptr); | ||
2500 | |||
2501 | if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) { | ||
2502 | netdev_err(ndev, "%s: Could not map the segments!\n", __func__); | ||
2503 | return NETDEV_TX_BUSY; | ||
2504 | } | ||
2505 | |||
2506 | wmb(); | ||
2507 | qdev->req_producer_index++; | ||
2508 | if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES) | ||
2509 | qdev->req_producer_index = 0; | ||
2510 | wmb(); | ||
2511 | ql_write_common_reg_l(qdev, | ||
2512 | &port_regs->CommonRegs.reqQProducerIndex, | ||
2513 | qdev->req_producer_index); | ||
2514 | |||
2515 | netif_printk(qdev, tx_queued, KERN_DEBUG, ndev, | ||
2516 | "tx queued, slot %d, len %d\n", | ||
2517 | qdev->req_producer_index, skb->len); | ||
2518 | |||
2519 | atomic_dec(&qdev->tx_count); | ||
2520 | return NETDEV_TX_OK; | ||
2521 | } | ||
2522 | |||
2523 | static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev) | ||
2524 | { | ||
2525 | qdev->req_q_size = | ||
2526 | (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req)); | ||
2527 | |||
2528 | qdev->req_q_virt_addr = | ||
2529 | pci_alloc_consistent(qdev->pdev, | ||
2530 | (size_t) qdev->req_q_size, | ||
2531 | &qdev->req_q_phy_addr); | ||
2532 | |||
2533 | if ((qdev->req_q_virt_addr == NULL) || | ||
2534 | LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) { | ||
2535 | netdev_err(qdev->ndev, "reqQ failed\n"); | ||
2536 | return -ENOMEM; | ||
2537 | } | ||
2538 | |||
2539 | qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb); | ||
2540 | |||
2541 | qdev->rsp_q_virt_addr = | ||
2542 | pci_alloc_consistent(qdev->pdev, | ||
2543 | (size_t) qdev->rsp_q_size, | ||
2544 | &qdev->rsp_q_phy_addr); | ||
2545 | |||
2546 | if ((qdev->rsp_q_virt_addr == NULL) || | ||
2547 | LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) { | ||
2548 | netdev_err(qdev->ndev, "rspQ allocation failed\n"); | ||
2549 | pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size, | ||
2550 | qdev->req_q_virt_addr, | ||
2551 | qdev->req_q_phy_addr); | ||
2552 | return -ENOMEM; | ||
2553 | } | ||
2554 | |||
2555 | set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); | ||
2556 | |||
2557 | return 0; | ||
2558 | } | ||
2559 | |||
2560 | static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev) | ||
2561 | { | ||
2562 | if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) { | ||
2563 | netdev_info(qdev->ndev, "Already done\n"); | ||
2564 | return; | ||
2565 | } | ||
2566 | |||
2567 | pci_free_consistent(qdev->pdev, | ||
2568 | qdev->req_q_size, | ||
2569 | qdev->req_q_virt_addr, qdev->req_q_phy_addr); | ||
2570 | |||
2571 | qdev->req_q_virt_addr = NULL; | ||
2572 | |||
2573 | pci_free_consistent(qdev->pdev, | ||
2574 | qdev->rsp_q_size, | ||
2575 | qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr); | ||
2576 | |||
2577 | qdev->rsp_q_virt_addr = NULL; | ||
2578 | |||
2579 | clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); | ||
2580 | } | ||
2581 | |||
2582 | static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) | ||
2583 | { | ||
2584 | /* Create Large Buffer Queue */ | ||
2585 | qdev->lrg_buf_q_size = | ||
2586 | qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry); | ||
2587 | if (qdev->lrg_buf_q_size < PAGE_SIZE) | ||
2588 | qdev->lrg_buf_q_alloc_size = PAGE_SIZE; | ||
2589 | else | ||
2590 | qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2; | ||
2591 | |||
2592 | qdev->lrg_buf = kcalloc(qdev->num_large_buffers, | ||
2593 | sizeof(struct ql_rcv_buf_cb), | ||
2594 | GFP_KERNEL); | ||
2595 | if (qdev->lrg_buf == NULL) { | ||
2596 | netdev_err(qdev->ndev, "qdev->lrg_buf alloc failed\n"); | ||
2597 | return -ENOMEM; | ||
2598 | } | ||
2599 | |||
2600 | qdev->lrg_buf_q_alloc_virt_addr = | ||
2601 | pci_alloc_consistent(qdev->pdev, | ||
2602 | qdev->lrg_buf_q_alloc_size, | ||
2603 | &qdev->lrg_buf_q_alloc_phy_addr); | ||
2604 | |||
2605 | if (qdev->lrg_buf_q_alloc_virt_addr == NULL) { | ||
2606 | netdev_err(qdev->ndev, "lBufQ failed\n"); | ||
2607 | return -ENOMEM; | ||
2608 | } | ||
2609 | qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr; | ||
2610 | qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr; | ||
2611 | |||
2612 | /* Create Small Buffer Queue */ | ||
2613 | qdev->small_buf_q_size = | ||
2614 | NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry); | ||
2615 | if (qdev->small_buf_q_size < PAGE_SIZE) | ||
2616 | qdev->small_buf_q_alloc_size = PAGE_SIZE; | ||
2617 | else | ||
2618 | qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2; | ||
2619 | |||
2620 | qdev->small_buf_q_alloc_virt_addr = | ||
2621 | pci_alloc_consistent(qdev->pdev, | ||
2622 | qdev->small_buf_q_alloc_size, | ||
2623 | &qdev->small_buf_q_alloc_phy_addr); | ||
2624 | |||
2625 | if (qdev->small_buf_q_alloc_virt_addr == NULL) { | ||
2626 | netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n"); | ||
2627 | pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size, | ||
2628 | qdev->lrg_buf_q_alloc_virt_addr, | ||
2629 | qdev->lrg_buf_q_alloc_phy_addr); | ||
2630 | return -ENOMEM; | ||
2631 | } | ||
2632 | |||
2633 | qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr; | ||
2634 | qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr; | ||
2635 | set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); | ||
2636 | return 0; | ||
2637 | } | ||
2638 | |||
2639 | static void ql_free_buffer_queues(struct ql3_adapter *qdev) | ||
2640 | { | ||
2641 | if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) { | ||
2642 | netdev_info(qdev->ndev, "Already done\n"); | ||
2643 | return; | ||
2644 | } | ||
2645 | kfree(qdev->lrg_buf); | ||
2646 | pci_free_consistent(qdev->pdev, | ||
2647 | qdev->lrg_buf_q_alloc_size, | ||
2648 | qdev->lrg_buf_q_alloc_virt_addr, | ||
2649 | qdev->lrg_buf_q_alloc_phy_addr); | ||
2650 | |||
2651 | qdev->lrg_buf_q_virt_addr = NULL; | ||
2652 | |||
2653 | pci_free_consistent(qdev->pdev, | ||
2654 | qdev->small_buf_q_alloc_size, | ||
2655 | qdev->small_buf_q_alloc_virt_addr, | ||
2656 | qdev->small_buf_q_alloc_phy_addr); | ||
2657 | |||
2658 | qdev->small_buf_q_virt_addr = NULL; | ||
2659 | |||
2660 | clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); | ||
2661 | } | ||
2662 | |||
2663 | static int ql_alloc_small_buffers(struct ql3_adapter *qdev) | ||
2664 | { | ||
2665 | int i; | ||
2666 | struct bufq_addr_element *small_buf_q_entry; | ||
2667 | |||
2668 | /* Currently we allocate one chunk of memory and use it for all the small buffers */ | ||
2669 | qdev->small_buf_total_size = | ||
2670 | (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES * | ||
2671 | QL_SMALL_BUFFER_SIZE); | ||
2672 | |||
2673 | qdev->small_buf_virt_addr = | ||
2674 | pci_alloc_consistent(qdev->pdev, | ||
2675 | qdev->small_buf_total_size, | ||
2676 | &qdev->small_buf_phy_addr); | ||
2677 | |||
2678 | if (qdev->small_buf_virt_addr == NULL) { | ||
2679 | netdev_err(qdev->ndev, "Failed to get small buffer memory\n"); | ||
2680 | return -ENOMEM; | ||
2681 | } | ||
2682 | |||
2683 | qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr); | ||
2684 | qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr); | ||
2685 | |||
2686 | small_buf_q_entry = qdev->small_buf_q_virt_addr; | ||
2687 | |||
2688 | /* Initialize the small buffer queue. */ | ||
2689 | for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) { | ||
2690 | small_buf_q_entry->addr_high = | ||
2691 | cpu_to_le32(qdev->small_buf_phy_addr_high); | ||
2692 | small_buf_q_entry->addr_low = | ||
2693 | cpu_to_le32(qdev->small_buf_phy_addr_low + | ||
2694 | (i * QL_SMALL_BUFFER_SIZE)); | ||
2695 | small_buf_q_entry++; | ||
2696 | } | ||
2697 | qdev->small_buf_index = 0; | ||
2698 | set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags); | ||
2699 | return 0; | ||
2700 | } | ||
2701 | |||
2702 | static void ql_free_small_buffers(struct ql3_adapter *qdev) | ||
2703 | { | ||
2704 | if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) { | ||
2705 | netdev_info(qdev->ndev, "Already done\n"); | ||
2706 | return; | ||
2707 | } | ||
2708 | if (qdev->small_buf_virt_addr != NULL) { | ||
2709 | pci_free_consistent(qdev->pdev, | ||
2710 | qdev->small_buf_total_size, | ||
2711 | qdev->small_buf_virt_addr, | ||
2712 | qdev->small_buf_phy_addr); | ||
2713 | |||
2714 | qdev->small_buf_virt_addr = NULL; | ||
2715 | } | ||
2716 | } | ||
2717 | |||
2718 | static void ql_free_large_buffers(struct ql3_adapter *qdev) | ||
2719 | { | ||
2720 | int i = 0; | ||
2721 | struct ql_rcv_buf_cb *lrg_buf_cb; | ||
2722 | |||
2723 | for (i = 0; i < qdev->num_large_buffers; i++) { | ||
2724 | lrg_buf_cb = &qdev->lrg_buf[i]; | ||
2725 | if (lrg_buf_cb->skb) { | ||
2726 | dev_kfree_skb(lrg_buf_cb->skb); | ||
2727 | pci_unmap_single(qdev->pdev, | ||
2728 | dma_unmap_addr(lrg_buf_cb, mapaddr), | ||
2729 | dma_unmap_len(lrg_buf_cb, maplen), | ||
2730 | PCI_DMA_FROMDEVICE); | ||
2731 | memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); | ||
2732 | } else { | ||
2733 | break; | ||
2734 | } | ||
2735 | } | ||
2736 | } | ||
2737 | |||
2738 | static void ql_init_large_buffers(struct ql3_adapter *qdev) | ||
2739 | { | ||
2740 | int i; | ||
2741 | struct ql_rcv_buf_cb *lrg_buf_cb; | ||
2742 | struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr; | ||
2743 | |||
2744 | for (i = 0; i < qdev->num_large_buffers; i++) { | ||
2745 | lrg_buf_cb = &qdev->lrg_buf[i]; | ||
2746 | buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high; | ||
2747 | buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low; | ||
2748 | buf_addr_ele++; | ||
2749 | } | ||
2750 | qdev->lrg_buf_index = 0; | ||
2751 | qdev->lrg_buf_skb_check = 0; | ||
2752 | } | ||
2753 | |||
2754 | static int ql_alloc_large_buffers(struct ql3_adapter *qdev) | ||
2755 | { | ||
2756 | int i; | ||
2757 | struct ql_rcv_buf_cb *lrg_buf_cb; | ||
2758 | struct sk_buff *skb; | ||
2759 | dma_addr_t map; | ||
2760 | int err; | ||
2761 | |||
2762 | for (i = 0; i < qdev->num_large_buffers; i++) { | ||
2763 | skb = netdev_alloc_skb(qdev->ndev, | ||
2764 | qdev->lrg_buffer_len); | ||
2765 | if (unlikely(!skb)) { | ||
2766 | /* Better luck next round */ | ||
2767 | netdev_err(qdev->ndev, | ||
2768 | "large buff alloc failed for %d bytes at index %d\n", | ||
2769 | qdev->lrg_buffer_len, i); | ||
2770 | ql_free_large_buffers(qdev); | ||
2771 | return -ENOMEM; | ||
2772 | } else { | ||
2773 | |||
2774 | lrg_buf_cb = &qdev->lrg_buf[i]; | ||
2775 | memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); | ||
2776 | lrg_buf_cb->index = i; | ||
2777 | lrg_buf_cb->skb = skb; | ||
2778 | /* | ||
2779 | * We save some space to copy the ethhdr from the | ||
2780 | * first buffer. | ||
2781 | */ | ||
2782 | skb_reserve(skb, QL_HEADER_SPACE); | ||
2783 | map = pci_map_single(qdev->pdev, | ||
2784 | skb->data, | ||
2785 | qdev->lrg_buffer_len - | ||
2786 | QL_HEADER_SPACE, | ||
2787 | PCI_DMA_FROMDEVICE); | ||
2788 | |||
2789 | err = pci_dma_mapping_error(qdev->pdev, map); | ||
2790 | if (err) { | ||
2791 | netdev_err(qdev->ndev, | ||
2792 | "PCI mapping failed with error: %d\n", | ||
2793 | err); | ||
2794 | ql_free_large_buffers(qdev); | ||
2795 | return -ENOMEM; | ||
2796 | } | ||
2797 | |||
2798 | dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); | ||
2799 | dma_unmap_len_set(lrg_buf_cb, maplen, | ||
2800 | qdev->lrg_buffer_len - | ||
2801 | QL_HEADER_SPACE); | ||
2802 | lrg_buf_cb->buf_phy_addr_low = | ||
2803 | cpu_to_le32(LS_64BITS(map)); | ||
2804 | lrg_buf_cb->buf_phy_addr_high = | ||
2805 | cpu_to_le32(MS_64BITS(map)); | ||
2806 | } | ||
2807 | } | ||
2808 | return 0; | ||
2809 | } | ||
2810 | |||
2811 | static void ql_free_send_free_list(struct ql3_adapter *qdev) | ||
2812 | { | ||
2813 | struct ql_tx_buf_cb *tx_cb; | ||
2814 | int i; | ||
2815 | |||
2816 | tx_cb = &qdev->tx_buf[0]; | ||
2817 | for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { | ||
2818 | kfree(tx_cb->oal); | ||
2819 | tx_cb->oal = NULL; | ||
2820 | tx_cb++; | ||
2821 | } | ||
2822 | } | ||
2823 | |||
2824 | static int ql_create_send_free_list(struct ql3_adapter *qdev) | ||
2825 | { | ||
2826 | struct ql_tx_buf_cb *tx_cb; | ||
2827 | int i; | ||
2828 | struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr; | ||
2829 | |||
2830 | /* Create free list of transmit buffers */ | ||
2831 | for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { | ||
2832 | |||
2833 | tx_cb = &qdev->tx_buf[i]; | ||
2834 | tx_cb->skb = NULL; | ||
2835 | tx_cb->queue_entry = req_q_curr; | ||
2836 | req_q_curr++; | ||
2837 | tx_cb->oal = kmalloc(512, GFP_KERNEL); | ||
2838 | if (tx_cb->oal == NULL) | ||
2839 | return -1; | ||
2840 | } | ||
2841 | return 0; | ||
2842 | } | ||
2843 | |||
2844 | static int ql_alloc_mem_resources(struct ql3_adapter *qdev) | ||
2845 | { | ||
2846 | if (qdev->ndev->mtu == NORMAL_MTU_SIZE) { | ||
2847 | qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES; | ||
2848 | qdev->lrg_buffer_len = NORMAL_MTU_SIZE; | ||
2849 | } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) { | ||
2850 | /* | ||
2851 | * Bigger buffers, so fewer of them. | ||
2852 | */ | ||
2853 | qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES; | ||
2854 | qdev->lrg_buffer_len = JUMBO_MTU_SIZE; | ||
2855 | } else { | ||
2856 | netdev_err(qdev->ndev, "Invalid mtu size: %d. Only %d and %d are accepted.\n", | ||
2857 | qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE); | ||
2858 | return -ENOMEM; | ||
2859 | } | ||
2860 | qdev->num_large_buffers = | ||
2861 | qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY; | ||
2862 | qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE; | ||
2863 | qdev->max_frame_size = | ||
2864 | (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE; | ||
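 | /* | ||
 | * Net effect of the two statements above: max_frame_size works out to | ||
 | * MTU + VLAN_ETH_HLEN + VLAN_ID_LEN + ETHERNET_CRC_SIZE; the | ||
 | * QL_HEADER_SPACE reserve counts toward the buffer length but not | ||
 | * toward the on-wire frame size. | ||
 | */ | ||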
2865 | |||
2866 | /* | ||
2867 | * First allocate a page of shared memory and use it for shadow | ||
2868 | * locations of Network Request Queue Consumer Address Register and | ||
2869 | * Network Completion Queue Producer Index Register | ||
2870 | */ | ||
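 | /* | ||
 | * Shadow page layout, per the assignments below: offset 0 holds the | ||
 | * request queue consumer index, offset 8 the response queue producer | ||
 | * index; both share the single DMA page allocated here. | ||
 | */ | ||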
2871 | qdev->shadow_reg_virt_addr = | ||
2872 | pci_alloc_consistent(qdev->pdev, | ||
2873 | PAGE_SIZE, &qdev->shadow_reg_phy_addr); | ||
2874 | |||
2875 | if (qdev->shadow_reg_virt_addr != NULL) { | ||
2876 | qdev->preq_consumer_index = qdev->shadow_reg_virt_addr; | ||
2877 | qdev->req_consumer_index_phy_addr_high = | ||
2878 | MS_64BITS(qdev->shadow_reg_phy_addr); | ||
2879 | qdev->req_consumer_index_phy_addr_low = | ||
2880 | LS_64BITS(qdev->shadow_reg_phy_addr); | ||
2881 | |||
2882 | qdev->prsp_producer_index = | ||
2883 | (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8); | ||
2884 | qdev->rsp_producer_index_phy_addr_high = | ||
2885 | qdev->req_consumer_index_phy_addr_high; | ||
2886 | qdev->rsp_producer_index_phy_addr_low = | ||
2887 | qdev->req_consumer_index_phy_addr_low + 8; | ||
2888 | } else { | ||
2889 | netdev_err(qdev->ndev, "shadowReg Alloc failed\n"); | ||
2890 | return -ENOMEM; | ||
2891 | } | ||
2892 | |||
2893 | if (ql_alloc_net_req_rsp_queues(qdev) != 0) { | ||
2894 | netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n"); | ||
2895 | goto err_req_rsp; | ||
2896 | } | ||
2897 | |||
2898 | if (ql_alloc_buffer_queues(qdev) != 0) { | ||
2899 | netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n"); | ||
2900 | goto err_buffer_queues; | ||
2901 | } | ||
2902 | |||
2903 | if (ql_alloc_small_buffers(qdev) != 0) { | ||
2904 | netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n"); | ||
2905 | goto err_small_buffers; | ||
2906 | } | ||
2907 | |||
2908 | if (ql_alloc_large_buffers(qdev) != 0) { | ||
2909 | netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n"); | ||
2910 | goto err_large_buffers; | ||
2911 | } | ||
2912 | |||
2913 | /* Initialize the large buffer queue. */ | ||
2914 | ql_init_large_buffers(qdev); | ||
2915 | if (ql_create_send_free_list(qdev)) | ||
2916 | goto err_free_list; | ||
2917 | |||
2918 | qdev->rsp_current = qdev->rsp_q_virt_addr; | ||
2919 | |||
2920 | return 0; | ||
2921 | err_free_list: | ||
2922 | ql_free_send_free_list(qdev); | ||
 | ql_free_large_buffers(qdev); | ||
 | err_large_buffers: | ||
 | ql_free_small_buffers(qdev); | ||
2923 | err_small_buffers: | ||
2924 | ql_free_buffer_queues(qdev); | ||
2925 | err_buffer_queues: | ||
2926 | ql_free_net_req_rsp_queues(qdev); | ||
2927 | err_req_rsp: | ||
2928 | pci_free_consistent(qdev->pdev, | ||
2929 | PAGE_SIZE, | ||
2930 | qdev->shadow_reg_virt_addr, | ||
2931 | qdev->shadow_reg_phy_addr); | ||
2932 | |||
2933 | return -ENOMEM; | ||
2934 | } | ||
2935 | |||
2936 | static void ql_free_mem_resources(struct ql3_adapter *qdev) | ||
2937 | { | ||
2938 | ql_free_send_free_list(qdev); | ||
2939 | ql_free_large_buffers(qdev); | ||
2940 | ql_free_small_buffers(qdev); | ||
2941 | ql_free_buffer_queues(qdev); | ||
2942 | ql_free_net_req_rsp_queues(qdev); | ||
2943 | if (qdev->shadow_reg_virt_addr != NULL) { | ||
2944 | pci_free_consistent(qdev->pdev, | ||
2945 | PAGE_SIZE, | ||
2946 | qdev->shadow_reg_virt_addr, | ||
2947 | qdev->shadow_reg_phy_addr); | ||
2948 | qdev->shadow_reg_virt_addr = NULL; | ||
2949 | } | ||
2950 | } | ||
2951 | |||
2952 | static int ql_init_misc_registers(struct ql3_adapter *qdev) | ||
2953 | { | ||
2954 | struct ql3xxx_local_ram_registers __iomem *local_ram = | ||
2955 | (void __iomem *)qdev->mem_map_registers; | ||
2956 | |||
2957 | if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK, | ||
2958 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * | ||
2959 | 2) << 4)) | ||
2960 | return -1; | ||
2961 | |||
2962 | ql_write_page2_reg(qdev, | ||
2963 | &local_ram->bufletSize, qdev->nvram_data.bufletSize); | ||
2964 | |||
2965 | ql_write_page2_reg(qdev, | ||
2966 | &local_ram->maxBufletCount, | ||
2967 | qdev->nvram_data.bufletCount); | ||
2968 | |||
2969 | ql_write_page2_reg(qdev, | ||
2970 | &local_ram->freeBufletThresholdLow, | ||
2971 | (qdev->nvram_data.tcpWindowThreshold25 << 16) | | ||
2972 | (qdev->nvram_data.tcpWindowThreshold0)); | ||
2973 | |||
2974 | ql_write_page2_reg(qdev, | ||
2975 | &local_ram->freeBufletThresholdHigh, | ||
2976 | qdev->nvram_data.tcpWindowThreshold50); | ||
2977 | |||
2978 | ql_write_page2_reg(qdev, | ||
2979 | &local_ram->ipHashTableBase, | ||
2980 | (qdev->nvram_data.ipHashTableBaseHi << 16) | | ||
2981 | qdev->nvram_data.ipHashTableBaseLo); | ||
2982 | ql_write_page2_reg(qdev, | ||
2983 | &local_ram->ipHashTableCount, | ||
2984 | qdev->nvram_data.ipHashTableSize); | ||
2985 | ql_write_page2_reg(qdev, | ||
2986 | &local_ram->tcpHashTableBase, | ||
2987 | (qdev->nvram_data.tcpHashTableBaseHi << 16) | | ||
2988 | qdev->nvram_data.tcpHashTableBaseLo); | ||
2989 | ql_write_page2_reg(qdev, | ||
2990 | &local_ram->tcpHashTableCount, | ||
2991 | qdev->nvram_data.tcpHashTableSize); | ||
2992 | ql_write_page2_reg(qdev, | ||
2993 | &local_ram->ncbBase, | ||
2994 | (qdev->nvram_data.ncbTableBaseHi << 16) | | ||
2995 | qdev->nvram_data.ncbTableBaseLo); | ||
2996 | ql_write_page2_reg(qdev, | ||
2997 | &local_ram->maxNcbCount, | ||
2998 | qdev->nvram_data.ncbTableSize); | ||
2999 | ql_write_page2_reg(qdev, | ||
3000 | &local_ram->drbBase, | ||
3001 | (qdev->nvram_data.drbTableBaseHi << 16) | | ||
3002 | qdev->nvram_data.drbTableBaseLo); | ||
3003 | ql_write_page2_reg(qdev, | ||
3004 | &local_ram->maxDrbCount, | ||
3005 | qdev->nvram_data.drbTableSize); | ||
3006 | ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK); | ||
3007 | return 0; | ||
3008 | } | ||
3009 | |||
3010 | static int ql_adapter_initialize(struct ql3_adapter *qdev) | ||
3011 | { | ||
3012 | u32 value; | ||
3013 | struct ql3xxx_port_registers __iomem *port_regs = | ||
3014 | qdev->mem_map_registers; | ||
3015 | u32 __iomem *spir = &port_regs->CommonRegs.serialPortInterfaceReg; | ||
3016 | struct ql3xxx_host_memory_registers __iomem *hmem_regs = | ||
3017 | (void __iomem *)port_regs; | ||
3018 | u32 delay = 10; | ||
3019 | int status = 0; | ||
3020 | unsigned long hw_flags = 0; | ||
3021 | |||
3022 | if (ql_mii_setup(qdev)) | ||
3023 | return -1; | ||
3024 | |||
3025 | /* Bring the PHY out of reset */ | ||
3026 | ql_write_common_reg(qdev, spir, | ||
3027 | (ISP_SERIAL_PORT_IF_WE | | ||
3028 | (ISP_SERIAL_PORT_IF_WE << 16))); | ||
3029 | /* Give the PHY time to come out of reset. */ | ||
3030 | mdelay(100); | ||
3031 | qdev->port_link_state = LS_DOWN; | ||
3032 | netif_carrier_off(qdev->ndev); | ||
3033 | |||
3034 | /* V2 chip fix for ARS-39168. */ | ||
3035 | ql_write_common_reg(qdev, spir, | ||
3036 | (ISP_SERIAL_PORT_IF_SDE | | ||
3037 | (ISP_SERIAL_PORT_IF_SDE << 16))); | ||
3038 | |||
3039 | /* Request Queue Registers */ | ||
3040 | *((u32 *)(qdev->preq_consumer_index)) = 0; | ||
3041 | atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES); | ||
3042 | qdev->req_producer_index = 0; | ||
3043 | |||
3044 | ql_write_page1_reg(qdev, | ||
3045 | &hmem_regs->reqConsumerIndexAddrHigh, | ||
3046 | qdev->req_consumer_index_phy_addr_high); | ||
3047 | ql_write_page1_reg(qdev, | ||
3048 | &hmem_regs->reqConsumerIndexAddrLow, | ||
3049 | qdev->req_consumer_index_phy_addr_low); | ||
3050 | |||
3051 | ql_write_page1_reg(qdev, | ||
3052 | &hmem_regs->reqBaseAddrHigh, | ||
3053 | MS_64BITS(qdev->req_q_phy_addr)); | ||
3054 | ql_write_page1_reg(qdev, | ||
3055 | &hmem_regs->reqBaseAddrLow, | ||
3056 | LS_64BITS(qdev->req_q_phy_addr)); | ||
3057 | ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES); | ||
3058 | |||
3059 | /* Response Queue Registers */ | ||
3060 | *qdev->prsp_producer_index = 0; | ||
3061 | qdev->rsp_consumer_index = 0; | ||
3062 | qdev->rsp_current = qdev->rsp_q_virt_addr; | ||
3063 | |||
3064 | ql_write_page1_reg(qdev, | ||
3065 | &hmem_regs->rspProducerIndexAddrHigh, | ||
3066 | qdev->rsp_producer_index_phy_addr_high); | ||
3067 | |||
3068 | ql_write_page1_reg(qdev, | ||
3069 | &hmem_regs->rspProducerIndexAddrLow, | ||
3070 | qdev->rsp_producer_index_phy_addr_low); | ||
3071 | |||
3072 | ql_write_page1_reg(qdev, | ||
3073 | &hmem_regs->rspBaseAddrHigh, | ||
3074 | MS_64BITS(qdev->rsp_q_phy_addr)); | ||
3075 | |||
3076 | ql_write_page1_reg(qdev, | ||
3077 | &hmem_regs->rspBaseAddrLow, | ||
3078 | LS_64BITS(qdev->rsp_q_phy_addr)); | ||
3079 | |||
3080 | ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES); | ||
3081 | |||
3082 | /* Large Buffer Queue */ | ||
3083 | ql_write_page1_reg(qdev, | ||
3084 | &hmem_regs->rxLargeQBaseAddrHigh, | ||
3085 | MS_64BITS(qdev->lrg_buf_q_phy_addr)); | ||
3086 | |||
3087 | ql_write_page1_reg(qdev, | ||
3088 | &hmem_regs->rxLargeQBaseAddrLow, | ||
3089 | LS_64BITS(qdev->lrg_buf_q_phy_addr)); | ||
3090 | |||
3091 | ql_write_page1_reg(qdev, | ||
3092 | &hmem_regs->rxLargeQLength, | ||
3093 | qdev->num_lbufq_entries); | ||
3094 | |||
3095 | ql_write_page1_reg(qdev, | ||
3096 | &hmem_regs->rxLargeBufferLength, | ||
3097 | qdev->lrg_buffer_len); | ||
3098 | |||
3099 | /* Small Buffer Queue */ | ||
3100 | ql_write_page1_reg(qdev, | ||
3101 | &hmem_regs->rxSmallQBaseAddrHigh, | ||
3102 | MS_64BITS(qdev->small_buf_q_phy_addr)); | ||
3103 | |||
3104 | ql_write_page1_reg(qdev, | ||
3105 | &hmem_regs->rxSmallQBaseAddrLow, | ||
3106 | LS_64BITS(qdev->small_buf_q_phy_addr)); | ||
3107 | |||
3108 | ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES); | ||
3109 | ql_write_page1_reg(qdev, | ||
3110 | &hmem_regs->rxSmallBufferLength, | ||
3111 | QL_SMALL_BUFFER_SIZE); | ||
3112 | |||
3113 | qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1; | ||
3114 | qdev->small_buf_release_cnt = 8; | ||
3115 | qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1; | ||
3116 | qdev->lrg_buf_release_cnt = 8; | ||
3117 | qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr; | ||
3118 | qdev->small_buf_index = 0; | ||
3119 | qdev->lrg_buf_index = 0; | ||
3120 | qdev->lrg_buf_free_count = 0; | ||
3121 | qdev->lrg_buf_free_head = NULL; | ||
3122 | qdev->lrg_buf_free_tail = NULL; | ||
3123 | |||
3124 | ql_write_common_reg(qdev, | ||
3125 | &port_regs->CommonRegs. | ||
3126 | rxSmallQProducerIndex, | ||
3127 | qdev->small_buf_q_producer_index); | ||
3128 | ql_write_common_reg(qdev, | ||
3129 | &port_regs->CommonRegs. | ||
3130 | rxLargeQProducerIndex, | ||
3131 | qdev->lrg_buf_q_producer_index); | ||
3132 | |||
3133 | /* | ||
3134 | * Find out if the chip has already been initialized. If it has, then | ||
3135 | * we skip some of the initialization. | ||
3136 | */ | ||
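 | /* | ||
 | * PORT_STATUS_IC is raised once either function has finished the | ||
 | * one-time setup; see the "Indicate Configuration Complete" write to | ||
 | * portControl further down. | ||
 | */ | ||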
3137 | clear_bit(QL_LINK_MASTER, &qdev->flags); | ||
3138 | value = ql_read_page0_reg(qdev, &port_regs->portStatus); | ||
3139 | if ((value & PORT_STATUS_IC) == 0) { | ||
3140 | |||
3141 | /* Chip has not been configured yet, so let it rip. */ | ||
3142 | if (ql_init_misc_registers(qdev)) { | ||
3143 | status = -1; | ||
3144 | goto out; | ||
3145 | } | ||
3146 | |||
3147 | value = qdev->nvram_data.tcpMaxWindowSize; | ||
3148 | ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value); | ||
3149 | |||
3150 | value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig; | ||
3151 | |||
3152 | if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK, | ||
3153 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) | ||
3154 | * 2) << 13)) { | ||
3155 | status = -1; | ||
3156 | goto out; | ||
3157 | } | ||
3158 | ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value); | ||
3159 | ql_write_page0_reg(qdev, &port_regs->InternalChipConfig, | ||
3160 | (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) << | ||
3161 | 16) | (INTERNAL_CHIP_SD | | ||
3162 | INTERNAL_CHIP_WE))); | ||
3163 | ql_sem_unlock(qdev, QL_FLASH_SEM_MASK); | ||
3164 | } | ||
3165 | |||
3166 | if (qdev->mac_index) | ||
3167 | ql_write_page0_reg(qdev, | ||
3168 | &port_regs->mac1MaxFrameLengthReg, | ||
3169 | qdev->max_frame_size); | ||
3170 | else | ||
3171 | ql_write_page0_reg(qdev, | ||
3172 | &port_regs->mac0MaxFrameLengthReg, | ||
3173 | qdev->max_frame_size); | ||
3174 | |||
3175 | if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, | ||
3176 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 7)) { | ||
3178 | status = -1; | ||
3179 | goto out; | ||
3180 | } | ||
3181 | |||
3182 | PHY_Setup(qdev); | ||
3183 | ql_init_scan_mode(qdev); | ||
3184 | ql_get_phy_owner(qdev); | ||
3185 | |||
3186 | /* Load the MAC Configuration */ | ||
3187 | |||
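| /* | ||
| * The MAC address registers are accessed indirectly: | ||
| * macAddrIndirectPtrReg selects the entry (and carries the | ||
| * write-enable mask in its upper half) while macAddrDataReg | ||
| * supplies the value to store. | ||
| */ | ||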
3188 | /* Program lower 32 bits of the MAC address */ | ||
3189 | ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, | ||
3190 | (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); | ||
3191 | ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, | ||
3192 | ((qdev->ndev->dev_addr[2] << 24) | ||
3193 | | (qdev->ndev->dev_addr[3] << 16) | ||
3194 | | (qdev->ndev->dev_addr[4] << 8) | ||
3195 | | qdev->ndev->dev_addr[5])); | ||
3196 | |||
3197 | /* Program top 16 bits of the MAC address */ | ||
3198 | ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, | ||
3199 | ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); | ||
3200 | ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, | ||
3201 | ((qdev->ndev->dev_addr[0] << 8) | ||
3202 | | qdev->ndev->dev_addr[1])); | ||
3203 | |||
3204 | /* Enable Primary MAC */ | ||
3205 | ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, | ||
3206 | ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) | | ||
3207 | MAC_ADDR_INDIRECT_PTR_REG_PE)); | ||
3208 | |||
3209 | /* Clear Primary and Secondary IP addresses */ | ||
3210 | ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, | ||
3211 | ((IP_ADDR_INDEX_REG_MASK << 16) | | ||
3212 | (qdev->mac_index << 2))); | ||
3213 | ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); | ||
3214 | |||
3215 | ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, | ||
3216 | ((IP_ADDR_INDEX_REG_MASK << 16) | | ||
3217 | ((qdev->mac_index << 2) + 1))); | ||
3218 | ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); | ||
3219 | |||
3220 | ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); | ||
3221 | |||
3222 | /* Indicate Configuration Complete */ | ||
3223 | ql_write_page0_reg(qdev, | ||
3224 | &port_regs->portControl, | ||
3225 | ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC)); | ||
3226 | |||
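| /* | ||
| * Poll for configuration-complete; hw_lock is dropped around the | ||
| * msleep() so we never sleep while holding the spinlock. | ||
| */ | ||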
3227 | do { | ||
3228 | value = ql_read_page0_reg(qdev, &port_regs->portStatus); | ||
3229 | if (value & PORT_STATUS_IC) | ||
3230 | break; | ||
3231 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | ||
3232 | msleep(500); | ||
3233 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | ||
3234 | } while (--delay); | ||
3235 | |||
3236 | if (delay == 0) { | ||
3237 | netdev_err(qdev->ndev, "Hw Initialization timeout\n"); | ||
3238 | status = -1; | ||
3239 | goto out; | ||
3240 | } | ||
3241 | |||
3242 | /* Enable Ethernet Function */ | ||
3243 | if (qdev->device_id == QL3032_DEVICE_ID) { | ||
3244 | value = | ||
3245 | (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE | | ||
3246 | QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 | | ||
3247 | QL3032_PORT_CONTROL_ET); | ||
3248 | ql_write_page0_reg(qdev, &port_regs->functionControl, | ||
3249 | ((value << 16) | value)); | ||
3250 | } else { | ||
3251 | value = | ||
3252 | (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI | | ||
3253 | PORT_CONTROL_HH); | ||
3254 | ql_write_page0_reg(qdev, &port_regs->portControl, | ||
3255 | ((value << 16) | value)); | ||
3256 | } | ||
3257 | |||
3259 | out: | ||
3260 | return status; | ||
3261 | } | ||
3262 | |||
3263 | /* | ||
3264 | * Caller holds hw_lock. | ||
3265 | */ | ||
3266 | static int ql_adapter_reset(struct ql3_adapter *qdev) | ||
3267 | { | ||
3268 | struct ql3xxx_port_registers __iomem *port_regs = | ||
3269 | qdev->mem_map_registers; | ||
3270 | int status = 0; | ||
3271 | u16 value; | ||
3272 | int max_wait_time; | ||
3273 | |||
3274 | set_bit(QL_RESET_ACTIVE, &qdev->flags); | ||
3275 | clear_bit(QL_RESET_DONE, &qdev->flags); | ||
3276 | |||
3277 | /* | ||
3278 | * Issue soft reset to chip. | ||
3279 | */ | ||
3280 | netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n"); | ||
3281 | ql_write_common_reg(qdev, | ||
3282 | &port_regs->CommonRegs.ispControlStatus, | ||
3283 | ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR)); | ||
3284 | |||
3285 | /* Wait up to 5 seconds for the reset to complete. */ | ||
3286 | netdev_printk(KERN_DEBUG, qdev->ndev, | ||
3287 | "Waiting up to 5 seconds for reset to complete\n"); | ||
3288 | |||
3289 | /* Wait until the firmware tells us the Soft Reset is done */ | ||
3290 | max_wait_time = 5; | ||
3291 | do { | ||
3292 | value = | ||
3293 | ql_read_common_reg(qdev, | ||
3294 | &port_regs->CommonRegs.ispControlStatus); | ||
3295 | if ((value & ISP_CONTROL_SR) == 0) | ||
3296 | break; | ||
3297 | |||
3298 | ssleep(1); | ||
3299 | } while ((--max_wait_time)); | ||
3300 | |||
3301 | /* | ||
3302 | * Also, make sure that the Network Reset Interrupt bit has been | ||
3303 | * cleared after the soft reset has taken place. | ||
3304 | */ | ||
3305 | value = | ||
3306 | ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus); | ||
3307 | if (value & ISP_CONTROL_RI) { | ||
3308 | netdev_printk(KERN_DEBUG, qdev->ndev, | ||
3309 | "clearing RI after reset\n"); | ||
3310 | ql_write_common_reg(qdev, | ||
3311 | &port_regs->CommonRegs.ispControlStatus, | ||
3312 | ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); | ||
3314 | } | ||
3315 | |||
3316 | if (max_wait_time == 0) { | ||
3317 | /* Issue Force Soft Reset */ | ||
3318 | ql_write_common_reg(qdev, | ||
3319 | &port_regs->CommonRegs.ispControlStatus, | ||
3320 | ((ISP_CONTROL_FSR << 16) | ISP_CONTROL_FSR)); | ||
3323 | /* | ||
3324 | * Wait until the firmware tells us the Force Soft Reset is | ||
3325 | * done | ||
3326 | */ | ||
3327 | max_wait_time = 5; | ||
3328 | do { | ||
3329 | value = ql_read_common_reg(qdev, | ||
3330 | &port_regs->CommonRegs.ispControlStatus); | ||
3332 | if ((value & ISP_CONTROL_FSR) == 0) | ||
3333 | break; | ||
3334 | ssleep(1); | ||
3335 | } while ((--max_wait_time)); | ||
3336 | } | ||
3337 | if (max_wait_time == 0) | ||
3338 | status = 1; | ||
3339 | |||
3340 | clear_bit(QL_RESET_ACTIVE, &qdev->flags); | ||
3341 | set_bit(QL_RESET_DONE, &qdev->flags); | ||
3342 | return status; | ||
3343 | } | ||
3344 | |||
3345 | static void ql_set_mac_info(struct ql3_adapter *qdev) | ||
3346 | { | ||
3347 | struct ql3xxx_port_registers __iomem *port_regs = | ||
3348 | qdev->mem_map_registers; | ||
3349 | u32 value, port_status; | ||
3350 | u8 func_number; | ||
3351 | |||
3352 | /* Get the function number */ | ||
3353 | value = | ||
3354 | ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus); | ||
3355 | func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK); | ||
3356 | port_status = ql_read_page0_reg(qdev, &port_regs->portStatus); | ||
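| /* | ||
| * These adapters expose multiple PCI functions; FN0_NET/FN1_NET | ||
| * identify which network port this function drives.  The SCSI | ||
| * encodings presumably belong to the storage driver and are | ||
| * treated as invalid here. | ||
| */ | ||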
3357 | switch (value & ISP_CONTROL_FN_MASK) { | ||
3358 | case ISP_CONTROL_FN0_NET: | ||
3359 | qdev->mac_index = 0; | ||
3360 | qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; | ||
3361 | qdev->mb_bit_mask = FN0_MA_BITS_MASK; | ||
3362 | qdev->PHYAddr = PORT0_PHY_ADDRESS; | ||
3363 | if (port_status & PORT_STATUS_SM0) | ||
3364 | set_bit(QL_LINK_OPTICAL, &qdev->flags); | ||
3365 | else | ||
3366 | clear_bit(QL_LINK_OPTICAL, &qdev->flags); | ||
3367 | break; | ||
3368 | |||
3369 | case ISP_CONTROL_FN1_NET: | ||
3370 | qdev->mac_index = 1; | ||
3371 | qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; | ||
3372 | qdev->mb_bit_mask = FN1_MA_BITS_MASK; | ||
3373 | qdev->PHYAddr = PORT1_PHY_ADDRESS; | ||
3374 | if (port_status & PORT_STATUS_SM1) | ||
3375 | set_bit(QL_LINK_OPTICAL, &qdev->flags); | ||
3376 | else | ||
3377 | clear_bit(QL_LINK_OPTICAL, &qdev->flags); | ||
3378 | break; | ||
3379 | |||
3380 | case ISP_CONTROL_FN0_SCSI: | ||
3381 | case ISP_CONTROL_FN1_SCSI: | ||
3382 | default: | ||
3383 | netdev_printk(KERN_DEBUG, qdev->ndev, | ||
3384 | "Invalid function number, ispControlStatus = 0x%x\n", | ||
3385 | value); | ||
3386 | break; | ||
3387 | } | ||
3388 | qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8; | ||
3389 | } | ||
3390 | |||
3391 | static void ql_display_dev_info(struct net_device *ndev) | ||
3392 | { | ||
3393 | struct ql3_adapter *qdev = netdev_priv(ndev); | ||
3394 | struct pci_dev *pdev = qdev->pdev; | ||
3395 | |||
3396 | netdev_info(ndev, | ||
3397 | "%s Adapter %d RevisionID %d found %s on PCI slot %d\n", | ||
3398 | DRV_NAME, qdev->index, qdev->chip_rev_id, | ||
3399 | qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022", | ||
3400 | qdev->pci_slot); | ||
3401 | netdev_info(ndev, "%s Interface\n", | ||
3402 | test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER"); | ||
3403 | |||
3404 | /* | ||
3405 | * Print PCI bus width/type. | ||
3406 | */ | ||
3407 | netdev_info(ndev, "Bus interface is %s %s\n", | ||
3408 | ((qdev->pci_width == 64) ? "64-bit" : "32-bit"), | ||
3409 | ((qdev->pci_x) ? "PCI-X" : "PCI")); | ||
3410 | |||
3411 | netdev_info(ndev, "mem IO base address adjusted = 0x%p\n", | ||
3412 | qdev->mem_map_registers); | ||
3413 | netdev_info(ndev, "Interrupt number = %d\n", pdev->irq); | ||
3414 | |||
3415 | netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr); | ||
3416 | } | ||
3417 | |||
3418 | static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) | ||
3419 | { | ||
3420 | struct net_device *ndev = qdev->ndev; | ||
3421 | int retval = 0; | ||
3422 | |||
3423 | netif_stop_queue(ndev); | ||
3424 | netif_carrier_off(ndev); | ||
3425 | |||
3426 | clear_bit(QL_ADAPTER_UP, &qdev->flags); | ||
3427 | clear_bit(QL_LINK_MASTER, &qdev->flags); | ||
3428 | |||
3429 | ql_disable_interrupts(qdev); | ||
3430 | |||
3431 | free_irq(qdev->pdev->irq, ndev); | ||
3432 | |||
3433 | if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { | ||
3434 | netdev_info(qdev->ndev, "calling pci_disable_msi()\n"); | ||
3435 | clear_bit(QL_MSI_ENABLED, &qdev->flags); | ||
3436 | pci_disable_msi(qdev->pdev); | ||
3437 | } | ||
3438 | |||
3439 | del_timer_sync(&qdev->adapter_timer); | ||
3440 | |||
3441 | napi_disable(&qdev->napi); | ||
3442 | |||
3443 | if (do_reset) { | ||
3444 | int soft_reset; | ||
3445 | unsigned long hw_flags; | ||
3446 | |||
3447 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | ||
3448 | if (ql_wait_for_drvr_lock(qdev)) { | ||
3449 | soft_reset = ql_adapter_reset(qdev); | ||
3450 | if (soft_reset) { | ||
3451 | netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n", | ||
3452 | qdev->index); | ||
3453 | } | ||
3454 | netdev_err(ndev, | ||
3455 | "Releasing driver lock via chip reset\n"); | ||
3456 | } else { | ||
3457 | netdev_err(ndev, | ||
3458 | "Could not acquire driver lock to do reset!\n"); | ||
3459 | retval = -1; | ||
3460 | } | ||
3461 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | ||
3462 | } | ||
3463 | ql_free_mem_resources(qdev); | ||
3464 | return retval; | ||
3465 | } | ||
3466 | |||
3467 | static int ql_adapter_up(struct ql3_adapter *qdev) | ||
3468 | { | ||
3469 | struct net_device *ndev = qdev->ndev; | ||
3470 | int err; | ||
3471 | unsigned long irq_flags = IRQF_SHARED; | ||
3472 | unsigned long hw_flags; | ||
3473 | |||
3474 | if (ql_alloc_mem_resources(qdev)) { | ||
3475 | netdev_err(ndev, "Unable to allocate buffers\n"); | ||
3476 | return -ENOMEM; | ||
3477 | } | ||
3478 | |||
3479 | if (qdev->msi) { | ||
3480 | if (pci_enable_msi(qdev->pdev)) { | ||
3481 | netdev_err(ndev, | ||
3482 | "User requested MSI, but MSI failed to initialize. Continuing without MSI.\n"); | ||
3483 | qdev->msi = 0; | ||
3484 | } else { | ||
3485 | netdev_info(ndev, "MSI Enabled...\n"); | ||
3486 | set_bit(QL_MSI_ENABLED, &qdev->flags); | ||
3487 | irq_flags &= ~IRQF_SHARED; | ||
3488 | } | ||
3489 | } | ||
3490 | |||
3491 | err = request_irq(qdev->pdev->irq, ql3xxx_isr, | ||
3492 | irq_flags, ndev->name, ndev); | ||
3493 | if (err) { | ||
3494 | netdev_err(ndev, | ||
3495 | "Failed to reserve interrupt %d - already in use\n", | ||
3496 | qdev->pdev->irq); | ||
3497 | goto err_irq; | ||
3498 | } | ||
3499 | |||
3500 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | ||
3501 | |||
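| /* | ||
| * ql_wait_for_drvr_lock() returns nonzero once the driver lock is | ||
| * held, so (despite the name) a nonzero "err" is the success path. | ||
| */ | ||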
3502 | err = ql_wait_for_drvr_lock(qdev); | ||
3503 | if (err) { | ||
3504 | err = ql_adapter_initialize(qdev); | ||
3505 | if (err) { | ||
3506 | netdev_err(ndev, "Unable to initialize adapter\n"); | ||
3507 | goto err_init; | ||
3508 | } | ||
3509 | netdev_err(ndev, "Releasing driver lock\n"); | ||
3510 | ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); | ||
3511 | } else { | ||
3512 | netdev_err(ndev, "Could not acquire driver lock\n"); | ||
3513 | goto err_lock; | ||
3514 | } | ||
3515 | |||
3516 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | ||
3517 | |||
3518 | set_bit(QL_ADAPTER_UP, &qdev->flags); | ||
3519 | |||
3520 | mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); | ||
3521 | |||
3522 | napi_enable(&qdev->napi); | ||
3523 | ql_enable_interrupts(qdev); | ||
3524 | return 0; | ||
3525 | |||
3526 | err_init: | ||
3527 | ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); | ||
3528 | err_lock: | ||
3529 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | ||
3530 | free_irq(qdev->pdev->irq, ndev); | ||
3531 | err_irq: | ||
3532 | if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { | ||
3533 | netdev_info(ndev, "calling pci_disable_msi()\n"); | ||
3534 | clear_bit(QL_MSI_ENABLED, &qdev->flags); | ||
3535 | pci_disable_msi(qdev->pdev); | ||
3536 | } | ||
3537 | return err; | ||
3538 | } | ||
3539 | |||
3540 | static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset) | ||
3541 | { | ||
3542 | if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) { | ||
3543 | netdev_err(qdev->ndev, | ||
3544 | "Driver up/down cycle failed, closing device\n"); | ||
3545 | rtnl_lock(); | ||
3546 | dev_close(qdev->ndev); | ||
3547 | rtnl_unlock(); | ||
3548 | return -1; | ||
3549 | } | ||
3550 | return 0; | ||
3551 | } | ||
3552 | |||
3553 | static int ql3xxx_close(struct net_device *ndev) | ||
3554 | { | ||
3555 | struct ql3_adapter *qdev = netdev_priv(ndev); | ||
3556 | |||
3557 | /* | ||
3558 | * Wait for device to recover from a reset. | ||
3559 | * (Rarely happens, but possible.) | ||
3560 | */ | ||
3561 | while (!test_bit(QL_ADAPTER_UP, &qdev->flags)) | ||
3562 | msleep(50); | ||
3563 | |||
3564 | ql_adapter_down(qdev, QL_DO_RESET); | ||
3565 | return 0; | ||
3566 | } | ||
3567 | |||
3568 | static int ql3xxx_open(struct net_device *ndev) | ||
3569 | { | ||
3570 | struct ql3_adapter *qdev = netdev_priv(ndev); | ||
3571 | return ql_adapter_up(qdev); | ||
3572 | } | ||
3573 | |||
3574 | static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) | ||
3575 | { | ||
3576 | struct ql3_adapter *qdev = netdev_priv(ndev); | ||
3577 | struct ql3xxx_port_registers __iomem *port_regs = | ||
3578 | qdev->mem_map_registers; | ||
3579 | struct sockaddr *addr = p; | ||
3580 | unsigned long hw_flags; | ||
3581 | |||
3582 | if (netif_running(ndev)) | ||
3583 | return -EBUSY; | ||
3584 | |||
3585 | if (!is_valid_ether_addr(addr->sa_data)) | ||
3586 | return -EADDRNOTAVAIL; | ||
3587 | |||
3588 | memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); | ||
3589 | |||
3590 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | ||
3591 | /* Program lower 32 bits of the MAC address */ | ||
3592 | ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, | ||
3593 | (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); | ||
3594 | ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, | ||
3595 | ((ndev->dev_addr[2] << 24) | | ||
3596 | (ndev->dev_addr[3] << 16) | | ||
3597 | (ndev->dev_addr[4] << 8) | ndev->dev_addr[5])); | ||
3598 | |||
3599 | /* Program top 16 bits of the MAC address */ | ||
3600 | ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, | ||
3601 | ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); | ||
3602 | ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, | ||
3603 | ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1])); | ||
3604 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | ||
3605 | |||
3606 | return 0; | ||
3607 | } | ||
3608 | |||
3609 | static void ql3xxx_tx_timeout(struct net_device *ndev) | ||
3610 | { | ||
3611 | struct ql3_adapter *qdev = netdev_priv(ndev); | ||
3612 | |||
3613 | netdev_err(ndev, "Resetting...\n"); | ||
3614 | /* | ||
3615 | * Stop the queues, we've got a problem. | ||
3616 | */ | ||
3617 | netif_stop_queue(ndev); | ||
3618 | |||
3619 | /* | ||
3620 | * Wake up the worker to process this event. | ||
3621 | */ | ||
3622 | queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0); | ||
3623 | } | ||
3624 | |||
3625 | static void ql_reset_work(struct work_struct *work) | ||
3626 | { | ||
3627 | struct ql3_adapter *qdev = | ||
3628 | container_of(work, struct ql3_adapter, reset_work.work); | ||
3629 | struct net_device *ndev = qdev->ndev; | ||
3630 | u32 value; | ||
3631 | struct ql_tx_buf_cb *tx_cb; | ||
3632 | int max_wait_time, i; | ||
3633 | struct ql3xxx_port_registers __iomem *port_regs = | ||
3634 | qdev->mem_map_registers; | ||
3635 | unsigned long hw_flags; | ||
3636 | |||
3637 | /* | ||
| * test_bit() takes a bit number, not a mask; test each reset flag | ||
| * individually instead of OR-ing the bit numbers together. | ||
| */ | ||
| if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) || | ||
| test_bit(QL_RESET_START, &qdev->flags)) { | ||
3638 | clear_bit(QL_LINK_MASTER, &qdev->flags); | ||
3639 | |||
3640 | /* | ||
3641 | * Loop through the active list and return the skb. | ||
3642 | */ | ||
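| /* | ||
| * The head of each pending frame was mapped with pci_map_single() | ||
| * and the remaining fragments with pci_map_page(), so each is | ||
| * unmapped with the matching call below. | ||
| */ | ||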
3643 | for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { | ||
3644 | int j; | ||
3645 | tx_cb = &qdev->tx_buf[i]; | ||
3646 | if (tx_cb->skb) { | ||
3647 | netdev_printk(KERN_DEBUG, ndev, | ||
3648 | "Freeing lost SKB\n"); | ||
3649 | pci_unmap_single(qdev->pdev, | ||
3650 | dma_unmap_addr(&tx_cb->map[0], | ||
3651 | mapaddr), | ||
3652 | dma_unmap_len(&tx_cb->map[0], maplen), | ||
3653 | PCI_DMA_TODEVICE); | ||
3654 | for (j = 1; j < tx_cb->seg_count; j++) { | ||
3655 | pci_unmap_page(qdev->pdev, | ||
3656 | dma_unmap_addr(&tx_cb->map[j], | ||
3657 | mapaddr), | ||
3658 | dma_unmap_len(&tx_cb->map[j], | ||
3659 | maplen), | ||
3660 | PCI_DMA_TODEVICE); | ||
3661 | } | ||
3662 | dev_kfree_skb(tx_cb->skb); | ||
3663 | tx_cb->skb = NULL; | ||
3664 | } | ||
3665 | } | ||
3666 | |||
3667 | netdev_err(ndev, "Clearing NRI after reset\n"); | ||
3668 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | ||
3669 | ql_write_common_reg(qdev, | ||
3670 | &port_regs->CommonRegs.ispControlStatus, | ||
3671 | ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); | ||
3673 | /* | ||
3674 | * Wait for the Soft Reset to complete. | ||
3675 | */ | ||
3676 | max_wait_time = 10; | ||
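| /* hw_lock is released around the ssleep() inside this loop. */ | ||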
3677 | do { | ||
3678 | value = ql_read_common_reg(qdev, | ||
3679 | &port_regs->CommonRegs.ispControlStatus); | ||
3682 | if ((value & ISP_CONTROL_SR) == 0) { | ||
3683 | netdev_printk(KERN_DEBUG, ndev, | ||
3684 | "reset completed\n"); | ||
3685 | break; | ||
3686 | } | ||
3687 | |||
3688 | if (value & ISP_CONTROL_RI) { | ||
3689 | netdev_printk(KERN_DEBUG, ndev, | ||
3690 | "clearing NRI after reset\n"); | ||
3691 | ql_write_common_reg(qdev, | ||
3692 | &port_regs->CommonRegs.ispControlStatus, | ||
3693 | ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); | ||
3697 | } | ||
3698 | |||
3699 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | ||
3700 | ssleep(1); | ||
3701 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | ||
3702 | } while (--max_wait_time); | ||
3703 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | ||
3704 | |||
3705 | if (value & ISP_CONTROL_SR) { | ||
3706 | |||
3707 | /* | ||
3708 | * Set the reset flags and clear the board again. | ||
3709 | * Nothing else to do... | ||
3710 | */ | ||
3711 | netdev_err(ndev, | ||
3712 | "Timed out waiting for reset to complete\n"); | ||
3713 | netdev_err(ndev, "Do a reset\n"); | ||
3714 | clear_bit(QL_RESET_PER_SCSI, &qdev->flags); | ||
3715 | clear_bit(QL_RESET_START, &qdev->flags); | ||
3716 | ql_cycle_adapter(qdev, QL_DO_RESET); | ||
3717 | return; | ||
3718 | } | ||
3719 | |||
3720 | clear_bit(QL_RESET_ACTIVE, &qdev->flags); | ||
3721 | clear_bit(QL_RESET_PER_SCSI, &qdev->flags); | ||
3722 | clear_bit(QL_RESET_START, &qdev->flags); | ||
3723 | ql_cycle_adapter(qdev, QL_NO_RESET); | ||
3724 | } | ||
3725 | } | ||
3726 | |||
3727 | static void ql_tx_timeout_work(struct work_struct *work) | ||
3728 | { | ||
3729 | struct ql3_adapter *qdev = | ||
3730 | container_of(work, struct ql3_adapter, tx_timeout_work.work); | ||
3731 | |||
3732 | ql_cycle_adapter(qdev, QL_DO_RESET); | ||
3733 | } | ||
3734 | |||
3735 | static void ql_get_board_info(struct ql3_adapter *qdev) | ||
3736 | { | ||
3737 | struct ql3xxx_port_registers __iomem *port_regs = | ||
3738 | qdev->mem_map_registers; | ||
3739 | u32 value; | ||
3740 | |||
3741 | value = ql_read_page0_reg_l(qdev, &port_regs->portStatus); | ||
3742 | |||
3743 | qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12); | ||
3744 | if (value & PORT_STATUS_64) | ||
3745 | qdev->pci_width = 64; | ||
3746 | else | ||
3747 | qdev->pci_width = 32; | ||
3748 | if (value & PORT_STATUS_X) | ||
3749 | qdev->pci_x = 1; | ||
3750 | else | ||
3751 | qdev->pci_x = 0; | ||
3752 | qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn); | ||
3753 | } | ||
3754 | |||
3755 | static void ql3xxx_timer(unsigned long ptr) | ||
3756 | { | ||
3757 | struct ql3_adapter *qdev = (struct ql3_adapter *)ptr; | ||
3758 | queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0); | ||
3759 | } | ||
3760 | |||
3761 | static const struct net_device_ops ql3xxx_netdev_ops = { | ||
3762 | .ndo_open = ql3xxx_open, | ||
3763 | .ndo_start_xmit = ql3xxx_send, | ||
3764 | .ndo_stop = ql3xxx_close, | ||
3765 | .ndo_set_multicast_list = NULL, /* not allowed on NIC side */ | ||
3766 | .ndo_change_mtu = eth_change_mtu, | ||
3767 | .ndo_validate_addr = eth_validate_addr, | ||
3768 | .ndo_set_mac_address = ql3xxx_set_mac_address, | ||
3769 | .ndo_tx_timeout = ql3xxx_tx_timeout, | ||
3770 | }; | ||
3771 | |||
3772 | static int __devinit ql3xxx_probe(struct pci_dev *pdev, | ||
3773 | const struct pci_device_id *pci_entry) | ||
3774 | { | ||
3775 | struct net_device *ndev = NULL; | ||
3776 | struct ql3_adapter *qdev = NULL; | ||
3777 | static int cards_found; | ||
3778 | int uninitialized_var(pci_using_dac), err; | ||
3779 | |||
3780 | err = pci_enable_device(pdev); | ||
3781 | if (err) { | ||
3782 | pr_err("%s cannot enable PCI device\n", pci_name(pdev)); | ||
3783 | goto err_out; | ||
3784 | } | ||
3785 | |||
3786 | err = pci_request_regions(pdev, DRV_NAME); | ||
3787 | if (err) { | ||
3788 | pr_err("%s cannot obtain PCI resources\n", pci_name(pdev)); | ||
3789 | goto err_out_disable_pdev; | ||
3790 | } | ||
3791 | |||
3792 | pci_set_master(pdev); | ||
3793 | |||
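| /* | ||
| * Prefer 64-bit DMA and fall back to 32-bit; pci_using_dac records | ||
| * whether the device can address high memory (NETIF_F_HIGHDMA). | ||
| */ | ||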
3794 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { | ||
3795 | pci_using_dac = 1; | ||
3796 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | ||
3797 | } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { | ||
3798 | pci_using_dac = 0; | ||
3799 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
3800 | } | ||
3801 | |||
3802 | if (err) { | ||
3803 | pr_err("%s no usable DMA configuration\n", pci_name(pdev)); | ||
3804 | goto err_out_free_regions; | ||
3805 | } | ||
3806 | |||
3807 | ndev = alloc_etherdev(sizeof(struct ql3_adapter)); | ||
3808 | if (!ndev) { | ||
3809 | pr_err("%s could not alloc etherdev\n", pci_name(pdev)); | ||
3810 | err = -ENOMEM; | ||
3811 | goto err_out_free_regions; | ||
3812 | } | ||
3813 | |||
3814 | SET_NETDEV_DEV(ndev, &pdev->dev); | ||
3815 | |||
3816 | pci_set_drvdata(pdev, ndev); | ||
3817 | |||
3818 | qdev = netdev_priv(ndev); | ||
3819 | qdev->index = cards_found; | ||
3820 | qdev->ndev = ndev; | ||
3821 | qdev->pdev = pdev; | ||
3822 | qdev->device_id = pci_entry->device; | ||
3823 | qdev->port_link_state = LS_DOWN; | ||
3824 | if (msi) | ||
3825 | qdev->msi = 1; | ||
3826 | |||
3827 | qdev->msg_enable = netif_msg_init(debug, default_msg); | ||
3828 | |||
3829 | if (pci_using_dac) | ||
3830 | ndev->features |= NETIF_F_HIGHDMA; | ||
3831 | if (qdev->device_id == QL3032_DEVICE_ID) | ||
3832 | ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; | ||
3833 | |||
3834 | qdev->mem_map_registers = pci_ioremap_bar(pdev, 1); | ||
3835 | if (!qdev->mem_map_registers) { | ||
3836 | pr_err("%s: cannot map device registers\n", pci_name(pdev)); | ||
3837 | err = -EIO; | ||
3838 | goto err_out_free_ndev; | ||
3839 | } | ||
3840 | |||
3841 | spin_lock_init(&qdev->adapter_lock); | ||
3842 | spin_lock_init(&qdev->hw_lock); | ||
3843 | |||
3844 | /* Set driver entry points */ | ||
3845 | ndev->netdev_ops = &ql3xxx_netdev_ops; | ||
3846 | SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops); | ||
3847 | ndev->watchdog_timeo = 5 * HZ; | ||
3848 | |||
3849 | netif_napi_add(ndev, &qdev->napi, ql_poll, 64); | ||
3850 | |||
3851 | ndev->irq = pdev->irq; | ||
3852 | |||
3853 | /* make sure the EEPROM is good */ | ||
3854 | if (ql_get_nvram_params(qdev)) { | ||
3855 | pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n", | ||
3856 | __func__, qdev->index); | ||
3857 | err = -EIO; | ||
3858 | goto err_out_iounmap; | ||
3859 | } | ||
3860 | |||
3861 | ql_set_mac_info(qdev); | ||
3862 | |||
3863 | /* Validate and set parameters */ | ||
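| /* | ||
| * Per-function configuration comes from NVRAM.  The second network | ||
| * port is PCI function 2 on these adapters (each port presumably | ||
| * pairs a network function with a storage function), hence | ||
| * funcCfg_fn2 for mac_index 1. | ||
| */ | ||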
3864 | if (qdev->mac_index) { | ||
3865 | ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac; | ||
3866 | ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress); | ||
3867 | } else { | ||
3868 | ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac; | ||
3869 | ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress); | ||
3870 | } | ||
3871 | memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); | ||
3872 | |||
3873 | ndev->tx_queue_len = NUM_REQ_Q_ENTRIES; | ||
3874 | |||
3875 | /* Record PCI bus information. */ | ||
3876 | ql_get_board_info(qdev); | ||
3877 | |||
3878 | /* | ||
3879 | * Set the Maximum Memory Read Byte Count value. We do this to handle | ||
3880 | * jumbo frames. | ||
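| * Offset 0x4e is presumably the PCI-X command register within this | ||
| * adapter's config space, and 0x0036 raises the maximum memory read | ||
| * byte count (exact field encoding per the PCI-X capability spec). | ||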
3881 | */ | ||
3882 | if (qdev->pci_x) | ||
3883 | pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036); | ||
3884 | |||
3885 | err = register_netdev(ndev); | ||
3886 | if (err) { | ||
3887 | pr_err("%s: cannot register net device\n", pci_name(pdev)); | ||
3888 | goto err_out_iounmap; | ||
3889 | } | ||
3890 | |||
3891 | /* we're going to reset, so assume we have no link for now */ | ||
3892 | |||
3893 | netif_carrier_off(ndev); | ||
3894 | netif_stop_queue(ndev); | ||
3895 | |||
3896 | qdev->workqueue = create_singlethread_workqueue(ndev->name); | ||
3897 | INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work); | ||
3898 | INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work); | ||
3899 | INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work); | ||
3900 | |||
3901 | init_timer(&qdev->adapter_timer); | ||
3902 | qdev->adapter_timer.function = ql3xxx_timer; | ||
3903 | qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */ | ||
3904 | qdev->adapter_timer.data = (unsigned long)qdev; | ||
3905 | |||
3906 | if (!cards_found) { | ||
3907 | pr_alert("%s\n", DRV_STRING); | ||
3908 | pr_alert("Driver name: %s, Version: %s\n", | ||
3909 | DRV_NAME, DRV_VERSION); | ||
3910 | } | ||
3911 | ql_display_dev_info(ndev); | ||
3912 | |||
3913 | cards_found++; | ||
3914 | return 0; | ||
3915 | |||
3916 | err_out_iounmap: | ||
3917 | iounmap(qdev->mem_map_registers); | ||
3918 | err_out_free_ndev: | ||
3919 | free_netdev(ndev); | ||
3920 | err_out_free_regions: | ||
3921 | pci_release_regions(pdev); | ||
3922 | err_out_disable_pdev: | ||
3923 | pci_disable_device(pdev); | ||
3924 | pci_set_drvdata(pdev, NULL); | ||
3925 | err_out: | ||
3926 | return err; | ||
3927 | } | ||
3928 | |||
3929 | static void __devexit ql3xxx_remove(struct pci_dev *pdev) | ||
3930 | { | ||
3931 | struct net_device *ndev = pci_get_drvdata(pdev); | ||
3932 | struct ql3_adapter *qdev = netdev_priv(ndev); | ||
3933 | |||
3934 | unregister_netdev(ndev); | ||
3935 | |||
3936 | ql_disable_interrupts(qdev); | ||
3937 | |||
3938 | if (qdev->workqueue) { | ||
3939 | cancel_delayed_work(&qdev->reset_work); | ||
3940 | cancel_delayed_work(&qdev->tx_timeout_work); | ||
3941 | destroy_workqueue(qdev->workqueue); | ||
3942 | qdev->workqueue = NULL; | ||
3943 | } | ||
3944 | |||
3945 | iounmap(qdev->mem_map_registers); | ||
3946 | pci_release_regions(pdev); | ||
3947 | pci_set_drvdata(pdev, NULL); | ||
3948 | free_netdev(ndev); | ||
3949 | } | ||
3950 | |||
3951 | static struct pci_driver ql3xxx_driver = { | ||
3953 | .name = DRV_NAME, | ||
3954 | .id_table = ql3xxx_pci_tbl, | ||
3955 | .probe = ql3xxx_probe, | ||
3956 | .remove = __devexit_p(ql3xxx_remove), | ||
3957 | }; | ||
3958 | |||
3959 | static int __init ql3xxx_init_module(void) | ||
3960 | { | ||
3961 | return pci_register_driver(&ql3xxx_driver); | ||
3962 | } | ||
3963 | |||
3964 | static void __exit ql3xxx_exit(void) | ||
3965 | { | ||
3966 | pci_unregister_driver(&ql3xxx_driver); | ||
3967 | } | ||
3968 | |||
3969 | module_init(ql3xxx_init_module); | ||
3970 | module_exit(ql3xxx_exit); | ||