Diffstat (limited to 'drivers/net/qla3xxx.c')
 -rw-r--r--  drivers/net/qla3xxx.c | 1435 +++++++++++++++++++-----------------
 1 file changed, 643 insertions(+), 792 deletions(-)
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 54ebb65ada18..6168a130f33f 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -5,6 +5,8 @@
  * See LICENSE.qla3xxx for copyright and licensing details.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/types.h>
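
Note: pr_fmt() is consumed by the pr_<level>() helpers from <linux/kernel.h>; defining it before the first include makes every such message carry the module name automatically. A minimal sketch of the effect on a call that appears later in this patch (the resulting log text is an assumption based on how KBUILD_MODNAME expands here):

    /* with: #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt */
    pr_err("%s: Failed ql_sem_spinlock()\n", __func__);
    /* logs roughly: "qla3xxx: ql_get_nvram_params: Failed ql_sem_spinlock()" */
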
@@ -36,14 +38,16 @@
 
 #include "qla3xxx.h"
 
 #define DRV_NAME "qla3xxx"
 #define DRV_STRING "QLogic ISP3XXX Network Driver"
 #define DRV_VERSION "v2.03.00-k5"
-#define PFX DRV_NAME " "
 
 static const char ql3xxx_driver_name[] = DRV_NAME;
 static const char ql3xxx_driver_version[] = DRV_VERSION;
 
+#define TIMED_OUT_MSG \
+"Timed out waiting for management port to get free before issuing command\n"
+
 MODULE_AUTHOR("QLogic Corporation");
 MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
 MODULE_LICENSE("GPL");
@@ -73,24 +77,24 @@ MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);
 /*
  * These are the known PHY's which are used
  */
-typedef enum {
+enum PHY_DEVICE_TYPE {
 	PHY_TYPE_UNKNOWN = 0,
 	PHY_VITESSE_VSC8211,
 	PHY_AGERE_ET1011C,
 	MAX_PHY_DEV_TYPES
-} PHY_DEVICE_et;
+};
 
-typedef struct {
-	PHY_DEVICE_et phyDevice;
-	u32 phyIdOUI;
-	u16 phyIdModel;
-	char *name;
-} PHY_DEVICE_INFO_t;
+struct PHY_DEVICE_INFO {
+	const enum PHY_DEVICE_TYPE phyDevice;
+	const u32 phyIdOUI;
+	const u16 phyIdModel;
+	const char *name;
+};
 
-static const PHY_DEVICE_INFO_t PHY_DEVICES[] =
-	{{PHY_TYPE_UNKNOWN,    0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
+static const struct PHY_DEVICE_INFO PHY_DEVICES[] = {
+	{PHY_TYPE_UNKNOWN,    0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
 	{PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
 	{PHY_AGERE_ET1011C,   0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
 };
 
 
@@ -100,7 +104,8 @@ static const PHY_DEVICE_INFO_t PHY_DEVICES[] =
 static int ql_sem_spinlock(struct ql3_adapter *qdev,
 			   u32 sem_mask, u32 sem_bits)
 {
-	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
 	u32 value;
 	unsigned int seconds = 3;
 
@@ -111,20 +116,22 @@ static int ql_sem_spinlock(struct ql3_adapter *qdev,
 		if ((value & (sem_mask >> 16)) == sem_bits)
 			return 0;
 		ssleep(1);
-	} while(--seconds);
+	} while (--seconds);
 	return -1;
 }
 
 static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
 {
-	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
 	writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
 	readl(&port_regs->CommonRegs.semaphoreReg);
 }
 
 static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
 {
-	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
 	u32 value;
 
 	writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
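
Note: the semaphore helpers above implement a set-then-verify handshake on a single hardware register: the high half-word selects the semaphore bits being written, the low half carries the new value, and the readl() flushes the posted write before the result is checked. A hedged usage sketch built only from calls visible in this patch (the error value and comments are illustrative):

    if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
                        (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 7))
            return -1;      /* retried for ~3 seconds, never got the lock */
    /* ... exclusive access to the PHY/GIO registers ... */
    ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
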
@@ -139,32 +146,28 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
 {
 	int i = 0;
 
-	while (1) {
-		if (!ql_sem_lock(qdev,
-				 QL_DRVR_SEM_MASK,
-				 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
-				  * 2) << 1)) {
-			if (i < 10) {
-				ssleep(1);
-				i++;
-			} else {
-				printk(KERN_ERR PFX "%s: Timed out waiting for "
-				       "driver lock...\n",
-				       qdev->ndev->name);
-				return 0;
-			}
-		} else {
-			printk(KERN_DEBUG PFX
-			       "%s: driver lock acquired.\n",
-			       qdev->ndev->name);
+	while (i < 10) {
+		if (i)
+			ssleep(1);
+
+		if (ql_sem_lock(qdev,
+				QL_DRVR_SEM_MASK,
+				(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
+				 * 2) << 1)) {
+			netdev_printk(KERN_DEBUG, qdev->ndev,
+				      "driver lock acquired\n");
 			return 1;
 		}
 	}
+
+	netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
+	return 0;
 }
 
 static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
 {
-	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
 
 	writel(((ISP_CONTROL_NP_MASK << 16) | page),
 	       &port_regs->CommonRegs.ispControlStatus);
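
Note: the rewrite above flattens the wait loop so the sleep happens at the top of every retry after the first. Reduced to a skeleton, the pattern is a bounded try/sleep/retry wait (the helper name below is hypothetical, and an explicit counter increment is shown for clarity):

    int tries = 0;

    while (tries < 10) {
            if (tries)
                    ssleep(1);              /* back off between attempts */
            if (try_acquire_lock())         /* hypothetical acquire step */
                    return 1;               /* success */
            tries++;
    }
    return 0;                               /* timed out */
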
@@ -172,8 +175,7 @@ static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
 	qdev->current_page = page;
 }
 
-static u32 ql_read_common_reg_l(struct ql3_adapter *qdev,
-				u32 __iomem * reg)
+static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
 {
 	u32 value;
 	unsigned long hw_flags;
@@ -185,8 +187,7 @@ static u32 ql_read_common_reg_l(struct ql3_adapter *qdev,
 	return value;
 }
 
-static u32 ql_read_common_reg(struct ql3_adapter *qdev,
-			      u32 __iomem * reg)
+static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
 {
 	return readl(reg);
 }
@@ -199,7 +200,7 @@ static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 
 	if (qdev->current_page != 0)
-		ql_set_register_page(qdev,0);
+		ql_set_register_page(qdev, 0);
 	value = readl(reg);
 
 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
@@ -209,7 +210,7 @@ static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
 static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
 {
 	if (qdev->current_page != 0)
-		ql_set_register_page(qdev,0);
+		ql_set_register_page(qdev, 0);
 	return readl(reg);
 }
 
@@ -243,7 +244,7 @@ static void ql_write_page0_reg(struct ql3_adapter *qdev,
 			       u32 __iomem *reg, u32 value)
 {
 	if (qdev->current_page != 0)
-		ql_set_register_page(qdev,0);
+		ql_set_register_page(qdev, 0);
 	writel(value, reg);
 	readl(reg);
 }
@@ -255,7 +256,7 @@ static void ql_write_page1_reg(struct ql3_adapter *qdev,
 			       u32 __iomem *reg, u32 value)
 {
 	if (qdev->current_page != 1)
-		ql_set_register_page(qdev,1);
+		ql_set_register_page(qdev, 1);
 	writel(value, reg);
 	readl(reg);
 }
@@ -267,14 +268,15 @@ static void ql_write_page2_reg(struct ql3_adapter *qdev,
 			       u32 __iomem *reg, u32 value)
 {
 	if (qdev->current_page != 2)
-		ql_set_register_page(qdev,2);
+		ql_set_register_page(qdev, 2);
 	writel(value, reg);
 	readl(reg);
 }
 
 static void ql_disable_interrupts(struct ql3_adapter *qdev)
 {
-	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
 
 	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
 			      (ISP_IMR_ENABLE_INT << 16));
@@ -283,7 +285,8 @@ static void ql_disable_interrupts(struct ql3_adapter *qdev)
 
 static void ql_enable_interrupts(struct ql3_adapter *qdev)
 {
-	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
 
 	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
 			      ((0xff << 16) | ISP_IMR_ENABLE_INT));
@@ -308,8 +311,7 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
 		lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
 						   qdev->lrg_buffer_len);
 		if (unlikely(!lrg_buf_cb->skb)) {
-			printk(KERN_ERR PFX "%s: failed netdev_alloc_skb().\n",
-			       qdev->ndev->name);
+			netdev_err(qdev->ndev, "failed netdev_alloc_skb()\n");
 			qdev->lrg_buf_skb_check++;
 		} else {
 			/*
@@ -323,9 +325,10 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
 					     QL_HEADER_SPACE,
 					     PCI_DMA_FROMDEVICE);
 			err = pci_dma_mapping_error(qdev->pdev, map);
-			if(err) {
-				printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
-				       qdev->ndev->name, err);
+			if (err) {
+				netdev_err(qdev->ndev,
+					   "PCI mapping failed with error: %d\n",
+					   err);
 				dev_kfree_skb(lrg_buf_cb->skb);
 				lrg_buf_cb->skb = NULL;
 
@@ -350,10 +353,11 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
 static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
 							   *qdev)
 {
-	struct ql_rcv_buf_cb *lrg_buf_cb;
+	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
 
-	if ((lrg_buf_cb = qdev->lrg_buf_free_head) != NULL) {
-		if ((qdev->lrg_buf_free_head = lrg_buf_cb->next) == NULL)
+	if (lrg_buf_cb != NULL) {
+		qdev->lrg_buf_free_head = lrg_buf_cb->next;
+		if (qdev->lrg_buf_free_head == NULL)
 			qdev->lrg_buf_free_tail = NULL;
 		qdev->lrg_buf_free_count--;
 	}
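
Note: the last hunk unpicks two assignment-in-conditional expressions; what remains is an ordinary singly-linked list pop with a tail fixup. The same logic in a self-contained form (type and function names here are illustrative, not the driver's):

    struct buf_cb {
            struct buf_cb *next;
    };

    /* Pop the head of a free list; clear the tail when it empties. */
    static struct buf_cb *free_list_pop(struct buf_cb **head, struct buf_cb **tail)
    {
            struct buf_cb *cb = *head;

            if (cb != NULL) {
                    *head = cb->next;
                    if (*head == NULL)
                            *tail = NULL;
            }
            return cb;
    }
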
@@ -374,13 +378,13 @@ static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
 static void fm93c56a_select(struct ql3_adapter *qdev)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
 			qdev->mem_map_registers;
+	u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
 
 	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
-	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
-			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
-	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
-			   ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
+	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
+	ql_write_nvram_reg(qdev, spir,
+			   ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
 }
 
 /*
@@ -393,51 +397,40 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
 	u32 dataBit;
 	u32 previousBit;
 	struct ql3xxx_port_registers __iomem *port_regs =
 			qdev->mem_map_registers;
+	u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
 
 	/* Clock in a zero, then do the start bit */
-	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
-			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
-			   AUBURN_EEPROM_DO_1);
-	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
-			   ISP_NVRAM_MASK | qdev->
-			   eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
-			   AUBURN_EEPROM_CLK_RISE);
-	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
-			   ISP_NVRAM_MASK | qdev->
-			   eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
-			   AUBURN_EEPROM_CLK_FALL);
+	ql_write_nvram_reg(qdev, spir,
+			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+			    AUBURN_EEPROM_DO_1));
+	ql_write_nvram_reg(qdev, spir,
+			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+			    AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE));
+	ql_write_nvram_reg(qdev, spir,
+			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+			    AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL));
 
 	mask = 1 << (FM93C56A_CMD_BITS - 1);
 	/* Force the previous data bit to be different */
 	previousBit = 0xffff;
 	for (i = 0; i < FM93C56A_CMD_BITS; i++) {
-		dataBit =
-		    (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0;
+		dataBit = (cmd & mask)
+			? AUBURN_EEPROM_DO_1
+			: AUBURN_EEPROM_DO_0;
 		if (previousBit != dataBit) {
-			/*
-			 * If the bit changed, then change the DO state to
-			 * match
-			 */
-			ql_write_nvram_reg(qdev,
-					   &port_regs->CommonRegs.
-					   serialPortInterfaceReg,
-					   ISP_NVRAM_MASK | qdev->
-					   eeprom_cmd_data | dataBit);
+			/* If the bit changed, change the DO state to match */
+			ql_write_nvram_reg(qdev, spir,
+					   (ISP_NVRAM_MASK |
+					    qdev->eeprom_cmd_data | dataBit));
 			previousBit = dataBit;
 		}
-		ql_write_nvram_reg(qdev,
-				   &port_regs->CommonRegs.
-				   serialPortInterfaceReg,
-				   ISP_NVRAM_MASK | qdev->
-				   eeprom_cmd_data | dataBit |
-				   AUBURN_EEPROM_CLK_RISE);
-		ql_write_nvram_reg(qdev,
-				   &port_regs->CommonRegs.
-				   serialPortInterfaceReg,
-				   ISP_NVRAM_MASK | qdev->
-				   eeprom_cmd_data | dataBit |
-				   AUBURN_EEPROM_CLK_FALL);
+		ql_write_nvram_reg(qdev, spir,
+				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+				    dataBit | AUBURN_EEPROM_CLK_RISE));
+		ql_write_nvram_reg(qdev, spir,
+				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+				    dataBit | AUBURN_EEPROM_CLK_FALL));
 		cmd = cmd << 1;
 	}
 
@@ -445,33 +438,24 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
 	/* Force the previous data bit to be different */
 	previousBit = 0xffff;
 	for (i = 0; i < addrBits; i++) {
-		dataBit =
-		    (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 :
-		    AUBURN_EEPROM_DO_0;
+		dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1
+			: AUBURN_EEPROM_DO_0;
 		if (previousBit != dataBit) {
 			/*
 			 * If the bit changed, then change the DO state to
 			 * match
 			 */
-			ql_write_nvram_reg(qdev,
-					   &port_regs->CommonRegs.
-					   serialPortInterfaceReg,
-					   ISP_NVRAM_MASK | qdev->
-					   eeprom_cmd_data | dataBit);
+			ql_write_nvram_reg(qdev, spir,
+					   (ISP_NVRAM_MASK |
+					    qdev->eeprom_cmd_data | dataBit));
 			previousBit = dataBit;
 		}
-		ql_write_nvram_reg(qdev,
-				   &port_regs->CommonRegs.
-				   serialPortInterfaceReg,
-				   ISP_NVRAM_MASK | qdev->
-				   eeprom_cmd_data | dataBit |
-				   AUBURN_EEPROM_CLK_RISE);
-		ql_write_nvram_reg(qdev,
-				   &port_regs->CommonRegs.
-				   serialPortInterfaceReg,
-				   ISP_NVRAM_MASK | qdev->
-				   eeprom_cmd_data | dataBit |
-				   AUBURN_EEPROM_CLK_FALL);
+		ql_write_nvram_reg(qdev, spir,
+				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+				    dataBit | AUBURN_EEPROM_CLK_RISE));
+		ql_write_nvram_reg(qdev, spir,
+				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+				    dataBit | AUBURN_EEPROM_CLK_FALL));
 		eepromAddr = eepromAddr << 1;
 	}
 }
@@ -482,10 +466,11 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
 static void fm93c56a_deselect(struct ql3_adapter *qdev)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
 			qdev->mem_map_registers;
+	u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+
 	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
-	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
-			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
+	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
 }
 
 /*
@@ -497,29 +482,23 @@ static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
 	u32 data = 0;
 	u32 dataBit;
 	struct ql3xxx_port_registers __iomem *port_regs =
 			qdev->mem_map_registers;
+	u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
 
 	/* Read the data bits */
 	/* The first bit is a dummy.  Clock right over it. */
 	for (i = 0; i < dataBits; i++) {
-		ql_write_nvram_reg(qdev,
-				   &port_regs->CommonRegs.
-				   serialPortInterfaceReg,
-				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
-				   AUBURN_EEPROM_CLK_RISE);
-		ql_write_nvram_reg(qdev,
-				   &port_regs->CommonRegs.
-				   serialPortInterfaceReg,
-				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
-				   AUBURN_EEPROM_CLK_FALL);
-		dataBit =
-		    (ql_read_common_reg
-		     (qdev,
-		      &port_regs->CommonRegs.
-		      serialPortInterfaceReg) & AUBURN_EEPROM_DI_1) ? 1 : 0;
+		ql_write_nvram_reg(qdev, spir,
+				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+				   AUBURN_EEPROM_CLK_RISE);
+		ql_write_nvram_reg(qdev, spir,
+				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+				   AUBURN_EEPROM_CLK_FALL);
+		dataBit = (ql_read_common_reg(qdev, spir) &
+			   AUBURN_EEPROM_DI_1) ? 1 : 0;
 		data = (data << 1) | dataBit;
 	}
-	*value = (u16) data;
+	*value = (u16)data;
 }
 
 /*
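
Note: the fm93c56a_* hunks all apply the same refactor: the repeatedly spelled-out address &port_regs->CommonRegs.serialPortInterfaceReg is hoisted into a local spir pointer, so each clock edge of the EEPROM bit-bang becomes one short call. A sketch of the resulting shape (register and helper names are from this file; the two-line body is illustrative):

    u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

    ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
                       AUBURN_EEPROM_CLK_RISE);         /* clock high */
    ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
                       AUBURN_EEPROM_CLK_FALL);         /* clock low */
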
@@ -551,13 +530,12 @@ static int ql_get_nvram_params(struct ql3_adapter *qdev)
 
 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 
-	pEEPROMData = (u16 *) & qdev->nvram_data;
+	pEEPROMData = (u16 *)&qdev->nvram_data;
 	qdev->eeprom_cmd_data = 0;
-	if(ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
-			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
-			 2) << 10)) {
-		printk(KERN_ERR PFX"%s: Failed ql_sem_spinlock().\n",
-			__func__);
+	if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
+			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+			     2) << 10)) {
+		pr_err("%s: Failed ql_sem_spinlock()\n", __func__);
 		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 		return -1;
 	}
@@ -570,8 +548,8 @@ static int ql_get_nvram_params(struct ql3_adapter *qdev)
 	ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);
 
 	if (checksum != 0) {
-		printk(KERN_ERR PFX "%s: checksum should be zero, is %x!!\n",
-		       qdev->ndev->name, checksum);
+		netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n",
+			   checksum);
 		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 		return -1;
 	}
@@ -587,7 +565,7 @@ static const u32 PHYAddr[2] = {
 static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-			qdev->mem_map_registers;
+		qdev->mem_map_registers;
 	u32 temp;
 	int count = 1000;
 
@@ -604,7 +582,7 @@ static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
 static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-			qdev->mem_map_registers;
+		qdev->mem_map_registers;
 	u32 scanControl;
 
 	if (qdev->numPorts > 1) {
@@ -632,7 +610,7 @@ static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
 {
 	u8 ret;
 	struct ql3xxx_port_registers __iomem *port_regs =
-			qdev->mem_map_registers;
+		qdev->mem_map_registers;
 
 	/* See if scan mode is enabled before we turn it off */
 	if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
@@ -662,17 +640,13 @@ static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
 			       u16 regAddr, u16 value, u32 phyAddr)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-			qdev->mem_map_registers;
+		qdev->mem_map_registers;
 	u8 scanWasEnabled;
 
 	scanWasEnabled = ql_mii_disable_scan_mode(qdev);
 
 	if (ql_wait_for_mii_ready(qdev)) {
-		if (netif_msg_link(qdev))
-			printk(KERN_WARNING PFX
-			       "%s Timed out waiting for management port to "
-			       "get free before issuing command.\n",
-			       qdev->ndev->name);
+		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
 		return -1;
 	}
 
@@ -683,11 +657,7 @@ static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
 
 	/* Wait for write to complete 9/10/04 SJP */
 	if (ql_wait_for_mii_ready(qdev)) {
-		if (netif_msg_link(qdev))
-			printk(KERN_WARNING PFX
-			       "%s: Timed out waiting for management port to "
-			       "get free before issuing command.\n",
-			       qdev->ndev->name);
+		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
 		return -1;
 	}
 
@@ -698,21 +668,17 @@ static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
 }
 
 static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
-			      u16 * value, u32 phyAddr)
+			      u16 *value, u32 phyAddr)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-			qdev->mem_map_registers;
+		qdev->mem_map_registers;
 	u8 scanWasEnabled;
 	u32 temp;
 
 	scanWasEnabled = ql_mii_disable_scan_mode(qdev);
 
 	if (ql_wait_for_mii_ready(qdev)) {
-		if (netif_msg_link(qdev))
-			printk(KERN_WARNING PFX
-			       "%s: Timed out waiting for management port to "
-			       "get free before issuing command.\n",
-			       qdev->ndev->name);
+		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
 		return -1;
 	}
 
@@ -727,11 +693,7 @@ static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
 
 	/* Wait for the read to complete */
 	if (ql_wait_for_mii_ready(qdev)) {
-		if (netif_msg_link(qdev))
-			printk(KERN_WARNING PFX
-			       "%s: Timed out waiting for management port to "
-			       "get free after issuing command.\n",
-			       qdev->ndev->name);
+		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
 		return -1;
 	}
 
@@ -747,16 +709,12 @@ static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
 static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-			qdev->mem_map_registers;
+		qdev->mem_map_registers;
 
 	ql_mii_disable_scan_mode(qdev);
 
 	if (ql_wait_for_mii_ready(qdev)) {
-		if (netif_msg_link(qdev))
-			printk(KERN_WARNING PFX
-			       "%s: Timed out waiting for management port to "
-			       "get free before issuing command.\n",
-			       qdev->ndev->name);
+		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
 		return -1;
 	}
 
@@ -767,11 +725,7 @@ static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
 
 	/* Wait for write to complete. */
 	if (ql_wait_for_mii_ready(qdev)) {
-		if (netif_msg_link(qdev))
-			printk(KERN_WARNING PFX
-			       "%s: Timed out waiting for management port to "
-			       "get free before issuing command.\n",
-			       qdev->ndev->name);
+		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
 		return -1;
 	}
 
@@ -784,16 +738,12 @@ static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
 {
 	u32 temp;
 	struct ql3xxx_port_registers __iomem *port_regs =
-			qdev->mem_map_registers;
+		qdev->mem_map_registers;
 
 	ql_mii_disable_scan_mode(qdev);
 
 	if (ql_wait_for_mii_ready(qdev)) {
-		if (netif_msg_link(qdev))
-			printk(KERN_WARNING PFX
-			       "%s: Timed out waiting for management port to "
-			       "get free before issuing command.\n",
-			       qdev->ndev->name);
+		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
 		return -1;
 	}
 
@@ -808,11 +758,7 @@ static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
 
 	/* Wait for the read to complete */
 	if (ql_wait_for_mii_ready(qdev)) {
-		if (netif_msg_link(qdev))
-			printk(KERN_WARNING PFX
-			       "%s: Timed out waiting for management port to "
-			       "get free before issuing command.\n",
-			       qdev->ndev->name);
+		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
 		return -1;
 	}
 
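
Note: netif_warn() collapses the old two-step `if (netif_msg_link(qdev)) printk(...)` into one statement; it tests the named NETIF_MSG_* bit in the adapter's msg_enable field and prefixes the output with the net_device name. A hedged equivalence sketch (TIMED_OUT_MSG is the macro added at the top of this patch):

    /* new form: */
    netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);

    /* roughly equivalent to: */
    if (qdev->msg_enable & NETIF_MSG_LINK)
            netdev_warn(qdev->ndev, TIMED_OUT_MSG);
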
@@ -898,7 +844,7 @@ static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
 
 static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
 {
-	printk(KERN_INFO "%s: enabling Agere specific PHY\n", qdev->ndev->name);
+	netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
 	/* power down device bit 11 = 1 */
 	ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
 	/* enable diagnostic mode bit 2 = 1 */
@@ -918,7 +864,8 @@ static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
 	/* point to hidden reg 0x2806 */
 	ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
 	/* Write new PHYAD w/bit 5 set */
-	ql_mii_write_reg_ex(qdev, 0x11, 0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
+	ql_mii_write_reg_ex(qdev, 0x11,
+			    0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
 	/*
 	 * Disable diagnostic mode bit 2 = 0
 	 * Power up device bit 11 = 0
@@ -929,21 +876,19 @@ static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
 	ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
 }
 
-static PHY_DEVICE_et getPhyType (struct ql3_adapter *qdev,
-				 u16 phyIdReg0, u16 phyIdReg1)
+static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev,
+				       u16 phyIdReg0, u16 phyIdReg1)
 {
-	PHY_DEVICE_et result = PHY_TYPE_UNKNOWN;
+	enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN;
 	u32 oui;
 	u16 model;
 	int i;
 
-	if (phyIdReg0 == 0xffff) {
+	if (phyIdReg0 == 0xffff)
 		return result;
-	}
 
-	if (phyIdReg1 == 0xffff) {
+	if (phyIdReg1 == 0xffff)
 		return result;
-	}
 
 	/* oui is split between two registers */
 	oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);
@@ -951,15 +896,13 @@ static PHY_DEVICE_et getPhyType (struct ql3_adapter *qdev,
 	model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;
 
 	/* Scan table for this PHY */
-	for(i = 0; i < MAX_PHY_DEV_TYPES; i++) {
-		if ((oui == PHY_DEVICES[i].phyIdOUI) && (model == PHY_DEVICES[i].phyIdModel))
-		{
+	for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
+		if ((oui == PHY_DEVICES[i].phyIdOUI) &&
+		    (model == PHY_DEVICES[i].phyIdModel)) {
+			netdev_info(qdev->ndev, "Phy: %s\n",
+				    PHY_DEVICES[i].name);
 			result = PHY_DEVICES[i].phyDevice;
-
-			printk(KERN_INFO "%s: Phy: %s\n",
-			       qdev->ndev->name, PHY_DEVICES[i].name);
-
 			break;
 		}
 	}
@@ -970,9 +913,8 @@ static int ql_phy_get_speed(struct ql3_adapter *qdev)
 {
 	u16 reg;
 
-	switch(qdev->phyType) {
-	case PHY_AGERE_ET1011C:
-	{
+	switch (qdev->phyType) {
+	case PHY_AGERE_ET1011C: {
 		if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
 			return 0;
 
@@ -980,20 +922,20 @@ static int ql_phy_get_speed(struct ql3_adapter *qdev)
 		break;
 	}
 	default:
 		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
 			return 0;
 
 		reg = (((reg & 0x18) >> 3) & 3);
 	}
 
-	switch(reg) {
-		case 2:
-			return SPEED_1000;
-		case 1:
-			return SPEED_100;
-		case 0:
-			return SPEED_10;
-		default:
-			return -1;
-	}
+	switch (reg) {
+	case 2:
+		return SPEED_1000;
+	case 1:
+		return SPEED_100;
+	case 0:
+		return SPEED_10;
+	default:
+		return -1;
+	}
 }
@@ -1002,17 +944,15 @@ static int ql_is_full_dup(struct ql3_adapter *qdev)
 {
 	u16 reg;
 
-	switch(qdev->phyType) {
-	case PHY_AGERE_ET1011C:
-	{
+	switch (qdev->phyType) {
+	case PHY_AGERE_ET1011C: {
 		if (ql_mii_read_reg(qdev, 0x1A, &reg))
 			return 0;
 
 		return ((reg & 0x0080) && (reg & 0x1000)) != 0;
 	}
 	case PHY_VITESSE_VSC8211:
-	default:
-	{
+	default: {
 		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
 			return 0;
 		return (reg & PHY_AUX_DUPLEX_STAT) != 0;
@@ -1040,17 +980,15 @@ static int PHY_Setup(struct ql3_adapter *qdev)
 
 	/* Determine the PHY we are using by reading the ID's */
 	err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
-	if(err != 0) {
-		printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG\n",
-		       qdev->ndev->name);
-		return err;
+	if (err != 0) {
+		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n");
+		return err;
 	}
 
 	err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
-	if(err != 0) {
-		printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG\n",
-		       qdev->ndev->name);
-		return err;
+	if (err != 0) {
+		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n");
+		return err;
 	}
 
 	/* Check if we have a Agere PHY */
@@ -1058,24 +996,22 @@ static int PHY_Setup(struct ql3_adapter *qdev)
 
 	/* Determine which MII address we should be using
 	   determined by the index of the card */
-	if (qdev->mac_index == 0) {
+	if (qdev->mac_index == 0)
 		miiAddr = MII_AGERE_ADDR_1;
-	} else {
+	else
 		miiAddr = MII_AGERE_ADDR_2;
-	}
 
-	err =ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
-	if(err != 0) {
-		printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG after Agere detected\n",
-		       qdev->ndev->name);
+	err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
+	if (err != 0) {
+		netdev_err(qdev->ndev,
+			   "Could not read from reg PHY_ID_0_REG after Agere detected\n");
 		return err;
 	}
 
 	err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
-	if(err != 0) {
-		printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG after Agere detected\n",
-		       qdev->ndev->name);
-		return err;
+	if (err != 0) {
+		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n");
+		return err;
 	}
 
 	/* We need to remember to initialize the Agere PHY */
@@ -1090,7 +1026,7 @@ static int PHY_Setup(struct ql3_adapter *qdev)
 		/* need this here so address gets changed */
 		phyAgereSpecificInit(qdev, miiAddr);
 	} else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
-		printk(KERN_ERR "%s: PHY is unknown\n", qdev->ndev->name);
+		netdev_err(qdev->ndev, "PHY is unknown\n");
 		return -EIO;
 	}
 
@@ -1103,7 +1039,7 @@ static int PHY_Setup(struct ql3_adapter *qdev)
 static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-			qdev->mem_map_registers;
+		qdev->mem_map_registers;
 	u32 value;
 
 	if (enable)
@@ -1123,7 +1059,7 @@ static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
 static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-			qdev->mem_map_registers;
+		qdev->mem_map_registers;
 	u32 value;
 
 	if (enable)
@@ -1143,7 +1079,7 @@ static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
 static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-			qdev->mem_map_registers;
+		qdev->mem_map_registers;
 	u32 value;
 
 	if (enable)
@@ -1163,7 +1099,7 @@ static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
 static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-			qdev->mem_map_registers;
+		qdev->mem_map_registers;
 	u32 value;
 
 	if (enable)
@@ -1183,7 +1119,7 @@ static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
 static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-			qdev->mem_map_registers;
+		qdev->mem_map_registers;
 	u32 value;
 
 	if (enable)
@@ -1205,7 +1141,7 @@ static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
 static int ql_is_fiber(struct ql3_adapter *qdev)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-			qdev->mem_map_registers;
+		qdev->mem_map_registers;
 	u32 bitToCheck = 0;
 	u32 temp;
 
@@ -1235,7 +1171,7 @@ static int ql_is_auto_cfg(struct ql3_adapter *qdev)
 static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-			qdev->mem_map_registers;
+		qdev->mem_map_registers;
 	u32 bitToCheck = 0;
 	u32 temp;
 
@@ -1250,18 +1186,11 @@ static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
 
 	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
 	if (temp & bitToCheck) {
-		if (netif_msg_link(qdev))
-			printk(KERN_INFO PFX
-			       "%s: Auto-Negotiate complete.\n",
-			       qdev->ndev->name);
+		netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n");
 		return 1;
-	} else {
-		if (netif_msg_link(qdev))
-			printk(KERN_WARNING PFX
-			       "%s: Auto-Negotiate incomplete.\n",
-			       qdev->ndev->name);
-		return 0;
 	}
+	netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n");
+	return 0;
 }
 
 /*
@@ -1278,7 +1207,7 @@ static int ql_is_neg_pause(struct ql3_adapter *qdev)
 static int ql_auto_neg_error(struct ql3_adapter *qdev)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-			qdev->mem_map_registers;
+		qdev->mem_map_registers;
 	u32 bitToCheck = 0;
 	u32 temp;
 
@@ -1316,7 +1245,7 @@ static int ql_is_link_full_dup(struct ql3_adapter *qdev)
 static int ql_link_down_detect(struct ql3_adapter *qdev)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-			qdev->mem_map_registers;
+		qdev->mem_map_registers;
 	u32 bitToCheck = 0;
 	u32 temp;
 
@@ -1340,7 +1269,7 @@ static int ql_link_down_detect(struct ql3_adapter *qdev)
 static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-			qdev->mem_map_registers;
+		qdev->mem_map_registers;
 
 	switch (qdev->mac_index) {
 	case 0:
@@ -1370,7 +1299,7 @@ static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
 static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-			qdev->mem_map_registers;
+		qdev->mem_map_registers;
 	u32 bitToCheck = 0;
 	u32 temp;
 
@@ -1387,16 +1316,13 @@ static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
 
 	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
 	if (temp & bitToCheck) {
-		if (netif_msg_link(qdev))
-			printk(KERN_DEBUG PFX
-			       "%s: is not link master.\n", qdev->ndev->name);
+		netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
+			     "not link master\n");
 		return 0;
-	} else {
-		if (netif_msg_link(qdev))
-			printk(KERN_DEBUG PFX
-			       "%s: is link master.\n", qdev->ndev->name);
-		return 1;
 	}
+
+	netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
+	return 1;
 }
 
 static void ql_phy_reset_ex(struct ql3_adapter *qdev)
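
Note: netif_printk(priv, type, level, dev, ...) is the level-parameterized form underlying netif_info()/netif_warn(); for these KERN_DEBUG messages the patch uses it directly. A hedged equivalence sketch:

    netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");

    /* behaves like: */
    if (netif_msg_link(qdev))
            netdev_printk(KERN_DEBUG, qdev->ndev, "link master\n");
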
@@ -1410,19 +1336,20 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
 	u16 reg;
 	u16 portConfiguration;
 
-	if(qdev->phyType == PHY_AGERE_ET1011C) {
-		/* turn off external loopback */
+	if (qdev->phyType == PHY_AGERE_ET1011C)
 		ql_mii_write_reg(qdev, 0x13, 0x0000);
-	}
+					/* turn off external loopback */
 
-	if(qdev->mac_index == 0)
-		portConfiguration = qdev->nvram_data.macCfg_port0.portConfiguration;
+	if (qdev->mac_index == 0)
+		portConfiguration =
+			qdev->nvram_data.macCfg_port0.portConfiguration;
 	else
-		portConfiguration = qdev->nvram_data.macCfg_port1.portConfiguration;
+		portConfiguration =
+			qdev->nvram_data.macCfg_port1.portConfiguration;
 
 	/* Some HBA's in the field are set to 0 and they need to
 	   be reinterpreted with a default value */
-	if(portConfiguration == 0)
+	if (portConfiguration == 0)
 		portConfiguration = PORT_CONFIG_DEFAULT;
 
 	/* Set the 1000 advertisements */
@@ -1430,8 +1357,8 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
 			       PHYAddr[qdev->mac_index]);
 	reg &= ~PHY_GIG_ALL_PARAMS;
 
-	if(portConfiguration & PORT_CONFIG_1000MB_SPEED) {
-		if(portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
+	if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
+		if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
 			reg |= PHY_GIG_ADV_1000F;
 		else
 			reg |= PHY_GIG_ADV_1000H;
@@ -1445,29 +1372,27 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
 			       PHYAddr[qdev->mac_index]);
 	reg &= ~PHY_NEG_ALL_PARAMS;
 
-	if(portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
+	if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
 		reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;
 
-	if(portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
-		if(portConfiguration & PORT_CONFIG_100MB_SPEED)
+	if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
+		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
 			reg |= PHY_NEG_ADV_100F;
 
-		if(portConfiguration & PORT_CONFIG_10MB_SPEED)
+		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
 			reg |= PHY_NEG_ADV_10F;
 	}
 
-	if(portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
-		if(portConfiguration & PORT_CONFIG_100MB_SPEED)
+	if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
+		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
 			reg |= PHY_NEG_ADV_100H;
 
-		if(portConfiguration & PORT_CONFIG_10MB_SPEED)
+		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
 			reg |= PHY_NEG_ADV_10H;
 	}
 
-	if(portConfiguration &
-	   PORT_CONFIG_1000MB_SPEED) {
+	if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
 		reg |= 1;
-	}
 
 	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
 			    PHYAddr[qdev->mac_index]);
@@ -1492,7 +1417,7 @@ static void ql_phy_init_ex(struct ql3_adapter *qdev)
 static u32 ql_get_link_state(struct ql3_adapter *qdev)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-			qdev->mem_map_registers;
+		qdev->mem_map_registers;
 	u32 bitToCheck = 0;
 	u32 temp, linkState;
 
@@ -1504,22 +1429,22 @@ static u32 ql_get_link_state(struct ql3_adapter *qdev)
 		bitToCheck = PORT_STATUS_UP1;
 		break;
 	}
+
 	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
-	if (temp & bitToCheck) {
+	if (temp & bitToCheck)
 		linkState = LS_UP;
-	} else {
+	else
 		linkState = LS_DOWN;
-	}
+
 	return linkState;
 }
 
 static int ql_port_start(struct ql3_adapter *qdev)
 {
-	if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
-		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
-			 2) << 7)) {
-		printk(KERN_ERR "%s: Could not get hw lock for GIO\n",
-		       qdev->ndev->name);
+	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+			     2) << 7)) {
+		netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
 		return -1;
 	}
 
@@ -1537,19 +1462,16 @@ static int ql_finish_auto_neg(struct ql3_adapter *qdev)
 
 static int ql_finish_auto_neg(struct ql3_adapter *qdev)
 {
-	if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
-		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
-			 2) << 7))
+	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+			     2) << 7))
 		return -1;
 
 	if (!ql_auto_neg_error(qdev)) {
-		if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
+		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
 			/* configure the MAC */
-			if (netif_msg_link(qdev))
-				printk(KERN_DEBUG PFX
-				       "%s: Configuring link.\n",
-				       qdev->ndev->
-				       name);
+			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
+				     "Configuring link\n");
 			ql_mac_cfg_soft_reset(qdev, 1);
 			ql_mac_cfg_gig(qdev,
 				       (ql_get_link_speed
@@ -1564,43 +1486,32 @@ static int ql_finish_auto_neg(struct ql3_adapter *qdev)
 			ql_mac_cfg_soft_reset(qdev, 0);
 
 			/* enable the MAC */
-			if (netif_msg_link(qdev))
-				printk(KERN_DEBUG PFX
-				       "%s: Enabling mac.\n",
-				       qdev->ndev->
-				       name);
+			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
+				     "Enabling mac\n");
 			ql_mac_enable(qdev, 1);
 		}
 
 		qdev->port_link_state = LS_UP;
 		netif_start_queue(qdev->ndev);
 		netif_carrier_on(qdev->ndev);
-		if (netif_msg_link(qdev))
-			printk(KERN_INFO PFX
-			       "%s: Link is up at %d Mbps, %s duplex.\n",
-			       qdev->ndev->name,
-			       ql_get_link_speed(qdev),
-			       ql_is_link_full_dup(qdev)
-			       ? "full" : "half");
+		netif_info(qdev, link, qdev->ndev,
+			   "Link is up at %d Mbps, %s duplex\n",
+			   ql_get_link_speed(qdev),
+			   ql_is_link_full_dup(qdev) ? "full" : "half");
 
 	} else {	/* Remote error detected */
 
-		if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
-			if (netif_msg_link(qdev))
-				printk(KERN_DEBUG PFX
-				       "%s: Remote error detected. "
-				       "Calling ql_port_start().\n",
-				       qdev->ndev->
-				       name);
+		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
+			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
+				     "Remote error detected. Calling ql_port_start()\n");
 			/*
 			 * ql_port_start() is shared code and needs
 			 * to lock the PHY on it's own.
 			 */
 			ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
-			if(ql_port_start(qdev)) {/* Restart port */
+			if (ql_port_start(qdev)) /* Restart port */
 				return -1;
-			} else
-				return 0;
+			return 0;
 		}
 	}
 	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
@@ -1619,33 +1530,28 @@ static void ql_link_state_machine_work(struct work_struct *work)
 
 	curr_link_state = ql_get_link_state(qdev);
 
-	if (test_bit(QL_RESET_ACTIVE,&qdev->flags)) {
-		if (netif_msg_link(qdev))
-			printk(KERN_INFO PFX
-			       "%s: Reset in progress, skip processing link "
-			       "state.\n", qdev->ndev->name);
+	if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
+		netif_info(qdev, link, qdev->ndev,
+			   "Reset in progress, skip processing link state\n");
 
 		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 
 		/* Restart timer on 2 second interval. */
-		mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);\
+		mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
 
 		return;
 	}
 
 	switch (qdev->port_link_state) {
 	default:
-		if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
+		if (test_bit(QL_LINK_MASTER, &qdev->flags))
 			ql_port_start(qdev);
-		}
 		qdev->port_link_state = LS_DOWN;
 		/* Fall Through */
 
 	case LS_DOWN:
 		if (curr_link_state == LS_UP) {
-			if (netif_msg_link(qdev))
-				printk(KERN_INFO PFX "%s: Link is up.\n",
-				       qdev->ndev->name);
+			netif_info(qdev, link, qdev->ndev, "Link is up\n");
 			if (ql_is_auto_neg_complete(qdev))
 				ql_finish_auto_neg(qdev);
 
@@ -1662,9 +1568,7 @@ static void ql_link_state_machine_work(struct work_struct *work)
 	 * back up
 	 */
 	if (curr_link_state == LS_DOWN) {
-		if (netif_msg_link(qdev))
-			printk(KERN_INFO PFX "%s: Link is down.\n",
-			       qdev->ndev->name);
+		netif_info(qdev, link, qdev->ndev, "Link is down\n");
 		qdev->port_link_state = LS_DOWN;
 	}
 	if (ql_link_down_detect(qdev))
@@ -1683,9 +1587,9 @@ static void ql_link_state_machine_work(struct work_struct *work)
 static void ql_get_phy_owner(struct ql3_adapter *qdev)
 {
 	if (ql_this_adapter_controls_port(qdev))
-		set_bit(QL_LINK_MASTER,&qdev->flags);
+		set_bit(QL_LINK_MASTER, &qdev->flags);
 	else
-		clear_bit(QL_LINK_MASTER,&qdev->flags);
+		clear_bit(QL_LINK_MASTER, &qdev->flags);
 }
 
 /*
1691/* 1595/*
@@ -1695,7 +1599,7 @@ static void ql_init_scan_mode(struct ql3_adapter *qdev)
1695{ 1599{
1696 ql_mii_enable_scan_mode(qdev); 1600 ql_mii_enable_scan_mode(qdev);
1697 1601
1698 if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) { 1602 if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
1699 if (ql_this_adapter_controls_port(qdev)) 1603 if (ql_this_adapter_controls_port(qdev))
1700 ql_petbi_init_ex(qdev); 1604 ql_petbi_init_ex(qdev);
1701 } else { 1605 } else {
@@ -1705,18 +1609,18 @@ static void ql_init_scan_mode(struct ql3_adapter *qdev)
1705} 1609}
1706 1610
1707/* 1611/*
1708 * MII_Setup needs to be called before taking the PHY out of reset so that the 1612 * MII_Setup needs to be called before taking the PHY out of reset
1709 * management interface clock speed can be set properly. It would be better if 1613 * so that the management interface clock speed can be set properly.
1710 * we had a way to disable MDC until after the PHY is out of reset, but we 1614 * It would be better if we had a way to disable MDC until after the
1711 * don't have that capability. 1615 * PHY is out of reset, but we don't have that capability.
1712 */ 1616 */
1713static int ql_mii_setup(struct ql3_adapter *qdev) 1617static int ql_mii_setup(struct ql3_adapter *qdev)
1714{ 1618{
1715 u32 reg; 1619 u32 reg;
1716 struct ql3xxx_port_registers __iomem *port_regs = 1620 struct ql3xxx_port_registers __iomem *port_regs =
1717 qdev->mem_map_registers; 1621 qdev->mem_map_registers;
1718 1622
1719 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 1623 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1720 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 1624 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1721 2) << 7)) 1625 2) << 7))
1722 return -1; 1626 return -1;
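
Aside: the `(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 7` expression in the call above recurs throughout the file, with shifts of 7 (PHY GIO), 4 (DDR RAM) and 13 (flash) selecting the semaphore field. A standalone sketch of what it computes (editorial; `ql_sem_bits()` is hypothetical and the base-code value is a stand-in, not taken from qla3xxx.h):

#include <stdio.h>

#define QL_RESOURCE_BITS_BASE_CODE 0x5	/* stand-in value */

static unsigned int ql_sem_bits(unsigned int mac_index, unsigned int shift)
{
	/* same precedence as the driver: (base | mac_index * 2) << shift */
	return (QL_RESOURCE_BITS_BASE_CODE | (mac_index * 2)) << shift;
}

int main(void)
{
	printf("mac0 PHY GIO: 0x%x\n", ql_sem_bits(0, 7));
	printf("mac1 PHY GIO: 0x%x\n", ql_sem_bits(1, 7));
	return 0;
}
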
@@ -1735,24 +1639,24 @@ static int ql_mii_setup(struct ql3_adapter *qdev)
1735 return 0; 1639 return 0;
1736} 1640}
1737 1641
1642#define SUPPORTED_OPTICAL_MODES (SUPPORTED_1000baseT_Full | \
1643 SUPPORTED_FIBRE | \
1644 SUPPORTED_Autoneg)
1645#define SUPPORTED_TP_MODES (SUPPORTED_10baseT_Half | \
1646 SUPPORTED_10baseT_Full | \
1647 SUPPORTED_100baseT_Half | \
1648 SUPPORTED_100baseT_Full | \
1649 SUPPORTED_1000baseT_Half | \
1650 SUPPORTED_1000baseT_Full | \
1651 SUPPORTED_Autoneg | \
1652 SUPPORTED_TP)
1653
1738static u32 ql_supported_modes(struct ql3_adapter *qdev) 1654static u32 ql_supported_modes(struct ql3_adapter *qdev)
1739{ 1655{
1740 u32 supported; 1656 if (test_bit(QL_LINK_OPTICAL, &qdev->flags))
1657 return SUPPORTED_OPTICAL_MODES;
1741 1658
1742 if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) { 1659 return SUPPORTED_TP_MODES;
1743 supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
1744 | SUPPORTED_Autoneg;
1745 } else {
1746 supported = SUPPORTED_10baseT_Half
1747 | SUPPORTED_10baseT_Full
1748 | SUPPORTED_100baseT_Half
1749 | SUPPORTED_100baseT_Full
1750 | SUPPORTED_1000baseT_Half
1751 | SUPPORTED_1000baseT_Full
1752 | SUPPORTED_Autoneg | SUPPORTED_TP;
1753 }
1754
1755 return supported;
1756} 1660}
1757 1661
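
The two *_MODES macros are consumed as plain expressions (ql_supported_modes() returns them directly), so they must expand to a parenthesized value with no trailing semicolon. A standalone illustration of that usage (editorial sketch; assumes the userspace copy of <linux/ethtool.h> on Linux, and shows only the optical mask):

#include <stdio.h>
#include <linux/ethtool.h>

#define SUPPORTED_OPTICAL_MODES	(SUPPORTED_1000baseT_Full | \
				 SUPPORTED_FIBRE | \
				 SUPPORTED_Autoneg)

int main(void)
{
	/* assigned/returned directly, as in ql_supported_modes() */
	unsigned int supported = SUPPORTED_OPTICAL_MODES;

	printf("optical mask: 0x%x\n", supported);
	return 0;
}
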
1758static int ql_get_auto_cfg_status(struct ql3_adapter *qdev) 1662static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
@@ -1760,9 +1664,9 @@ static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
1760 int status; 1664 int status;
1761 unsigned long hw_flags; 1665 unsigned long hw_flags;
1762 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 1666 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1763 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 1667 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1764 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 1668 (QL_RESOURCE_BITS_BASE_CODE |
1765 2) << 7)) { 1669 (qdev->mac_index) * 2) << 7)) {
1766 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1670 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1767 return 0; 1671 return 0;
1768 } 1672 }
@@ -1777,9 +1681,9 @@ static u32 ql_get_speed(struct ql3_adapter *qdev)
1777 u32 status; 1681 u32 status;
1778 unsigned long hw_flags; 1682 unsigned long hw_flags;
1779 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 1683 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1780 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 1684 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1781 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 1685 (QL_RESOURCE_BITS_BASE_CODE |
1782 2) << 7)) { 1686 (qdev->mac_index) * 2) << 7)) {
1783 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1687 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1784 return 0; 1688 return 0;
1785 } 1689 }
@@ -1794,9 +1698,9 @@ static int ql_get_full_dup(struct ql3_adapter *qdev)
1794 int status; 1698 int status;
1795 unsigned long hw_flags; 1699 unsigned long hw_flags;
1796 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 1700 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1797 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 1701 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1798 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 1702 (QL_RESOURCE_BITS_BASE_CODE |
1799 2) << 7)) { 1703 (qdev->mac_index) * 2) << 7)) {
1800 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1704 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1801 return 0; 1705 return 0;
1802 } 1706 }
@@ -1806,7 +1710,6 @@ static int ql_get_full_dup(struct ql3_adapter *qdev)
1806 return status; 1710 return status;
1807} 1711}
1808 1712
1809
1810static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) 1713static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
1811{ 1714{
1812 struct ql3_adapter *qdev = netdev_priv(ndev); 1715 struct ql3_adapter *qdev = netdev_priv(ndev);
@@ -1814,7 +1717,7 @@ static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
1814 ecmd->transceiver = XCVR_INTERNAL; 1717 ecmd->transceiver = XCVR_INTERNAL;
1815 ecmd->supported = ql_supported_modes(qdev); 1718 ecmd->supported = ql_supported_modes(qdev);
1816 1719
1817 if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) { 1720 if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
1818 ecmd->port = PORT_FIBRE; 1721 ecmd->port = PORT_FIBRE;
1819 } else { 1722 } else {
1820 ecmd->port = PORT_TP; 1723 ecmd->port = PORT_TP;
@@ -1855,10 +1758,11 @@ static void ql_get_pauseparam(struct net_device *ndev,
1855 struct ethtool_pauseparam *pause) 1758 struct ethtool_pauseparam *pause)
1856{ 1759{
1857 struct ql3_adapter *qdev = netdev_priv(ndev); 1760 struct ql3_adapter *qdev = netdev_priv(ndev);
1858 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; 1761 struct ql3xxx_port_registers __iomem *port_regs =
1762 qdev->mem_map_registers;
1859 1763
1860 u32 reg; 1764 u32 reg;
1861 if(qdev->mac_index == 0) 1765 if (qdev->mac_index == 0)
1862 reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg); 1766 reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
1863 else 1767 else
1864 reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg); 1768 reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);
@@ -1885,12 +1789,12 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
1885 1789
1886 while (lrg_buf_cb) { 1790 while (lrg_buf_cb) {
1887 if (!lrg_buf_cb->skb) { 1791 if (!lrg_buf_cb->skb) {
1888 lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev, 1792 lrg_buf_cb->skb =
1889 qdev->lrg_buffer_len); 1793 netdev_alloc_skb(qdev->ndev,
1794 qdev->lrg_buffer_len);
1890 if (unlikely(!lrg_buf_cb->skb)) { 1795 if (unlikely(!lrg_buf_cb->skb)) {
1891 printk(KERN_DEBUG PFX 1796 netdev_printk(KERN_DEBUG, qdev->ndev,
1892 "%s: Failed netdev_alloc_skb().\n", 1797 "Failed netdev_alloc_skb()\n");
1893 qdev->ndev->name);
1894 break; 1798 break;
1895 } else { 1799 } else {
1896 /* 1800 /*
@@ -1905,9 +1809,10 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
1905 PCI_DMA_FROMDEVICE); 1809 PCI_DMA_FROMDEVICE);
1906 1810
1907 err = pci_dma_mapping_error(qdev->pdev, map); 1811 err = pci_dma_mapping_error(qdev->pdev, map);
1908 if(err) { 1812 if (err) {
1909 printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", 1813 netdev_err(qdev->ndev,
1910 qdev->ndev->name, err); 1814 "PCI mapping failed with error: %d\n",
1815 err);
1911 dev_kfree_skb(lrg_buf_cb->skb); 1816 dev_kfree_skb(lrg_buf_cb->skb);
1912 lrg_buf_cb->skb = NULL; 1817 lrg_buf_cb->skb = NULL;
1913 break; 1818 break;
@@ -1915,9 +1820,9 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
1915 1820
1916 1821
1917 lrg_buf_cb->buf_phy_addr_low = 1822 lrg_buf_cb->buf_phy_addr_low =
1918 cpu_to_le32(LS_64BITS(map)); 1823 cpu_to_le32(LS_64BITS(map));
1919 lrg_buf_cb->buf_phy_addr_high = 1824 lrg_buf_cb->buf_phy_addr_high =
1920 cpu_to_le32(MS_64BITS(map)); 1825 cpu_to_le32(MS_64BITS(map));
1921 dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); 1826 dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
1922 dma_unmap_len_set(lrg_buf_cb, maplen, 1827 dma_unmap_len_set(lrg_buf_cb, maplen,
1923 qdev->lrg_buffer_len - 1828 qdev->lrg_buffer_len -
@@ -1937,7 +1842,9 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
1937 */ 1842 */
1938static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev) 1843static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
1939{ 1844{
1940 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; 1845 struct ql3xxx_port_registers __iomem *port_regs =
1846 qdev->mem_map_registers;
1847
1941 if (qdev->small_buf_release_cnt >= 16) { 1848 if (qdev->small_buf_release_cnt >= 16) {
1942 while (qdev->small_buf_release_cnt >= 16) { 1849 while (qdev->small_buf_release_cnt >= 16) {
1943 qdev->small_buf_q_producer_index++; 1850 qdev->small_buf_q_producer_index++;
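
Aside: both buffer-queue updaters here follow the same shape: release entries in batches (16 at a time for small buffers; large buffers additionally require 8 free), wrap the producer index at the ring length, then publish it to the chip. The wrap step in isolation (editorial sketch; `advance()` is hypothetical and `ring_len` stands in for NUM_SBUFQ_ENTRIES or qdev->num_lbufq_entries):

#include <assert.h>

static unsigned int advance(unsigned int idx, unsigned int ring_len)
{
	return (idx + 1 == ring_len) ? 0 : idx + 1;
}

int main(void)
{
	assert(advance(14, 16) == 15);
	assert(advance(15, 16) == 0);	/* wraps like the producer index */
	return 0;
}
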
@@ -1961,7 +1868,8 @@ static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
1961 struct bufq_addr_element *lrg_buf_q_ele; 1868 struct bufq_addr_element *lrg_buf_q_ele;
1962 int i; 1869 int i;
1963 struct ql_rcv_buf_cb *lrg_buf_cb; 1870 struct ql_rcv_buf_cb *lrg_buf_cb;
1964 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; 1871 struct ql3xxx_port_registers __iomem *port_regs =
1872 qdev->mem_map_registers;
1965 1873
1966 if ((qdev->lrg_buf_free_count >= 8) && 1874 if ((qdev->lrg_buf_free_count >= 8) &&
1967 (qdev->lrg_buf_release_cnt >= 16)) { 1875 (qdev->lrg_buf_release_cnt >= 16)) {
@@ -1989,7 +1897,8 @@ static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
1989 1897
1990 qdev->lrg_buf_q_producer_index++; 1898 qdev->lrg_buf_q_producer_index++;
1991 1899
1992 if (qdev->lrg_buf_q_producer_index == qdev->num_lbufq_entries) 1900 if (qdev->lrg_buf_q_producer_index ==
1901 qdev->num_lbufq_entries)
1993 qdev->lrg_buf_q_producer_index = 0; 1902 qdev->lrg_buf_q_producer_index = 0;
1994 1903
1995 if (qdev->lrg_buf_q_producer_index == 1904 if (qdev->lrg_buf_q_producer_index ==
@@ -2011,23 +1920,26 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
2011 int i; 1920 int i;
2012 int retval = 0; 1921 int retval = 0;
2013 1922
2014 if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) { 1923 if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
2015 printk(KERN_WARNING "Frame short but, frame was padded and sent.\n"); 1924 netdev_warn(qdev->ndev,
1925 "Frame too short but it was padded and sent\n");
2016 } 1926 }
2017 1927
2018 tx_cb = &qdev->tx_buf[mac_rsp->transaction_id]; 1928 tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
2019 1929
2020 /* Check the transmit response flags for any errors */ 1930 /* Check the transmit response flags for any errors */
2021 if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) { 1931 if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
2022 printk(KERN_ERR "Frame too short to be legal, frame not sent.\n"); 1932 netdev_err(qdev->ndev,
1933 "Frame too short to be legal, frame not sent\n");
2023 1934
2024 qdev->ndev->stats.tx_errors++; 1935 qdev->ndev->stats.tx_errors++;
2025 retval = -EIO; 1936 retval = -EIO;
2026 goto frame_not_sent; 1937 goto frame_not_sent;
2027 } 1938 }
2028 1939
2029 if(tx_cb->seg_count == 0) { 1940 if (tx_cb->seg_count == 0) {
2030 printk(KERN_ERR "tx_cb->seg_count == 0: %d\n", mac_rsp->transaction_id); 1941 netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
1942 mac_rsp->transaction_id);
2031 1943
2032 qdev->ndev->stats.tx_errors++; 1944 qdev->ndev->stats.tx_errors++;
2033 retval = -EIO; 1945 retval = -EIO;
@@ -2073,7 +1985,7 @@ static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
2073 qdev->lrg_buf_release_cnt++; 1985 qdev->lrg_buf_release_cnt++;
2074 if (++qdev->lrg_buf_index == qdev->num_large_buffers) 1986 if (++qdev->lrg_buf_index == qdev->num_large_buffers)
2075 qdev->lrg_buf_index = 0; 1987 qdev->lrg_buf_index = 0;
2076 return(lrg_buf_cb); 1988 return lrg_buf_cb;
2077} 1989}
2078 1990
2079/* 1991/*
@@ -2177,12 +2089,11 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
2177 if (checksum & 2089 if (checksum &
2178 (IB_IP_IOCB_RSP_3032_ICE | 2090 (IB_IP_IOCB_RSP_3032_ICE |
2179 IB_IP_IOCB_RSP_3032_CE)) { 2091 IB_IP_IOCB_RSP_3032_CE)) {
2180 printk(KERN_ERR 2092 netdev_err(ndev,
2181 "%s: Bad checksum for this %s packet, checksum = %x.\n", 2093 "%s: Bad checksum for this %s packet, checksum = %x\n",
2182 __func__, 2094 __func__,
2183 ((checksum & 2095 ((checksum & IB_IP_IOCB_RSP_3032_TCP) ?
2184 IB_IP_IOCB_RSP_3032_TCP) ? "TCP" : 2096 "TCP" : "UDP"), checksum);
2185 "UDP"),checksum);
2186 } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) || 2097 } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
2187 (checksum & IB_IP_IOCB_RSP_3032_UDP && 2098 (checksum & IB_IP_IOCB_RSP_3032_UDP &&
2188 !(checksum & IB_IP_IOCB_RSP_3032_NUC))) { 2099 !(checksum & IB_IP_IOCB_RSP_3032_NUC))) {
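
The branch above reduces to a single predicate: any checksum-error bit means the frame is bad (and is logged), and CHECKSUM_UNNECESSARY is claimed only for TCP, or for UDP frames the chip actually checksummed. A standalone restatement (editorial sketch; the bit values are stand-ins, and reading NUC as "UDP not checksummed" is an assumption, since the real definitions live in qla3xxx.h):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define IB_IP_IOCB_RSP_3032_ICE 0x01	/* stand-in: IP csum error */
#define IB_IP_IOCB_RSP_3032_CE  0x02	/* stand-in: TCP/UDP csum error */
#define IB_IP_IOCB_RSP_3032_TCP 0x04	/* stand-in: frame is TCP */
#define IB_IP_IOCB_RSP_3032_UDP 0x08	/* stand-in: frame is UDP */
#define IB_IP_IOCB_RSP_3032_NUC 0x10	/* stand-in: UDP not checksummed */

static bool rx_csum_unnecessary(uint32_t checksum)
{
	if (checksum & (IB_IP_IOCB_RSP_3032_ICE | IB_IP_IOCB_RSP_3032_CE))
		return false;	/* logged as a bad checksum above */
	return (checksum & IB_IP_IOCB_RSP_3032_TCP) ||
	       ((checksum & IB_IP_IOCB_RSP_3032_UDP) &&
		!(checksum & IB_IP_IOCB_RSP_3032_NUC));
}

int main(void)
{
	assert(rx_csum_unnecessary(IB_IP_IOCB_RSP_3032_TCP));
	assert(!rx_csum_unnecessary(IB_IP_IOCB_RSP_3032_UDP |
				    IB_IP_IOCB_RSP_3032_NUC));
	return 0;
}
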
@@ -2215,8 +2126,8 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
2215 net_rsp = qdev->rsp_current; 2126 net_rsp = qdev->rsp_current;
2216 rmb(); 2127 rmb();
2217 /* 2128 /*
2218 * Fix 4032 chipe undocumented "feature" where bit-8 is set if the 2129 * Fix 4032 chip's undocumented "feature" where bit-8 is set
2219 * inbound completion is for a VLAN. 2130 * if the inbound completion is for a VLAN.
2220 */ 2131 */
2221 if (qdev->device_id == QL3032_DEVICE_ID) 2132 if (qdev->device_id == QL3032_DEVICE_ID)
2222 net_rsp->opcode &= 0x7f; 2133 net_rsp->opcode &= 0x7f;
@@ -2242,22 +2153,18 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
2242 net_rsp); 2153 net_rsp);
2243 (*rx_cleaned)++; 2154 (*rx_cleaned)++;
2244 break; 2155 break;
2245 default: 2156 default: {
2246 { 2157 u32 *tmp = (u32 *)net_rsp;
2247 u32 *tmp = (u32 *) net_rsp; 2158 netdev_err(ndev,
2248 printk(KERN_ERR PFX 2159 "Hit default case, not handled!\n"
2249 "%s: Hit default case, not " 2160 " dropping the packet, opcode = %x\n"
2250 "handled!\n" 2161 "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
2251 " dropping the packet, opcode = " 2162 net_rsp->opcode,
2252 "%x.\n", 2163 (unsigned long int)tmp[0],
2253 ndev->name, net_rsp->opcode); 2164 (unsigned long int)tmp[1],
2254 printk(KERN_ERR PFX 2165 (unsigned long int)tmp[2],
2255 "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", 2166 (unsigned long int)tmp[3]);
2256 (unsigned long int)tmp[0], 2167 }
2257 (unsigned long int)tmp[1],
2258 (unsigned long int)tmp[2],
2259 (unsigned long int)tmp[3]);
2260 }
2261 } 2168 }
2262 2169
2263 qdev->rsp_consumer_index++; 2170 qdev->rsp_consumer_index++;
@@ -2280,7 +2187,8 @@ static int ql_poll(struct napi_struct *napi, int budget)
2280 struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi); 2187 struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
2281 int rx_cleaned = 0, tx_cleaned = 0; 2188 int rx_cleaned = 0, tx_cleaned = 0;
2282 unsigned long hw_flags; 2189 unsigned long hw_flags;
2283 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; 2190 struct ql3xxx_port_registers __iomem *port_regs =
2191 qdev->mem_map_registers;
2284 2192
2285 ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget); 2193 ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget);
2286 2194
@@ -2303,15 +2211,14 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
2303 2211
2304 struct net_device *ndev = dev_id; 2212 struct net_device *ndev = dev_id;
2305 struct ql3_adapter *qdev = netdev_priv(ndev); 2213 struct ql3_adapter *qdev = netdev_priv(ndev);
2306 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; 2214 struct ql3xxx_port_registers __iomem *port_regs =
2215 qdev->mem_map_registers;
2307 u32 value; 2216 u32 value;
2308 int handled = 1; 2217 int handled = 1;
2309 u32 var; 2218 u32 var;
2310 2219
2311 port_regs = qdev->mem_map_registers; 2220 value = ql_read_common_reg_l(qdev,
2312 2221 &port_regs->CommonRegs.ispControlStatus);
2313 value =
2314 ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
2315 2222
2316 if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) { 2223 if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
2317 spin_lock(&qdev->adapter_lock); 2224 spin_lock(&qdev->adapter_lock);
@@ -2319,7 +2226,7 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
2319 netif_carrier_off(qdev->ndev); 2226 netif_carrier_off(qdev->ndev);
2320 ql_disable_interrupts(qdev); 2227 ql_disable_interrupts(qdev);
2321 qdev->port_link_state = LS_DOWN; 2228 qdev->port_link_state = LS_DOWN;
2322 set_bit(QL_RESET_ACTIVE,&qdev->flags) ; 2229 set_bit(QL_RESET_ACTIVE, &qdev->flags) ;
2323 2230
2324 if (value & ISP_CONTROL_FE) { 2231 if (value & ISP_CONTROL_FE) {
2325 /* 2232 /*
@@ -2328,69 +2235,53 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
2328 var = 2235 var =
2329 ql_read_page0_reg_l(qdev, 2236 ql_read_page0_reg_l(qdev,
2330 &port_regs->PortFatalErrStatus); 2237 &port_regs->PortFatalErrStatus);
2331 printk(KERN_WARNING PFX 2238 netdev_warn(ndev,
2332 "%s: Resetting chip. PortFatalErrStatus " 2239 "Resetting chip. PortFatalErrStatus register = 0x%x\n",
2333 "register = 0x%x\n", ndev->name, var); 2240 var);
2334 set_bit(QL_RESET_START,&qdev->flags) ; 2241 set_bit(QL_RESET_START, &qdev->flags) ;
2335 } else { 2242 } else {
2336 /* 2243 /*
2337 * Soft Reset Requested. 2244 * Soft Reset Requested.
2338 */ 2245 */
2339 set_bit(QL_RESET_PER_SCSI,&qdev->flags) ; 2246 set_bit(QL_RESET_PER_SCSI, &qdev->flags) ;
2340 printk(KERN_ERR PFX 2247 netdev_err(ndev,
2341 "%s: Another function issued a reset to the " 2248 "Another function issued a reset to the chip. ISR value = %x\n",
2342 "chip. ISR value = %x.\n", ndev->name, value); 2249 value);
2343 } 2250 }
2344 queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0); 2251 queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
2345 spin_unlock(&qdev->adapter_lock); 2252 spin_unlock(&qdev->adapter_lock);
2346 } else if (value & ISP_IMR_DISABLE_CMPL_INT) { 2253 } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
2347 ql_disable_interrupts(qdev); 2254 ql_disable_interrupts(qdev);
2348 if (likely(napi_schedule_prep(&qdev->napi))) { 2255 if (likely(napi_schedule_prep(&qdev->napi)))
2349 __napi_schedule(&qdev->napi); 2256 __napi_schedule(&qdev->napi);
2350 } 2257 } else
2351 } else {
2352 return IRQ_NONE; 2258 return IRQ_NONE;
2353 }
2354 2259
2355 return IRQ_RETVAL(handled); 2260 return IRQ_RETVAL(handled);
2356} 2261}
2357 2262
2358/* 2263/*
2359 * Get the total number of segments needed for the 2264 * Get the total number of segments needed for the given number of fragments.
2360 * given number of fragments. This is necessary because 2265 * This is necessary because outbound address lists (OAL) will be used when
2361 * outbound address lists (OAL) will be used when more than 2266 * more than two frags are given. Each address list has 5 addr/len pairs.
2362 * two frags are given. Each address list has 5 addr/len 2267 * The 5th pair in each OAL is used to point to the next OAL if more frags
2363 * pairs. The 5th pair in each AOL is used to point to 2268 * are coming. That is why the frags:segment count ratio is not linear.
2364 * the next AOL if more frags are coming.
2365 * That is why the frags:segment count ratio is not linear.
2366 */ 2269 */
2367static int ql_get_seg_count(struct ql3_adapter *qdev, 2270static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags)
2368 unsigned short frags)
2369{ 2271{
2370 if (qdev->device_id == QL3022_DEVICE_ID) 2272 if (qdev->device_id == QL3022_DEVICE_ID)
2371 return 1; 2273 return 1;
2372 2274
2373 switch(frags) { 2275 if (frags <= 2)
2374 case 0: return 1; /* just the skb->data seg */ 2276 return frags + 1;
2375 case 1: return 2; /* skb->data + 1 frag */ 2277 else if (frags <= 6)
2376 case 2: return 3; /* skb->data + 2 frags */ 2278 return frags + 2;
2377 case 3: return 5; /* skb->data + 1 frag + 1 AOL containting 2 frags */ 2279 else if (frags <= 10)
2378 case 4: return 6; 2280 return frags + 3;
2379 case 5: return 7; 2281 else if (frags <= 14)
2380 case 6: return 8; 2282 return frags + 4;
2381 case 7: return 10; 2283 else if (frags <= 18)
2382 case 8: return 11; 2284 return frags + 5;
2383 case 9: return 12;
2384 case 10: return 13;
2385 case 11: return 15;
2386 case 12: return 16;
2387 case 13: return 17;
2388 case 14: return 18;
2389 case 15: return 20;
2390 case 16: return 21;
2391 case 17: return 22;
2392 case 18: return 23;
2393 }
2394 return -1; 2285 return -1;
2395} 2286}
2396 2287
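
The closed-form replacement can be checked against the deleted lookup table: three segments fit inline, after which every group of four fragments costs one extra OAL continuation entry. A standalone self-check for the QL3032 path (editorial sketch, not part of the patch):

#include <assert.h>

static int seg_count(int frags)
{
	if (frags <= 2)
		return frags + 1;	/* skb->data plus inline frags */
	else if (frags <= 6)
		return frags + 2;	/* one OAL continuation entry */
	else if (frags <= 10)
		return frags + 3;
	else if (frags <= 14)
		return frags + 4;
	else if (frags <= 18)
		return frags + 5;
	return -1;
}

int main(void)
{
	/* the values removed above, indexed by fragment count 0..18 */
	static const int table[] = { 1, 2, 3, 5, 6, 7, 8, 10, 11, 12,
				     13, 15, 16, 17, 18, 20, 21, 22, 23 };
	int i;

	for (i = 0; i <= 18; i++)
		assert(seg_count(i) == table[i]);
	return 0;
}
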
@@ -2413,8 +2304,8 @@ static void ql_hw_csum_setup(const struct sk_buff *skb,
2413} 2304}
2414 2305
2415/* 2306/*
2416 * Map the buffers for this transmit. This will return 2307 * Map the buffers for this transmit.
2417 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success. 2308 * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
2418 */ 2309 */
2419static int ql_send_map(struct ql3_adapter *qdev, 2310static int ql_send_map(struct ql3_adapter *qdev,
2420 struct ob_mac_iocb_req *mac_iocb_ptr, 2311 struct ob_mac_iocb_req *mac_iocb_ptr,
@@ -2437,9 +2328,9 @@ static int ql_send_map(struct ql3_adapter *qdev,
2437 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); 2328 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
2438 2329
2439 err = pci_dma_mapping_error(qdev->pdev, map); 2330 err = pci_dma_mapping_error(qdev->pdev, map);
2440 if(err) { 2331 if (err) {
2441 printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", 2332 netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n",
2442 qdev->ndev->name, err); 2333 err);
2443 2334
2444 return NETDEV_TX_BUSY; 2335 return NETDEV_TX_BUSY;
2445 } 2336 }
@@ -2455,65 +2346,67 @@ static int ql_send_map(struct ql3_adapter *qdev,
2455 if (seg_cnt == 1) { 2346 if (seg_cnt == 1) {
2456 /* Terminate the last segment. */ 2347 /* Terminate the last segment. */
2457 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); 2348 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
2458 } else { 2349 return NETDEV_TX_OK;
2459 oal = tx_cb->oal; 2350 }
2460 for (completed_segs=0; completed_segs<frag_cnt; completed_segs++,seg++) { 2351 oal = tx_cb->oal;
2461 skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs]; 2352 for (completed_segs = 0;
2462 oal_entry++; 2353 completed_segs < frag_cnt;
2463 if ((seg == 2 && seg_cnt > 3) || /* Check for continuation */ 2354 completed_segs++, seg++) {
2464 (seg == 7 && seg_cnt > 8) || /* requirements. It's strange */ 2355 skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
2465 (seg == 12 && seg_cnt > 13) || /* but necessary. */ 2356 oal_entry++;
2466 (seg == 17 && seg_cnt > 18)) { 2357 /*
2467 /* Continuation entry points to outbound address list. */ 2358 * Check for continuation requirements.
2468 map = pci_map_single(qdev->pdev, oal, 2359 * It's strange but necessary.
2469 sizeof(struct oal), 2360 * Continuation entry points to outbound address list.
2470 PCI_DMA_TODEVICE); 2361 */
2471 2362 if ((seg == 2 && seg_cnt > 3) ||
2472 err = pci_dma_mapping_error(qdev->pdev, map); 2363 (seg == 7 && seg_cnt > 8) ||
2473 if(err) { 2364 (seg == 12 && seg_cnt > 13) ||
2474 2365 (seg == 17 && seg_cnt > 18)) {
2475 printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n", 2366 map = pci_map_single(qdev->pdev, oal,
2476 qdev->ndev->name, err); 2367 sizeof(struct oal),
2477 goto map_error; 2368 PCI_DMA_TODEVICE);
2478 }
2479
2480 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2481 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2482 oal_entry->len =
2483 cpu_to_le32(sizeof(struct oal) |
2484 OAL_CONT_ENTRY);
2485 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr,
2486 map);
2487 dma_unmap_len_set(&tx_cb->map[seg], maplen,
2488 sizeof(struct oal));
2489 oal_entry = (struct oal_entry *)oal;
2490 oal++;
2491 seg++;
2492 }
2493
2494 map =
2495 pci_map_page(qdev->pdev, frag->page,
2496 frag->page_offset, frag->size,
2497 PCI_DMA_TODEVICE);
2498 2369
2499 err = pci_dma_mapping_error(qdev->pdev, map); 2370 err = pci_dma_mapping_error(qdev->pdev, map);
2500 if(err) { 2371 if (err) {
2501 printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n", 2372 netdev_err(qdev->ndev,
2502 qdev->ndev->name, err); 2373 "PCI mapping outbound address list with error: %d\n",
2374 err);
2503 goto map_error; 2375 goto map_error;
2504 } 2376 }
2505 2377
2506 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2378 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2507 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2379 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2508 oal_entry->len = cpu_to_le32(frag->size); 2380 oal_entry->len = cpu_to_le32(sizeof(struct oal) |
2381 OAL_CONT_ENTRY);
2509 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); 2382 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2510 dma_unmap_len_set(&tx_cb->map[seg], maplen, 2383 dma_unmap_len_set(&tx_cb->map[seg], maplen,
2511 frag->size); 2384 sizeof(struct oal));
2385 oal_entry = (struct oal_entry *)oal;
2386 oal++;
2387 seg++;
2388 }
2389
2390 map = pci_map_page(qdev->pdev, frag->page,
2391 frag->page_offset, frag->size,
2392 PCI_DMA_TODEVICE);
2393
2394 err = pci_dma_mapping_error(qdev->pdev, map);
2395 if (err) {
2396 netdev_err(qdev->ndev,
2397 "PCI mapping frags failed with error: %d\n",
2398 err);
2399 goto map_error;
2512 } 2400 }
2513 /* Terminate the last segment. */
2514 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
2515 }
2516 2401
2402 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2403 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2404 oal_entry->len = cpu_to_le32(frag->size);
2405 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2406 dma_unmap_len_set(&tx_cb->map[seg], maplen, frag->size);
2407 }
2408 /* Terminate the last segment. */
2409 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
2517 return NETDEV_TX_OK; 2410 return NETDEV_TX_OK;
2518 2411
2519map_error: 2412map_error:
@@ -2525,13 +2418,18 @@ map_error:
2525 seg = 1; 2418 seg = 1;
2526 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; 2419 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
2527 oal = tx_cb->oal; 2420 oal = tx_cb->oal;
2528 for (i=0; i<completed_segs; i++,seg++) { 2421 for (i = 0; i < completed_segs; i++, seg++) {
2529 oal_entry++; 2422 oal_entry++;
2530 2423
2531 if((seg == 2 && seg_cnt > 3) || /* Check for continuation */ 2424 /*
2532 (seg == 7 && seg_cnt > 8) || /* requirements. It's strange */ 2425 * Check for continuation requirements.
2533 (seg == 12 && seg_cnt > 13) || /* but necessary. */ 2426 * It's strange but necessary.
2534 (seg == 17 && seg_cnt > 18)) { 2427 */
2428
2429 if ((seg == 2 && seg_cnt > 3) ||
2430 (seg == 7 && seg_cnt > 8) ||
2431 (seg == 12 && seg_cnt > 13) ||
2432 (seg == 17 && seg_cnt > 18)) {
2535 pci_unmap_single(qdev->pdev, 2433 pci_unmap_single(qdev->pdev,
2536 dma_unmap_addr(&tx_cb->map[seg], mapaddr), 2434 dma_unmap_addr(&tx_cb->map[seg], mapaddr),
2537 dma_unmap_len(&tx_cb->map[seg], maplen), 2435 dma_unmap_len(&tx_cb->map[seg], maplen),
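
The "strange but necessary" continuation checks in ql_send_map() and its unwind path encode the descriptor layout: the IOCB holds three addr/len pairs inline, slot 2 is converted into a pointer to the first OAL once a fourth segment is needed, and each OAL then carries four data pairs plus one continuation pair, hence slots 2, 7, 12 and 17. As a standalone predicate (editorial sketch; `is_continuation_slot()` is hypothetical):

#include <assert.h>
#include <stdbool.h>

static bool is_continuation_slot(int seg, int seg_cnt)
{
	return (seg == 2 && seg_cnt > 3) ||
	       (seg == 7 && seg_cnt > 8) ||
	       (seg == 12 && seg_cnt > 13) ||
	       (seg == 17 && seg_cnt > 18);
}

int main(void)
{
	assert(!is_continuation_slot(2, 3));	/* 3 segs fit inline */
	assert(is_continuation_slot(2, 4));	/* 4th seg forces an OAL */
	assert(is_continuation_slot(7, 9));	/* next OAL boundary */
	return 0;
}
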
@@ -2570,19 +2468,20 @@ static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
2570 struct net_device *ndev) 2468 struct net_device *ndev)
2571{ 2469{
2572 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 2470 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
2573 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; 2471 struct ql3xxx_port_registers __iomem *port_regs =
2472 qdev->mem_map_registers;
2574 struct ql_tx_buf_cb *tx_cb; 2473 struct ql_tx_buf_cb *tx_cb;
2575 u32 tot_len = skb->len; 2474 u32 tot_len = skb->len;
2576 struct ob_mac_iocb_req *mac_iocb_ptr; 2475 struct ob_mac_iocb_req *mac_iocb_ptr;
2577 2476
2578 if (unlikely(atomic_read(&qdev->tx_count) < 2)) { 2477 if (unlikely(atomic_read(&qdev->tx_count) < 2))
2579 return NETDEV_TX_BUSY; 2478 return NETDEV_TX_BUSY;
2580 }
2581 2479
2582 tx_cb = &qdev->tx_buf[qdev->req_producer_index] ; 2480 tx_cb = &qdev->tx_buf[qdev->req_producer_index];
2583 if((tx_cb->seg_count = ql_get_seg_count(qdev, 2481 tx_cb->seg_count = ql_get_seg_count(qdev,
2584 (skb_shinfo(skb)->nr_frags))) == -1) { 2482 skb_shinfo(skb)->nr_frags);
2585 printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__); 2483 if (tx_cb->seg_count == -1) {
2484 netdev_err(ndev, "%s: invalid segment count!\n", __func__);
2586 return NETDEV_TX_OK; 2485 return NETDEV_TX_OK;
2587 } 2486 }
2588 2487
@@ -2598,8 +2497,8 @@ static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
2598 skb->ip_summed == CHECKSUM_PARTIAL) 2497 skb->ip_summed == CHECKSUM_PARTIAL)
2599 ql_hw_csum_setup(skb, mac_iocb_ptr); 2498 ql_hw_csum_setup(skb, mac_iocb_ptr);
2600 2499
2601 if(ql_send_map(qdev,mac_iocb_ptr,tx_cb,skb) != NETDEV_TX_OK) { 2500 if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) {
2602 printk(KERN_ERR PFX"%s: Could not map the segments!\n",__func__); 2501 netdev_err(ndev, "%s: Could not map the segments!\n", __func__);
2603 return NETDEV_TX_BUSY; 2502 return NETDEV_TX_BUSY;
2604 } 2503 }
2605 2504
@@ -2612,9 +2511,9 @@ static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
2612 &port_regs->CommonRegs.reqQProducerIndex, 2511 &port_regs->CommonRegs.reqQProducerIndex,
2613 qdev->req_producer_index); 2512 qdev->req_producer_index);
2614 2513
2615 if (netif_msg_tx_queued(qdev)) 2514 netif_printk(qdev, tx_queued, KERN_DEBUG, ndev,
2616 printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n", 2515 "tx queued, slot %d, len %d\n",
2617 ndev->name, qdev->req_producer_index, skb->len); 2516 qdev->req_producer_index, skb->len);
2618 2517
2619 atomic_dec(&qdev->tx_count); 2518 atomic_dec(&qdev->tx_count);
2620 return NETDEV_TX_OK; 2519 return NETDEV_TX_OK;
@@ -2632,8 +2531,7 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
2632 2531
2633 if ((qdev->req_q_virt_addr == NULL) || 2532 if ((qdev->req_q_virt_addr == NULL) ||
2634 LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) { 2533 LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
2635 printk(KERN_ERR PFX "%s: reqQ failed.\n", 2534 netdev_err(qdev->ndev, "reqQ failed\n");
2636 qdev->ndev->name);
2637 return -ENOMEM; 2535 return -ENOMEM;
2638 } 2536 }
2639 2537
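
The reqQ/rspQ allocations above are rejected unless the returned bus address is naturally aligned to the queue size (a power of two). The check as a standalone predicate (editorial sketch):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool naturally_aligned(uint64_t bus_addr, uint64_t size)
{
	/* power-of-two size: all low-order bits must be clear */
	return (bus_addr & (size - 1)) == 0;
}

int main(void)
{
	assert(naturally_aligned(0x10000, 0x1000));
	assert(!naturally_aligned(0x10800, 0x1000));
	return 0;
}
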
@@ -2646,25 +2544,22 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
2646 2544
2647 if ((qdev->rsp_q_virt_addr == NULL) || 2545 if ((qdev->rsp_q_virt_addr == NULL) ||
2648 LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) { 2546 LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
2649 printk(KERN_ERR PFX 2547 netdev_err(qdev->ndev, "rspQ allocation failed\n");
2650 "%s: rspQ allocation failed\n",
2651 qdev->ndev->name);
2652 pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size, 2548 pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
2653 qdev->req_q_virt_addr, 2549 qdev->req_q_virt_addr,
2654 qdev->req_q_phy_addr); 2550 qdev->req_q_phy_addr);
2655 return -ENOMEM; 2551 return -ENOMEM;
2656 } 2552 }
2657 2553
2658 set_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags); 2554 set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
2659 2555
2660 return 0; 2556 return 0;
2661} 2557}
2662 2558
2663static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev) 2559static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
2664{ 2560{
2665 if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags)) { 2561 if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) {
2666 printk(KERN_INFO PFX 2562 netdev_info(qdev->ndev, "Already done\n");
2667 "%s: Already done.\n", qdev->ndev->name);
2668 return; 2563 return;
2669 } 2564 }
2670 2565
@@ -2680,34 +2575,34 @@ static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
2680 2575
2681 qdev->rsp_q_virt_addr = NULL; 2576 qdev->rsp_q_virt_addr = NULL;
2682 2577
2683 clear_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags); 2578 clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
2684} 2579}
2685 2580
2686static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) 2581static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
2687{ 2582{
2688 /* Create Large Buffer Queue */ 2583 /* Create Large Buffer Queue */
2689 qdev->lrg_buf_q_size = 2584 qdev->lrg_buf_q_size =
2690 qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry); 2585 qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
2691 if (qdev->lrg_buf_q_size < PAGE_SIZE) 2586 if (qdev->lrg_buf_q_size < PAGE_SIZE)
2692 qdev->lrg_buf_q_alloc_size = PAGE_SIZE; 2587 qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
2693 else 2588 else
2694 qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2; 2589 qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
2695 2590
2696 qdev->lrg_buf = kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb),GFP_KERNEL); 2591 qdev->lrg_buf =
2592 kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb),
2593 GFP_KERNEL);
2697 if (qdev->lrg_buf == NULL) { 2594 if (qdev->lrg_buf == NULL) {
2698 printk(KERN_ERR PFX 2595 netdev_err(qdev->ndev, "qdev->lrg_buf alloc failed\n");
2699 "%s: qdev->lrg_buf alloc failed.\n", qdev->ndev->name);
2700 return -ENOMEM; 2596 return -ENOMEM;
2701 } 2597 }
2702 2598
2703 qdev->lrg_buf_q_alloc_virt_addr = 2599 qdev->lrg_buf_q_alloc_virt_addr =
2704 pci_alloc_consistent(qdev->pdev, 2600 pci_alloc_consistent(qdev->pdev,
2705 qdev->lrg_buf_q_alloc_size, 2601 qdev->lrg_buf_q_alloc_size,
2706 &qdev->lrg_buf_q_alloc_phy_addr); 2602 &qdev->lrg_buf_q_alloc_phy_addr);
2707 2603
2708 if (qdev->lrg_buf_q_alloc_virt_addr == NULL) { 2604 if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
2709 printk(KERN_ERR PFX 2605 netdev_err(qdev->ndev, "lBufQ failed\n");
2710 "%s: lBufQ failed\n", qdev->ndev->name);
2711 return -ENOMEM; 2606 return -ENOMEM;
2712 } 2607 }
2713 qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr; 2608 qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
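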
@@ -2715,21 +2610,19 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
2715 2610
2716 /* Create Small Buffer Queue */ 2611 /* Create Small Buffer Queue */
2717 qdev->small_buf_q_size = 2612 qdev->small_buf_q_size =
2718 NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry); 2613 NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
2719 if (qdev->small_buf_q_size < PAGE_SIZE) 2614 if (qdev->small_buf_q_size < PAGE_SIZE)
2720 qdev->small_buf_q_alloc_size = PAGE_SIZE; 2615 qdev->small_buf_q_alloc_size = PAGE_SIZE;
2721 else 2616 else
2722 qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2; 2617 qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
2723 2618
2724 qdev->small_buf_q_alloc_virt_addr = 2619 qdev->small_buf_q_alloc_virt_addr =
2725 pci_alloc_consistent(qdev->pdev, 2620 pci_alloc_consistent(qdev->pdev,
2726 qdev->small_buf_q_alloc_size, 2621 qdev->small_buf_q_alloc_size,
2727 &qdev->small_buf_q_alloc_phy_addr); 2622 &qdev->small_buf_q_alloc_phy_addr);
2728 2623
2729 if (qdev->small_buf_q_alloc_virt_addr == NULL) { 2624 if (qdev->small_buf_q_alloc_virt_addr == NULL) {
2730 printk(KERN_ERR PFX 2625 netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n");
2731 "%s: Small Buffer Queue allocation failed.\n",
2732 qdev->ndev->name);
2733 pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size, 2626 pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
2734 qdev->lrg_buf_q_alloc_virt_addr, 2627 qdev->lrg_buf_q_alloc_virt_addr,
2735 qdev->lrg_buf_q_alloc_phy_addr); 2628 qdev->lrg_buf_q_alloc_phy_addr);
@@ -2738,18 +2631,17 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
2738 2631
2739 qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr; 2632 qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
2740 qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr; 2633 qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
2741 set_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags); 2634 set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
2742 return 0; 2635 return 0;
2743} 2636}
2744 2637
2745static void ql_free_buffer_queues(struct ql3_adapter *qdev) 2638static void ql_free_buffer_queues(struct ql3_adapter *qdev)
2746{ 2639{
2747 if (!test_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags)) { 2640 if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) {
2748 printk(KERN_INFO PFX 2641 netdev_info(qdev->ndev, "Already done\n");
2749 "%s: Already done.\n", qdev->ndev->name);
2750 return; 2642 return;
2751 } 2643 }
2752 if(qdev->lrg_buf) kfree(qdev->lrg_buf); 2644 kfree(qdev->lrg_buf);
2753 pci_free_consistent(qdev->pdev, 2645 pci_free_consistent(qdev->pdev,
2754 qdev->lrg_buf_q_alloc_size, 2646 qdev->lrg_buf_q_alloc_size,
2755 qdev->lrg_buf_q_alloc_virt_addr, 2647 qdev->lrg_buf_q_alloc_virt_addr,
@@ -2764,7 +2656,7 @@ static void ql_free_buffer_queues(struct ql3_adapter *qdev)
2764 2656
2765 qdev->small_buf_q_virt_addr = NULL; 2657 qdev->small_buf_q_virt_addr = NULL;
2766 2658
2767 clear_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags); 2659 clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
2768} 2660}
2769 2661
2770static int ql_alloc_small_buffers(struct ql3_adapter *qdev) 2662static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
@@ -2774,18 +2666,16 @@ static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
2774 2666
2775 /* Currently we allocate one chunk of memory and use it for small buffers */ 2667
2776 qdev->small_buf_total_size = 2668 qdev->small_buf_total_size =
2777 (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES * 2669 (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
2778 QL_SMALL_BUFFER_SIZE); 2670 QL_SMALL_BUFFER_SIZE);
2779 2671
2780 qdev->small_buf_virt_addr = 2672 qdev->small_buf_virt_addr =
2781 pci_alloc_consistent(qdev->pdev, 2673 pci_alloc_consistent(qdev->pdev,
2782 qdev->small_buf_total_size, 2674 qdev->small_buf_total_size,
2783 &qdev->small_buf_phy_addr); 2675 &qdev->small_buf_phy_addr);
2784 2676
2785 if (qdev->small_buf_virt_addr == NULL) { 2677 if (qdev->small_buf_virt_addr == NULL) {
2786 printk(KERN_ERR PFX 2678 netdev_err(qdev->ndev, "Failed to get small buffer memory\n");
2787 "%s: Failed to get small buffer memory.\n",
2788 qdev->ndev->name);
2789 return -ENOMEM; 2679 return -ENOMEM;
2790 } 2680 }
2791 2681
@@ -2804,15 +2694,14 @@ static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
2804 small_buf_q_entry++; 2694 small_buf_q_entry++;
2805 } 2695 }
2806 qdev->small_buf_index = 0; 2696 qdev->small_buf_index = 0;
2807 set_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags); 2697 set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags);
2808 return 0; 2698 return 0;
2809} 2699}
2810 2700
2811static void ql_free_small_buffers(struct ql3_adapter *qdev) 2701static void ql_free_small_buffers(struct ql3_adapter *qdev)
2812{ 2702{
2813 if (!test_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags)) { 2703 if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) {
2814 printk(KERN_INFO PFX 2704 netdev_info(qdev->ndev, "Already done\n");
2815 "%s: Already done.\n", qdev->ndev->name);
2816 return; 2705 return;
2817 } 2706 }
2818 if (qdev->small_buf_virt_addr != NULL) { 2707 if (qdev->small_buf_virt_addr != NULL) {
@@ -2874,11 +2763,9 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2874 qdev->lrg_buffer_len); 2763 qdev->lrg_buffer_len);
2875 if (unlikely(!skb)) { 2764 if (unlikely(!skb)) {
2876 /* Better luck next round */ 2765 /* Better luck next round */
2877 printk(KERN_ERR PFX 2766 netdev_err(qdev->ndev,
2878 "%s: large buff alloc failed, " 2767 "large buff alloc failed for %d bytes at index %d\n",
2879 "for %d bytes at index %d.\n", 2768 qdev->lrg_buffer_len * 2, i);
2880 qdev->ndev->name,
2881 qdev->lrg_buffer_len * 2, i);
2882 ql_free_large_buffers(qdev); 2769 ql_free_large_buffers(qdev);
2883 return -ENOMEM; 2770 return -ENOMEM;
2884 } else { 2771 } else {
@@ -2899,9 +2786,10 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2899 PCI_DMA_FROMDEVICE); 2786 PCI_DMA_FROMDEVICE);
2900 2787
2901 err = pci_dma_mapping_error(qdev->pdev, map); 2788 err = pci_dma_mapping_error(qdev->pdev, map);
2902 if(err) { 2789 if (err) {
2903 printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", 2790 netdev_err(qdev->ndev,
2904 qdev->ndev->name, err); 2791 "PCI mapping failed with error: %d\n",
2792 err);
2905 ql_free_large_buffers(qdev); 2793 ql_free_large_buffers(qdev);
2906 return -ENOMEM; 2794 return -ENOMEM;
2907 } 2795 }
@@ -2926,10 +2814,8 @@ static void ql_free_send_free_list(struct ql3_adapter *qdev)
2926 2814
2927 tx_cb = &qdev->tx_buf[0]; 2815 tx_cb = &qdev->tx_buf[0];
2928 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 2816 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2929 if (tx_cb->oal) { 2817 kfree(tx_cb->oal);
2930 kfree(tx_cb->oal); 2818 tx_cb->oal = NULL;
2931 tx_cb->oal = NULL;
2932 }
2933 tx_cb++; 2819 tx_cb++;
2934 } 2820 }
2935} 2821}
@@ -2938,8 +2824,7 @@ static int ql_create_send_free_list(struct ql3_adapter *qdev)
2938{ 2824{
2939 struct ql_tx_buf_cb *tx_cb; 2825 struct ql_tx_buf_cb *tx_cb;
2940 int i; 2826 int i;
2941 struct ob_mac_iocb_req *req_q_curr = 2827 struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr;
2942 qdev->req_q_virt_addr;
2943 2828
2944 /* Create free list of transmit buffers */ 2829 /* Create free list of transmit buffers */
2945 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 2830 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
@@ -2960,23 +2845,22 @@ static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
2960 if (qdev->ndev->mtu == NORMAL_MTU_SIZE) { 2845 if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
2961 qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES; 2846 qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
2962 qdev->lrg_buffer_len = NORMAL_MTU_SIZE; 2847 qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
2963 } 2848 } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
2964 else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
2965 /* 2849 /*
2966 * Bigger buffers, so less of them. 2850 * Bigger buffers, so less of them.
2967 */ 2851 */
2968 qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES; 2852 qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
2969 qdev->lrg_buffer_len = JUMBO_MTU_SIZE; 2853 qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
2970 } else { 2854 } else {
2971 printk(KERN_ERR PFX 2855 netdev_err(qdev->ndev, "Invalid mtu size: %d. Only %d and %d are accepted.\n",
2972 "%s: Invalid mtu size. Only 1500 and 9000 are accepted.\n", 2856 qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE);
2973 qdev->ndev->name);
2974 return -ENOMEM; 2857 return -ENOMEM;
2975 } 2858 }
2976 qdev->num_large_buffers = qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY; 2859 qdev->num_large_buffers =
2860 qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
2977 qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE; 2861 qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
2978 qdev->max_frame_size = 2862 qdev->max_frame_size =
2979 (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE; 2863 (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
2980 2864
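
The sizing arithmetic above: each MTU choice picks a buffer-queue depth, every queue entry addresses QL_ADDR_ELE_PER_BUFQ_ENTRY buffers, and each buffer is padded for the VLAN header, VLAN id and driver header space, with the CRC added back into max_frame_size. Worked through for the normal MTU (editorial sketch; the VLAN_ID_LEN and QL_HEADER_SPACE values are stand-ins, not taken from the driver headers):

#include <stdio.h>

#define VLAN_ETH_HLEN	  18	/* Ethernet header plus VLAN tag */
#define VLAN_ID_LEN	   2	/* stand-in */
#define QL_HEADER_SPACE	  64	/* stand-in */
#define ETHERNET_CRC_SIZE  4

int main(void)
{
	unsigned int mtu = 1500;	/* NORMAL_MTU_SIZE */
	unsigned int lrg_buffer_len =
		mtu + VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
	unsigned int max_frame_size =
		(lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;

	printf("large buffer %u bytes, max frame %u bytes\n",
	       lrg_buffer_len, max_frame_size);
	return 0;
}
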
2981 /* 2865 /*
2982 * First allocate a page of shared memory and use it for shadow 2866 * First allocate a page of shared memory and use it for shadow
@@ -2984,51 +2868,44 @@ static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
2984 * Network Completion Queue Producer Index Register 2868 * Network Completion Queue Producer Index Register
2985 */ 2869 */
2986 qdev->shadow_reg_virt_addr = 2870 qdev->shadow_reg_virt_addr =
2987 pci_alloc_consistent(qdev->pdev, 2871 pci_alloc_consistent(qdev->pdev,
2988 PAGE_SIZE, &qdev->shadow_reg_phy_addr); 2872 PAGE_SIZE, &qdev->shadow_reg_phy_addr);
2989 2873
2990 if (qdev->shadow_reg_virt_addr != NULL) { 2874 if (qdev->shadow_reg_virt_addr != NULL) {
2991 qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr; 2875 qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr;
2992 qdev->req_consumer_index_phy_addr_high = 2876 qdev->req_consumer_index_phy_addr_high =
2993 MS_64BITS(qdev->shadow_reg_phy_addr); 2877 MS_64BITS(qdev->shadow_reg_phy_addr);
2994 qdev->req_consumer_index_phy_addr_low = 2878 qdev->req_consumer_index_phy_addr_low =
2995 LS_64BITS(qdev->shadow_reg_phy_addr); 2879 LS_64BITS(qdev->shadow_reg_phy_addr);
2996 2880
2997 qdev->prsp_producer_index = 2881 qdev->prsp_producer_index =
2998 (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8); 2882 (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8);
2999 qdev->rsp_producer_index_phy_addr_high = 2883 qdev->rsp_producer_index_phy_addr_high =
3000 qdev->req_consumer_index_phy_addr_high; 2884 qdev->req_consumer_index_phy_addr_high;
3001 qdev->rsp_producer_index_phy_addr_low = 2885 qdev->rsp_producer_index_phy_addr_low =
3002 qdev->req_consumer_index_phy_addr_low + 8; 2886 qdev->req_consumer_index_phy_addr_low + 8;
3003 } else { 2887 } else {
3004 printk(KERN_ERR PFX 2888 netdev_err(qdev->ndev, "shadowReg Alloc failed\n");
3005 "%s: shadowReg Alloc failed.\n", qdev->ndev->name);
3006 return -ENOMEM; 2889 return -ENOMEM;
3007 } 2890 }
3008 2891
3009 if (ql_alloc_net_req_rsp_queues(qdev) != 0) { 2892 if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
3010 printk(KERN_ERR PFX 2893 netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n");
3011 "%s: ql_alloc_net_req_rsp_queues failed.\n",
3012 qdev->ndev->name);
3013 goto err_req_rsp; 2894 goto err_req_rsp;
3014 } 2895 }
3015 2896
3016 if (ql_alloc_buffer_queues(qdev) != 0) { 2897 if (ql_alloc_buffer_queues(qdev) != 0) {
3017 printk(KERN_ERR PFX 2898 netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n");
3018 "%s: ql_alloc_buffer_queues failed.\n",
3019 qdev->ndev->name);
3020 goto err_buffer_queues; 2899 goto err_buffer_queues;
3021 } 2900 }
3022 2901
3023 if (ql_alloc_small_buffers(qdev) != 0) { 2902 if (ql_alloc_small_buffers(qdev) != 0) {
3024 printk(KERN_ERR PFX 2903 netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n");
3025 "%s: ql_alloc_small_buffers failed\n", qdev->ndev->name);
3026 goto err_small_buffers; 2904 goto err_small_buffers;
3027 } 2905 }
3028 2906
3029 if (ql_alloc_large_buffers(qdev) != 0) { 2907 if (ql_alloc_large_buffers(qdev) != 0) {
3030 printk(KERN_ERR PFX 2908 netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n");
3031 "%s: ql_alloc_large_buffers failed\n", qdev->ndev->name);
3032 goto err_small_buffers; 2909 goto err_small_buffers;
3033 } 2910 }
3034 2911
@@ -3076,7 +2953,7 @@ static int ql_init_misc_registers(struct ql3_adapter *qdev)
3076 struct ql3xxx_local_ram_registers __iomem *local_ram = 2953 struct ql3xxx_local_ram_registers __iomem *local_ram =
3077 (void __iomem *)qdev->mem_map_registers; 2954 (void __iomem *)qdev->mem_map_registers;
3078 2955
3079 if(ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK, 2956 if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
3080 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2957 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
3081 2) << 4)) 2958 2) << 4))
3082 return -1; 2959 return -1;
@@ -3132,18 +3009,20 @@ static int ql_init_misc_registers(struct ql3_adapter *qdev)
3132static int ql_adapter_initialize(struct ql3_adapter *qdev) 3009static int ql_adapter_initialize(struct ql3_adapter *qdev)
3133{ 3010{
3134 u32 value; 3011 u32 value;
3135 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; 3012 struct ql3xxx_port_registers __iomem *port_regs =
3013 qdev->mem_map_registers;
3014 u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
3136 struct ql3xxx_host_memory_registers __iomem *hmem_regs = 3015 struct ql3xxx_host_memory_registers __iomem *hmem_regs =
3137 (void __iomem *)port_regs; 3016 (void __iomem *)port_regs;
3138 u32 delay = 10; 3017 u32 delay = 10;
3139 int status = 0; 3018 int status = 0;
3140 unsigned long hw_flags = 0; 3019 unsigned long hw_flags = 0;
3141 3020
3142 if(ql_mii_setup(qdev)) 3021 if (ql_mii_setup(qdev))
3143 return -1; 3022 return -1;
3144 3023
3145 /* Bring the PHY out of reset */ 3024
3146 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, 3025 ql_write_common_reg(qdev, spir,
3147 (ISP_SERIAL_PORT_IF_WE | 3026 (ISP_SERIAL_PORT_IF_WE |
3148 (ISP_SERIAL_PORT_IF_WE << 16))); 3027 (ISP_SERIAL_PORT_IF_WE << 16)));
3149 /* Give the PHY time to come out of reset. */ 3028 /* Give the PHY time to come out of reset. */
@@ -3152,13 +3031,13 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
3152 netif_carrier_off(qdev->ndev); 3031 netif_carrier_off(qdev->ndev);
3153 3032
3154 /* V2 chip fix for ARS-39168. */ 3033 /* V2 chip fix for ARS-39168. */
3155 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, 3034 ql_write_common_reg(qdev, spir,
3156 (ISP_SERIAL_PORT_IF_SDE | 3035 (ISP_SERIAL_PORT_IF_SDE |
3157 (ISP_SERIAL_PORT_IF_SDE << 16))); 3036 (ISP_SERIAL_PORT_IF_SDE << 16)));
3158 3037
3159 /* Request Queue Registers */ 3038 /* Request Queue Registers */
3160 *((u32 *) (qdev->preq_consumer_index)) = 0; 3039 *((u32 *)(qdev->preq_consumer_index)) = 0;
3161 atomic_set(&qdev->tx_count,NUM_REQ_Q_ENTRIES); 3040 atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES);
3162 qdev->req_producer_index = 0; 3041 qdev->req_producer_index = 0;
3163 3042
3164 ql_write_page1_reg(qdev, 3043 ql_write_page1_reg(qdev,
@@ -3208,7 +3087,9 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
3208 &hmem_regs->rxLargeQBaseAddrLow, 3087 &hmem_regs->rxLargeQBaseAddrLow,
3209 LS_64BITS(qdev->lrg_buf_q_phy_addr)); 3088 LS_64BITS(qdev->lrg_buf_q_phy_addr));
3210 3089
3211 ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, qdev->num_lbufq_entries); 3090 ql_write_page1_reg(qdev,
3091 &hmem_regs->rxLargeQLength,
3092 qdev->num_lbufq_entries);
3212 3093
3213 ql_write_page1_reg(qdev, 3094 ql_write_page1_reg(qdev,
3214 &hmem_regs->rxLargeBufferLength, 3095 &hmem_regs->rxLargeBufferLength,
@@ -3258,7 +3139,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
3258 if ((value & PORT_STATUS_IC) == 0) { 3139 if ((value & PORT_STATUS_IC) == 0) {
3259 3140
3260 /* Chip has not been configured yet, so let it rip. */ 3141 /* Chip has not been configured yet, so let it rip. */
3261 if(ql_init_misc_registers(qdev)) { 3142 if (ql_init_misc_registers(qdev)) {
3262 status = -1; 3143 status = -1;
3263 goto out; 3144 goto out;
3264 } 3145 }
@@ -3268,7 +3149,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
3268 3149
3269 value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig; 3150 value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;
3270 3151
3271 if(ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK, 3152 if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
3272 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) 3153 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
3273 * 2) << 13)) { 3154 * 2) << 13)) {
3274 status = -1; 3155 status = -1;
@@ -3291,7 +3172,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
3291 &port_regs->mac0MaxFrameLengthReg, 3172 &port_regs->mac0MaxFrameLengthReg,
3292 qdev->max_frame_size); 3173 qdev->max_frame_size);
3293 3174
3294 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 3175 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
3295 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 3176 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
3296 2) << 7)) { 3177 2) << 7)) {
3297 status = -1; 3178 status = -1;
@@ -3353,8 +3234,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
3353 } while (--delay); 3234 } while (--delay);
3354 3235
3355 if (delay == 0) { 3236 if (delay == 0) {
3356 printk(KERN_ERR PFX 3237 netdev_err(qdev->ndev, "Hw Initialization timeout\n");
3357 "%s: Hw Initialization timeout.\n", qdev->ndev->name);
3358 status = -1; 3238 status = -1;
3359 goto out; 3239 goto out;
3360 } 3240 }
@@ -3385,7 +3265,8 @@ out:
3385 */ 3265 */
3386static int ql_adapter_reset(struct ql3_adapter *qdev) 3266static int ql_adapter_reset(struct ql3_adapter *qdev)
3387{ 3267{
3388 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; 3268 struct ql3xxx_port_registers __iomem *port_regs =
3269 qdev->mem_map_registers;
3389 int status = 0; 3270 int status = 0;
3390 u16 value; 3271 u16 value;
3391 int max_wait_time; 3272 int max_wait_time;
@@ -3396,17 +3277,14 @@ static int ql_adapter_reset(struct ql3_adapter *qdev)
3396 /* 3277 /*
3397 * Issue soft reset to chip. 3278 * Issue soft reset to chip.
3398 */ 3279 */
3399 printk(KERN_DEBUG PFX 3280 netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n");
3400 "%s: Issue soft reset to chip.\n",
3401 qdev->ndev->name);
3402 ql_write_common_reg(qdev, 3281 ql_write_common_reg(qdev,
3403 &port_regs->CommonRegs.ispControlStatus, 3282 &port_regs->CommonRegs.ispControlStatus,
3404 ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR)); 3283 ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));
3405 3284
3406 /* Wait 3 seconds for reset to complete. */ 3285 /* Wait 3 seconds for reset to complete. */
3407 printk(KERN_DEBUG PFX 3286 netdev_printk(KERN_DEBUG, qdev->ndev,
3408 "%s: Wait 10 milliseconds for reset to complete.\n", 3287 "Wait 10 milliseconds for reset to complete\n");
3409 qdev->ndev->name);
3410 3288
3411 /* Wait until the firmware tells us the Soft Reset is done */ 3289 /* Wait until the firmware tells us the Soft Reset is done */
3412 max_wait_time = 5; 3290 max_wait_time = 5;
@@ -3427,8 +3305,8 @@ static int ql_adapter_reset(struct ql3_adapter *qdev)
3427 value = 3305 value =
3428 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus); 3306 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
3429 if (value & ISP_CONTROL_RI) { 3307 if (value & ISP_CONTROL_RI) {
3430 printk(KERN_DEBUG PFX 3308 netdev_printk(KERN_DEBUG, qdev->ndev,
3431 "ql_adapter_reset: clearing RI after reset.\n"); 3309 "clearing RI after reset\n");
3432 ql_write_common_reg(qdev, 3310 ql_write_common_reg(qdev,
3433 &port_regs->CommonRegs. 3311 &port_regs->CommonRegs.
3434 ispControlStatus, 3312 ispControlStatus,
@@ -3448,13 +3326,11 @@ static int ql_adapter_reset(struct ql3_adapter *qdev)
3448 */ 3326 */
3449 max_wait_time = 5; 3327 max_wait_time = 5;
3450 do { 3328 do {
3451 value = 3329 value = ql_read_common_reg(qdev,
3452 ql_read_common_reg(qdev, 3330 &port_regs->CommonRegs.
3453 &port_regs->CommonRegs. 3331 ispControlStatus);
3454 ispControlStatus); 3332 if ((value & ISP_CONTROL_FSR) == 0)
3455 if ((value & ISP_CONTROL_FSR) == 0) {
3456 break; 3333 break;
3457 }
3458 ssleep(1); 3334 ssleep(1);
3459 } while ((--max_wait_time)); 3335 } while ((--max_wait_time));
3460 } 3336 }
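
Register writes in this driver pair each control bit with a write-enable mask in the upper 16 bits, e.g. `(ISP_CONTROL_SR << 16) | ISP_CONTROL_SR` above to set the soft-reset bit; a write carrying the enable half alone would clear it. A standalone model of how such a masked write behaves (editorial sketch; the exact hardware semantics are the chip's, and the bit value is a stand-in):

#include <stdio.h>
#include <stdint.h>

#define ISP_CONTROL_SR 0x8	/* stand-in value */

static void write_masked(uint32_t *reg, uint32_t bits)
{
	/* upper half: which bits may change; lower half: their values */
	uint32_t mask = bits >> 16;

	*reg = (*reg & ~mask) | (bits & mask);
}

int main(void)
{
	uint32_t isp_control = 0;

	write_masked(&isp_control, (ISP_CONTROL_SR << 16) | ISP_CONTROL_SR);
	printf("after set:   0x%x\n", isp_control);

	write_masked(&isp_control, ISP_CONTROL_SR << 16);
	printf("after clear: 0x%x\n", isp_control);
	return 0;
}
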
@@ -3468,7 +3344,8 @@ static int ql_adapter_reset(struct ql3_adapter *qdev)
3468 3344
3469static void ql_set_mac_info(struct ql3_adapter *qdev) 3345static void ql_set_mac_info(struct ql3_adapter *qdev)
3470{ 3346{
3471 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; 3347 struct ql3xxx_port_registers __iomem *port_regs =
3348 qdev->mem_map_registers;
3472 u32 value, port_status; 3349 u32 value, port_status;
3473 u8 func_number; 3350 u8 func_number;
3474 3351
@@ -3484,9 +3361,9 @@ static void ql_set_mac_info(struct ql3_adapter *qdev)
3484 qdev->mb_bit_mask = FN0_MA_BITS_MASK; 3361 qdev->mb_bit_mask = FN0_MA_BITS_MASK;
3485 qdev->PHYAddr = PORT0_PHY_ADDRESS; 3362 qdev->PHYAddr = PORT0_PHY_ADDRESS;
3486 if (port_status & PORT_STATUS_SM0) 3363 if (port_status & PORT_STATUS_SM0)
3487 set_bit(QL_LINK_OPTICAL,&qdev->flags); 3364 set_bit(QL_LINK_OPTICAL, &qdev->flags);
3488 else 3365 else
3489 clear_bit(QL_LINK_OPTICAL,&qdev->flags); 3366 clear_bit(QL_LINK_OPTICAL, &qdev->flags);
3490 break; 3367 break;
3491 3368
3492 case ISP_CONTROL_FN1_NET: 3369 case ISP_CONTROL_FN1_NET:
@@ -3495,17 +3372,17 @@ static void ql_set_mac_info(struct ql3_adapter *qdev)
3495 qdev->mb_bit_mask = FN1_MA_BITS_MASK; 3372 qdev->mb_bit_mask = FN1_MA_BITS_MASK;
3496 qdev->PHYAddr = PORT1_PHY_ADDRESS; 3373 qdev->PHYAddr = PORT1_PHY_ADDRESS;
3497 if (port_status & PORT_STATUS_SM1) 3374 if (port_status & PORT_STATUS_SM1)
3498 set_bit(QL_LINK_OPTICAL,&qdev->flags); 3375 set_bit(QL_LINK_OPTICAL, &qdev->flags);
3499 else 3376 else
3500 clear_bit(QL_LINK_OPTICAL,&qdev->flags); 3377 clear_bit(QL_LINK_OPTICAL, &qdev->flags);
3501 break; 3378 break;
3502 3379
3503 case ISP_CONTROL_FN0_SCSI: 3380 case ISP_CONTROL_FN0_SCSI:
3504 case ISP_CONTROL_FN1_SCSI: 3381 case ISP_CONTROL_FN1_SCSI:
3505 default: 3382 default:
3506 printk(KERN_DEBUG PFX 3383 netdev_printk(KERN_DEBUG, qdev->ndev,
3507 "%s: Invalid function number, ispControlStatus = 0x%x\n", 3384 "Invalid function number, ispControlStatus = 0x%x\n",
3508 qdev->ndev->name,value); 3385 value);
3509 break; 3386 break;
3510 } 3387 }
3511 qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8; 3388 qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8;
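
ql_set_mac_info() reduces to: each network function reads its own media-sense bit (PORT_STATUS_SM0 for function 0, SM1 for function 1) and sets or clears QL_LINK_OPTICAL accordingly. As a standalone predicate (editorial sketch; `port_is_optical()` is hypothetical and the bit values are stand-ins):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define PORT_STATUS_SM0 0x1	/* stand-in: port 0 optical media */
#define PORT_STATUS_SM1 0x2	/* stand-in: port 1 optical media */

static bool port_is_optical(uint32_t port_status, int mac_index)
{
	uint32_t bit = (mac_index == 0) ? PORT_STATUS_SM0 : PORT_STATUS_SM1;

	return (port_status & bit) != 0;
}

int main(void)
{
	assert(port_is_optical(PORT_STATUS_SM1, 1));
	assert(!port_is_optical(PORT_STATUS_SM1, 0));
	return 0;
}
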
@@ -3516,32 +3393,26 @@ static void ql_display_dev_info(struct net_device *ndev)
3516 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 3393 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3517 struct pci_dev *pdev = qdev->pdev; 3394 struct pci_dev *pdev = qdev->pdev;
3518 3395
3519 printk(KERN_INFO PFX 3396 netdev_info(ndev,
3520 "\n%s Adapter %d RevisionID %d found %s on PCI slot %d.\n", 3397 "%s Adapter %d RevisionID %d found %s on PCI slot %d\n",
3521 DRV_NAME, qdev->index, qdev->chip_rev_id, 3398 DRV_NAME, qdev->index, qdev->chip_rev_id,
3522 (qdev->device_id == QL3032_DEVICE_ID) ? "QLA3032" : "QLA3022", 3399 qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022",
3523 qdev->pci_slot); 3400 qdev->pci_slot);
3524 printk(KERN_INFO PFX 3401 netdev_info(ndev, "%s Interface\n",
3525 "%s Interface.\n", 3402 test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");
3526 test_bit(QL_LINK_OPTICAL,&qdev->flags) ? "OPTICAL" : "COPPER");
3527 3403
3528 /* 3404 /*
3529 * Print PCI bus width/type. 3405 * Print PCI bus width/type.
3530 */ 3406 */
3531 printk(KERN_INFO PFX 3407 netdev_info(ndev, "Bus interface is %s %s\n",
3532 "Bus interface is %s %s.\n", 3408 ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
3533 ((qdev->pci_width == 64) ? "64-bit" : "32-bit"), 3409 ((qdev->pci_x) ? "PCI-X" : "PCI"));
3534 ((qdev->pci_x) ? "PCI-X" : "PCI"));
3535 3410
3536 printk(KERN_INFO PFX 3411 netdev_info(ndev, "mem IO base address adjusted = 0x%p\n",
3537 "mem IO base address adjusted = 0x%p\n", 3412 qdev->mem_map_registers);
3538 qdev->mem_map_registers); 3413 netdev_info(ndev, "Interrupt number = %d\n", pdev->irq);
3539 printk(KERN_INFO PFX "Interrupt number = %d\n", pdev->irq);
3540 3414
3541 if (netif_msg_probe(qdev)) 3415 netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr);
3542 printk(KERN_INFO PFX
3543 "%s: MAC address %pM\n",
3544 ndev->name, ndev->dev_addr);
3545} 3416}
3546 3417
3547static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) 3418static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
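
The last change in the hunk above folds a conditional message into one call: netif_info(priv, type, dev, ...) prints only when the matching NETIF_MSG_* bit is set in priv->msg_enable, replacing the explicit netif_msg_probe() test. A sketch under that assumption; the struct and function here are hypothetical:

#include <linux/types.h>
#include <linux/netdevice.h>

struct example_priv {
	u32 msg_enable;		/* consulted by the netif_msg_*() tests */
};

static void example_show_mac(struct example_priv *priv,
			     struct net_device *ndev)
{
	/* emits only if NETIF_MSG_PROBE is set in priv->msg_enable */
	netif_info(priv, probe, ndev, "MAC address %pM\n", ndev->dev_addr);
}
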
@@ -3552,17 +3423,16 @@ static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
3552 netif_stop_queue(ndev); 3423 netif_stop_queue(ndev);
3553 netif_carrier_off(ndev); 3424 netif_carrier_off(ndev);
3554 3425
3555 clear_bit(QL_ADAPTER_UP,&qdev->flags); 3426 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3556 clear_bit(QL_LINK_MASTER,&qdev->flags); 3427 clear_bit(QL_LINK_MASTER, &qdev->flags);
3557 3428
3558 ql_disable_interrupts(qdev); 3429 ql_disable_interrupts(qdev);
3559 3430
3560 free_irq(qdev->pdev->irq, ndev); 3431 free_irq(qdev->pdev->irq, ndev);
3561 3432
3562 if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) { 3433 if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3563 printk(KERN_INFO PFX 3434 netdev_info(qdev->ndev, "calling pci_disable_msi()\n");
3564 "%s: calling pci_disable_msi().\n", qdev->ndev->name); 3435 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3565 clear_bit(QL_MSI_ENABLED,&qdev->flags);
3566 pci_disable_msi(qdev->pdev); 3436 pci_disable_msi(qdev->pdev);
3567 } 3437 }
3568 3438
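
In the teardown above, the QL_MSI_ENABLED flag records that pci_enable_msi() succeeded on the way up, so the down path only disables MSI it actually enabled. A compressed sketch of that pairing; the function and its flag-bit parameter are illustrative, only the PCI call and bitop helpers are real:

#include <linux/pci.h>
#include <linux/bitops.h>

static void example_irq_teardown(struct pci_dev *pdev, int want_msi,
				 unsigned long *flags, int msi_bit)
{
	if (want_msi && test_bit(msi_bit, flags)) {
		clear_bit(msi_bit, flags);	/* atomic flag clear */
		pci_disable_msi(pdev);
	}
}
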
@@ -3576,17 +3446,16 @@ static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
3576 3446
3577 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3447 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3578 if (ql_wait_for_drvr_lock(qdev)) { 3448 if (ql_wait_for_drvr_lock(qdev)) {
3579 if ((soft_reset = ql_adapter_reset(qdev))) { 3449 soft_reset = ql_adapter_reset(qdev);
3580 printk(KERN_ERR PFX 3450 if (soft_reset) {
3581 "%s: ql_adapter_reset(%d) FAILED!\n", 3451 netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n",
3582 ndev->name, qdev->index); 3452 qdev->index);
3583 } 3453 }
3584 printk(KERN_ERR PFX 3454 netdev_err(ndev,
3585 "%s: Releaseing driver lock via chip reset.\n",ndev->name); 3455 "Releasing driver lock via chip reset\n");
3586 } else { 3456 } else {
3587 printk(KERN_ERR PFX 3457 netdev_err(ndev,
3588 "%s: Could not acquire driver lock to do " 3458 "Could not acquire driver lock to do reset!\n");
3589 "reset!\n", ndev->name);
3590 retval = -1; 3459 retval = -1;
3591 } 3460 }
3592 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3461 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
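
Besides the message cleanup, this hunk hoists an assignment out of the if-condition: "if ((soft_reset = ql_adapter_reset(qdev)))" becomes an assignment followed by a plain test, the form checkpatch prefers. Behaviour is unchanged; a contrived sketch of the transformation, where do_reset() is a stand-in rather than a real helper:

static int do_reset(void) { return 0; }	/* stand-in for ql_adapter_reset() */

static int example(void)
{
	int soft_reset;

	/* old: if ((soft_reset = do_reset())) ... */
	soft_reset = do_reset();
	if (soft_reset)
		return soft_reset;

	return 0;
}
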
@@ -3603,56 +3472,50 @@ static int ql_adapter_up(struct ql3_adapter *qdev)
3603 unsigned long hw_flags; 3472 unsigned long hw_flags;
3604 3473
3605 if (ql_alloc_mem_resources(qdev)) { 3474 if (ql_alloc_mem_resources(qdev)) {
3606 printk(KERN_ERR PFX 3475 netdev_err(ndev, "Unable to allocate buffers\n");
3607 "%s Unable to allocate buffers.\n", ndev->name);
3608 return -ENOMEM; 3476 return -ENOMEM;
3609 } 3477 }
3610 3478
3611 if (qdev->msi) { 3479 if (qdev->msi) {
3612 if (pci_enable_msi(qdev->pdev)) { 3480 if (pci_enable_msi(qdev->pdev)) {
3613 printk(KERN_ERR PFX 3481 netdev_err(ndev,
3614 "%s: User requested MSI, but MSI failed to " 3482 "User requested MSI, but MSI failed to initialize. Continuing without MSI.\n");
3615 "initialize. Continuing without MSI.\n",
3616 qdev->ndev->name);
3617 qdev->msi = 0; 3483 qdev->msi = 0;
3618 } else { 3484 } else {
3619 printk(KERN_INFO PFX "%s: MSI Enabled...\n", qdev->ndev->name); 3485 netdev_info(ndev, "MSI Enabled...\n");
3620 set_bit(QL_MSI_ENABLED,&qdev->flags); 3486 set_bit(QL_MSI_ENABLED, &qdev->flags);
3621 irq_flags &= ~IRQF_SHARED; 3487 irq_flags &= ~IRQF_SHARED;
3622 } 3488 }
3623 } 3489 }
3624 3490
3625 if ((err = request_irq(qdev->pdev->irq, 3491 err = request_irq(qdev->pdev->irq, ql3xxx_isr,
3626 ql3xxx_isr, 3492 irq_flags, ndev->name, ndev);
3627 irq_flags, ndev->name, ndev))) { 3493 if (err) {
3628 printk(KERN_ERR PFX 3494 netdev_err(ndev,
3629 "%s: Failed to reserve interrupt %d already in use.\n", 3495 "Failed to reserve interrupt %d - already in use\n",
3630 ndev->name, qdev->pdev->irq); 3496 qdev->pdev->irq);
3631 goto err_irq; 3497 goto err_irq;
3632 } 3498 }
3633 3499
3634 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3500 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3635 3501
3636 if ((err = ql_wait_for_drvr_lock(qdev))) { 3502 err = ql_wait_for_drvr_lock(qdev);
3637 if ((err = ql_adapter_initialize(qdev))) { 3503 if (err) {
3638 printk(KERN_ERR PFX 3504 err = ql_adapter_initialize(qdev);
3639 "%s: Unable to initialize adapter.\n", 3505 if (err) {
3640 ndev->name); 3506 netdev_err(ndev, "Unable to initialize adapter\n");
3641 goto err_init; 3507 goto err_init;
3642 } 3508 }
3643 printk(KERN_ERR PFX 3509 netdev_err(ndev, "Releasing driver lock\n");
3644 "%s: Releaseing driver lock.\n",ndev->name);
3645 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); 3510 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3646 } else { 3511 } else {
3647 printk(KERN_ERR PFX 3512 netdev_err(ndev, "Could not acquire driver lock\n");
3648 "%s: Could not acquire driver lock.\n",
3649 ndev->name);
3650 goto err_lock; 3513 goto err_lock;
3651 } 3514 }
3652 3515
3653 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3516 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3654 3517
3655 set_bit(QL_ADAPTER_UP,&qdev->flags); 3518 set_bit(QL_ADAPTER_UP, &qdev->flags);
3656 3519
3657 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); 3520 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
3658 3521
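
Taken together, ql_adapter_up() follows the kernel's goto-unwind convention visible in the err_init/err_lock/err_irq labels: each failing step jumps to a label that releases only what earlier steps acquired, in reverse order. A skeleton of that shape; everything below is illustrative scaffolding, not driver code:

struct example_dev { int dummy; };

static int alloc_resources(struct example_dev *d) { return 0; }
static void free_resources(struct example_dev *d) { }
static int grab_irq(struct example_dev *d) { return 0; }
static void release_irq(struct example_dev *d) { }
static int init_hw(struct example_dev *d) { return 0; }

static int example_up(struct example_dev *d)
{
	int err;

	err = alloc_resources(d);
	if (err)
		return err;

	err = grab_irq(d);
	if (err)
		goto err_free;

	err = init_hw(d);
	if (err)
		goto err_irq;

	return 0;

err_irq:
	release_irq(d);
err_free:
	free_resources(d);
	return err;
}
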
@@ -3666,11 +3529,9 @@ err_lock:
3666 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3529 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3667 free_irq(qdev->pdev->irq, ndev); 3530 free_irq(qdev->pdev->irq, ndev);
3668err_irq: 3531err_irq:
3669 if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) { 3532 if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3670 printk(KERN_INFO PFX 3533 netdev_info(ndev, "calling pci_disable_msi()\n");
3671 "%s: calling pci_disable_msi().\n", 3534 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3672 qdev->ndev->name);
3673 clear_bit(QL_MSI_ENABLED,&qdev->flags);
3674 pci_disable_msi(qdev->pdev); 3535 pci_disable_msi(qdev->pdev);
3675 } 3536 }
3676 return err; 3537 return err;
@@ -3678,10 +3539,9 @@ err_irq:
3678 3539
3679static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset) 3540static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
3680{ 3541{
3681 if( ql_adapter_down(qdev,reset) || ql_adapter_up(qdev)) { 3542 if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
3682 printk(KERN_ERR PFX 3543 netdev_err(qdev->ndev,
3683 "%s: Driver up/down cycle failed, " 3544 "Driver up/down cycle failed, closing device\n");
3684 "closing device\n",qdev->ndev->name);
3685 rtnl_lock(); 3545 rtnl_lock();
3686 dev_close(qdev->ndev); 3546 dev_close(qdev->ndev);
3687 rtnl_unlock(); 3547 rtnl_unlock();
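
The rtnl_lock()/rtnl_unlock() bracket here is required rather than stylistic: dev_close() must be called with the RTNL held, which the normal ndo_stop path gets from the networking core but a driver-initiated close does not. Minimal sketch of the direct-call case:

#include <linux/rtnetlink.h>
#include <linux/netdevice.h>

static void example_emergency_close(struct net_device *ndev)
{
	rtnl_lock();
	dev_close(ndev);	/* must run under the RTNL lock */
	rtnl_unlock();
}
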
@@ -3698,24 +3558,24 @@ static int ql3xxx_close(struct net_device *ndev)
3698 * Wait for device to recover from a reset. 3558 * Wait for device to recover from a reset.
3699 * (Rarely happens, but possible.) 3559 * (Rarely happens, but possible.)
3700 */ 3560 */
3701 while (!test_bit(QL_ADAPTER_UP,&qdev->flags)) 3561 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3702 msleep(50); 3562 msleep(50);
3703 3563
3704 ql_adapter_down(qdev,QL_DO_RESET); 3564 ql_adapter_down(qdev, QL_DO_RESET);
3705 return 0; 3565 return 0;
3706} 3566}
3707 3567
3708static int ql3xxx_open(struct net_device *ndev) 3568static int ql3xxx_open(struct net_device *ndev)
3709{ 3569{
3710 struct ql3_adapter *qdev = netdev_priv(ndev); 3570 struct ql3_adapter *qdev = netdev_priv(ndev);
3711 return (ql_adapter_up(qdev)); 3571 return ql_adapter_up(qdev);
3712} 3572}
3713 3573
3714static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) 3574static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
3715{ 3575{
3716 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 3576 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3717 struct ql3xxx_port_registers __iomem *port_regs = 3577 struct ql3xxx_port_registers __iomem *port_regs =
3718 qdev->mem_map_registers; 3578 qdev->mem_map_registers;
3719 struct sockaddr *addr = p; 3579 struct sockaddr *addr = p;
3720 unsigned long hw_flags; 3580 unsigned long hw_flags;
3721 3581
@@ -3750,7 +3610,7 @@ static void ql3xxx_tx_timeout(struct net_device *ndev)
3750{ 3610{
3751 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 3611 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3752 3612
3753 printk(KERN_ERR PFX "%s: Resetting...\n", ndev->name); 3613 netdev_err(ndev, "Resetting...\n");
3754 /* 3614 /*
3755 * Stop the queues, we've got a problem. 3615 * Stop the queues, we've got a problem.
3756 */ 3616 */
@@ -3770,11 +3630,12 @@ static void ql_reset_work(struct work_struct *work)
3770 u32 value; 3630 u32 value;
3771 struct ql_tx_buf_cb *tx_cb; 3631 struct ql_tx_buf_cb *tx_cb;
3772 int max_wait_time, i; 3632 int max_wait_time, i;
3773 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; 3633 struct ql3xxx_port_registers __iomem *port_regs =
3634 qdev->mem_map_registers;
3774 unsigned long hw_flags; 3635 unsigned long hw_flags;
3775 3636
3776 if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START),&qdev->flags)) { 3637 if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) {
3777 clear_bit(QL_LINK_MASTER,&qdev->flags); 3638 clear_bit(QL_LINK_MASTER, &qdev->flags);
3778 3639
3779 /* 3640 /*
3780 * Loop through the active list and return the skb. 3641 * Loop through the active list and return the skb.
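
One semantic oddity rightly survives a pure style patch: test_bit() takes a bit number, not a mask, so OR-ing the QL_RESET_PER_SCSI and QL_RESET_START bit numbers in the condition above selects a single (likely unintended) bit rather than testing either flag. Checking two atomic flag bits takes two calls; an illustrative sketch with hypothetical bit numbers:

#include <linux/bitops.h>

#define EX_FLAG_A 3	/* hypothetical bit numbers */
#define EX_FLAG_B 4

static int example_either_set(unsigned long *flags)
{
	return test_bit(EX_FLAG_A, flags) || test_bit(EX_FLAG_B, flags);
}
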
@@ -3783,17 +3644,19 @@ static void ql_reset_work(struct work_struct *work)
3783 int j; 3644 int j;
3784 tx_cb = &qdev->tx_buf[i]; 3645 tx_cb = &qdev->tx_buf[i];
3785 if (tx_cb->skb) { 3646 if (tx_cb->skb) {
3786 printk(KERN_DEBUG PFX 3647 netdev_printk(KERN_DEBUG, ndev,
3787 "%s: Freeing lost SKB.\n", 3648 "Freeing lost SKB\n");
3788 qdev->ndev->name);
3789 pci_unmap_single(qdev->pdev, 3649 pci_unmap_single(qdev->pdev,
3790 dma_unmap_addr(&tx_cb->map[0], mapaddr), 3650 dma_unmap_addr(&tx_cb->map[0],
3651 mapaddr),
3791 dma_unmap_len(&tx_cb->map[0], maplen), 3652 dma_unmap_len(&tx_cb->map[0], maplen),
3792 PCI_DMA_TODEVICE); 3653 PCI_DMA_TODEVICE);
3793 for(j=1;j<tx_cb->seg_count;j++) { 3654 for (j = 1; j < tx_cb->seg_count; j++) {
3794 pci_unmap_page(qdev->pdev, 3655 pci_unmap_page(qdev->pdev,
3795 dma_unmap_addr(&tx_cb->map[j],mapaddr), 3656 dma_unmap_addr(&tx_cb->map[j],
3796 dma_unmap_len(&tx_cb->map[j],maplen), 3657 mapaddr),
3658 dma_unmap_len(&tx_cb->map[j],
3659 maplen),
3797 PCI_DMA_TODEVICE); 3660 PCI_DMA_TODEVICE);
3798 } 3661 }
3799 dev_kfree_skb(tx_cb->skb); 3662 dev_kfree_skb(tx_cb->skb);
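
The unmap calls re-wrapped above lean on the dma-unmap bookkeeping macros: the DMA address and length are stashed in the tx control block at map time (via the dma_unmap_addr_set()/dma_unmap_len_set() counterparts) and read back here. A minimal sketch of the read-back side, using the same era's PCI DMA API; the struct and field names are illustrative:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

struct example_map {
	DEFINE_DMA_UNMAP_ADDR(mapaddr);	/* dma_addr_t slot (may compile away) */
	DEFINE_DMA_UNMAP_LEN(maplen);
};

static void example_unmap(struct pci_dev *pdev, struct example_map *m)
{
	pci_unmap_single(pdev,
			 dma_unmap_addr(m, mapaddr),
			 dma_unmap_len(m, maplen),
			 PCI_DMA_TODEVICE);
}
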
@@ -3801,8 +3664,7 @@ static void ql_reset_work(struct work_struct *work)
3801 } 3664 }
3802 } 3665 }
3803 3666
3804 printk(KERN_ERR PFX 3667 netdev_err(ndev, "Clearing NRI after reset\n");
3805 "%s: Clearing NRI after reset.\n", qdev->ndev->name);
3806 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3668 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3807 ql_write_common_reg(qdev, 3669 ql_write_common_reg(qdev,
3808 &port_regs->CommonRegs. 3670 &port_regs->CommonRegs.
@@ -3818,16 +3680,14 @@ static void ql_reset_work(struct work_struct *work)
3818 3680
3819 ispControlStatus); 3681 ispControlStatus);
3820 if ((value & ISP_CONTROL_SR) == 0) { 3682 if ((value & ISP_CONTROL_SR) == 0) {
3821 printk(KERN_DEBUG PFX 3683 netdev_printk(KERN_DEBUG, ndev,
3822 "%s: reset completed.\n", 3684 "reset completed\n");
3823 qdev->ndev->name);
3824 break; 3685 break;
3825 } 3686 }
3826 3687
3827 if (value & ISP_CONTROL_RI) { 3688 if (value & ISP_CONTROL_RI) {
3828 printk(KERN_DEBUG PFX 3689 netdev_printk(KERN_DEBUG, ndev,
3829 "%s: clearing NRI after reset.\n", 3690 "clearing NRI after reset\n");
3830 qdev->ndev->name);
3831 ql_write_common_reg(qdev, 3691 ql_write_common_reg(qdev,
3832 &port_regs-> 3692 &port_regs->
3833 CommonRegs. 3693 CommonRegs.
@@ -3848,21 +3708,19 @@ static void ql_reset_work(struct work_struct *work)
3848 * Set the reset flags and clear the board again. 3708 * Set the reset flags and clear the board again.
3849 * Nothing else to do... 3709 * Nothing else to do...
3850 */ 3710 */
3851 printk(KERN_ERR PFX 3711 netdev_err(ndev,
3852 "%s: Timed out waiting for reset to " 3712 "Timed out waiting for reset to complete\n");
3853 "complete.\n", ndev->name); 3713 netdev_err(ndev, "Do a reset\n");
3854 printk(KERN_ERR PFX 3714 clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
3855 "%s: Do a reset.\n", ndev->name); 3715 clear_bit(QL_RESET_START, &qdev->flags);
3856 clear_bit(QL_RESET_PER_SCSI,&qdev->flags); 3716 ql_cycle_adapter(qdev, QL_DO_RESET);
3857 clear_bit(QL_RESET_START,&qdev->flags);
3858 ql_cycle_adapter(qdev,QL_DO_RESET);
3859 return; 3717 return;
3860 } 3718 }
3861 3719
3862 clear_bit(QL_RESET_ACTIVE,&qdev->flags); 3720 clear_bit(QL_RESET_ACTIVE, &qdev->flags);
3863 clear_bit(QL_RESET_PER_SCSI,&qdev->flags); 3721 clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
3864 clear_bit(QL_RESET_START,&qdev->flags); 3722 clear_bit(QL_RESET_START, &qdev->flags);
3865 ql_cycle_adapter(qdev,QL_NO_RESET); 3723 ql_cycle_adapter(qdev, QL_NO_RESET);
3866 } 3724 }
3867} 3725}
3868 3726
@@ -3876,7 +3734,8 @@ static void ql_tx_timeout_work(struct work_struct *work)
3876 3734
3877static void ql_get_board_info(struct ql3_adapter *qdev) 3735static void ql_get_board_info(struct ql3_adapter *qdev)
3878{ 3736{
3879 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; 3737 struct ql3xxx_port_registers __iomem *port_regs =
3738 qdev->mem_map_registers;
3880 u32 value; 3739 u32 value;
3881 3740
3882 value = ql_read_page0_reg_l(qdev, &port_regs->portStatus); 3741 value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);
@@ -3915,20 +3774,18 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3915{ 3774{
3916 struct net_device *ndev = NULL; 3775 struct net_device *ndev = NULL;
3917 struct ql3_adapter *qdev = NULL; 3776 struct ql3_adapter *qdev = NULL;
3918 static int cards_found = 0; 3777 static int cards_found;
3919 int uninitialized_var(pci_using_dac), err; 3778 int uninitialized_var(pci_using_dac), err;
3920 3779
3921 err = pci_enable_device(pdev); 3780 err = pci_enable_device(pdev);
3922 if (err) { 3781 if (err) {
3923 printk(KERN_ERR PFX "%s cannot enable PCI device\n", 3782 pr_err("%s cannot enable PCI device\n", pci_name(pdev));
3924 pci_name(pdev));
3925 goto err_out; 3783 goto err_out;
3926 } 3784 }
3927 3785
3928 err = pci_request_regions(pdev, DRV_NAME); 3786 err = pci_request_regions(pdev, DRV_NAME);
3929 if (err) { 3787 if (err) {
3930 printk(KERN_ERR PFX "%s cannot obtain PCI resources\n", 3788 pr_err("%s cannot obtain PCI resources\n", pci_name(pdev));
3931 pci_name(pdev));
3932 goto err_out_disable_pdev; 3789 goto err_out_disable_pdev;
3933 } 3790 }
3934 3791
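
Two details worth noting in this probe hunk: the bare pr_err() calls still come out module-prefixed, because every pr_<level>() expands its format through the file's pr_fmt() definition; and "static int cards_found = 0" loses its initializer, since statics are zero-initialized in BSS anyway and checkpatch flags the redundant "= 0". A sketch of the pr_fmt mechanism in a hypothetical module:

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* must precede the includes */

#include <linux/kernel.h>
#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev)
{
	int err = pci_enable_device(pdev);

	if (err)	/* logs as "<module>: <bdf> cannot enable PCI device" */
		pr_err("%s cannot enable PCI device\n", pci_name(pdev));
	return err;
}
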
@@ -3943,15 +3800,13 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3943 } 3800 }
3944 3801
3945 if (err) { 3802 if (err) {
3946 printk(KERN_ERR PFX "%s no usable DMA configuration\n", 3803 pr_err("%s no usable DMA configuration\n", pci_name(pdev));
3947 pci_name(pdev));
3948 goto err_out_free_regions; 3804 goto err_out_free_regions;
3949 } 3805 }
3950 3806
3951 ndev = alloc_etherdev(sizeof(struct ql3_adapter)); 3807 ndev = alloc_etherdev(sizeof(struct ql3_adapter));
3952 if (!ndev) { 3808 if (!ndev) {
3953 printk(KERN_ERR PFX "%s could not alloc etherdev\n", 3809 pr_err("%s could not alloc etherdev\n", pci_name(pdev));
3954 pci_name(pdev));
3955 err = -ENOMEM; 3810 err = -ENOMEM;
3956 goto err_out_free_regions; 3811 goto err_out_free_regions;
3957 } 3812 }
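
alloc_etherdev(sizeof(struct ql3_adapter)) reserves the driver's private area directly behind the net_device, which the casts of netdev_priv() in earlier hunks read back; netdev_priv() returns void *, so those casts are strictly redundant in C. A minimal sketch of the pairing, with example names and trimmed error handling:

#include <linux/etherdevice.h>

struct example_priv {
	int index;
};

static struct net_device *example_alloc(void)
{
	struct net_device *ndev = alloc_etherdev(sizeof(struct example_priv));

	if (ndev) {
		struct example_priv *priv = netdev_priv(ndev);

		priv->index = 0;	/* private area sits behind ndev */
	}
	return ndev;
}
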
@@ -3978,8 +3833,7 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3978 3833
3979 qdev->mem_map_registers = pci_ioremap_bar(pdev, 1); 3834 qdev->mem_map_registers = pci_ioremap_bar(pdev, 1);
3980 if (!qdev->mem_map_registers) { 3835 if (!qdev->mem_map_registers) {
3981 printk(KERN_ERR PFX "%s: cannot map device registers\n", 3836 pr_err("%s: cannot map device registers\n", pci_name(pdev));
3982 pci_name(pdev));
3983 err = -EIO; 3837 err = -EIO;
3984 goto err_out_free_ndev; 3838 goto err_out_free_ndev;
3985 } 3839 }
@@ -3998,9 +3852,8 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3998 3852
3999 /* make sure the EEPROM is good */ 3853 /* make sure the EEPROM is good */
4000 if (ql_get_nvram_params(qdev)) { 3854 if (ql_get_nvram_params(qdev)) {
4001 printk(KERN_ALERT PFX 3855 pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n",
4002 "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n", 3856 __func__, qdev->index);
4003 qdev->index);
4004 err = -EIO; 3857 err = -EIO;
4005 goto err_out_iounmap; 3858 goto err_out_iounmap;
4006 } 3859 }
@@ -4026,14 +3879,12 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
4026 * Set the Maximum Memory Read Byte Count value. We do this to handle 3879 * Set the Maximum Memory Read Byte Count value. We do this to handle
4027 * jumbo frames. 3880 * jumbo frames.
4028 */ 3881 */
4029 if (qdev->pci_x) { 3882 if (qdev->pci_x)
4030 pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036); 3883 pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036);
4031 }
4032 3884
4033 err = register_netdev(ndev); 3885 err = register_netdev(ndev);
4034 if (err) { 3886 if (err) {
4035 printk(KERN_ERR PFX "%s: cannot register net device\n", 3887 pr_err("%s: cannot register net device\n", pci_name(pdev));
4036 pci_name(pdev));
4037 goto err_out_iounmap; 3888 goto err_out_iounmap;
4038 } 3889 }
4039 3890
@@ -4052,10 +3903,10 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
4052 qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */ 3903 qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */
4053 qdev->adapter_timer.data = (unsigned long)qdev; 3904 qdev->adapter_timer.data = (unsigned long)qdev;
4054 3905
4055 if(!cards_found) { 3906 if (!cards_found) {
4056 printk(KERN_ALERT PFX "%s\n", DRV_STRING); 3907 pr_alert("%s\n", DRV_STRING);
4057 printk(KERN_ALERT PFX "Driver name: %s, Version: %s.\n", 3908 pr_alert("Driver name: %s, Version: %s\n",
4058 DRV_NAME, DRV_VERSION); 3909 DRV_NAME, DRV_VERSION);
4059 } 3910 }
4060 ql_display_dev_info(ndev); 3911 ql_display_dev_info(ndev);
4061 3912
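
The cards_found guard in this last hunk is a print-once banner: the zero-initialized function-local static means only the first probed adapter announces the driver name and version. Sketch of the idiom with illustrative names:

#include <linux/kernel.h>

static void example_banner(void)
{
	static int cards_found;	/* zeroed at load time */

	if (!cards_found)
		pr_alert("Example driver v0.1 loaded\n");
	cards_found++;
}
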