author     Joe Perches <joe@perches.com>          2010-07-22 11:36:17 -0400
committer  David S. Miller <davem@davemloft.net>  2010-07-26 16:15:21 -0400
commit     d7f61777e9ec6951e99fb6fe06ba956b9bc4bbab (patch)
tree       4e186d0ecd1603044639496b4f53a6bb3a6fec51 /drivers/net/qla3xxx.c
parent     eddc5fbd80999444dd32aca3c90290c9d64da396 (diff)
drivers/net/qla3xxx.c: Checkpatch cleanups
Remove typedefs
Indentation and spacing
Use a temporary for a very long pointer variable
More 80-column compatible
Convert a switch to if/else if
Compile tested only; depends on patch "Update logging message style"
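
Illustrative sketch of the two most common transforms in this patch,
condensed from the hunks below ("..." marks elided members):

  /* typedef removal */
  -typedef enum {
  -	PHY_TYPE_UNKNOWN = 0,
  	...
  -} PHY_DEVICE_et;
  +enum PHY_DEVICE_TYPE {
  +	PHY_TYPE_UNKNOWN = 0,
  	...
  +};

  /* temporary for a very long pointer variable */
  -	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
  -			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
  +	u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
  +
  +	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);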
(old)
$ scripts/checkpatch.pl -f drivers/net/qla3xxx.c | grep "^total:"
total: 209 errors, 82 warnings, 3995 lines checked
(new)
$ scripts/checkpatch.pl -f drivers/net/qla3xxx.c | grep "^total:"
total: 2 errors, 0 warnings, 3970 lines checked
$ size drivers/net/qla3xxx.o.*
text data bss dec hex filename
50413 212 13864 64489 fbe9 drivers/net/qla3xxx.o.old
49959 212 13728 63899 f99b drivers/net/qla3xxx.o.new
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/qla3xxx.c')
-rw-r--r--  drivers/net/qla3xxx.c  943
1 file changed, 459 insertions, 484 deletions
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 74debf167c52..6168a130f33f 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -38,8 +38,8 @@
38 | 38 | ||
39 | #include "qla3xxx.h" | 39 | #include "qla3xxx.h" |
40 | 40 | ||
41 | #define DRV_NAME "qla3xxx" | 41 | #define DRV_NAME "qla3xxx" |
42 | #define DRV_STRING "QLogic ISP3XXX Network Driver" | 42 | #define DRV_STRING "QLogic ISP3XXX Network Driver" |
43 | #define DRV_VERSION "v2.03.00-k5" | 43 | #define DRV_VERSION "v2.03.00-k5" |
44 | 44 | ||
45 | static const char ql3xxx_driver_name[] = DRV_NAME; | 45 | static const char ql3xxx_driver_name[] = DRV_NAME; |
@@ -77,24 +77,24 @@ MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);
77 | /* | 77 | /* |
78 | * These are the known PHY's which are used | 78 | * These are the known PHY's which are used |
79 | */ | 79 | */ |
80 | typedef enum { | 80 | enum PHY_DEVICE_TYPE { |
81 | PHY_TYPE_UNKNOWN = 0, | 81 | PHY_TYPE_UNKNOWN = 0, |
82 | PHY_VITESSE_VSC8211, | 82 | PHY_VITESSE_VSC8211, |
83 | PHY_AGERE_ET1011C, | 83 | PHY_AGERE_ET1011C, |
84 | MAX_PHY_DEV_TYPES | 84 | MAX_PHY_DEV_TYPES |
85 | } PHY_DEVICE_et; | 85 | }; |
86 | 86 | ||
87 | typedef struct { | 87 | struct PHY_DEVICE_INFO { |
88 | PHY_DEVICE_et phyDevice; | 88 | const enum PHY_DEVICE_TYPE phyDevice; |
89 | u32 phyIdOUI; | 89 | const u32 phyIdOUI; |
90 | u16 phyIdModel; | 90 | const u16 phyIdModel; |
91 | char *name; | 91 | const char *name; |
92 | } PHY_DEVICE_INFO_t; | 92 | }; |
93 | 93 | ||
94 | static const PHY_DEVICE_INFO_t PHY_DEVICES[] = | 94 | static const struct PHY_DEVICE_INFO PHY_DEVICES[] = { |
95 | {{PHY_TYPE_UNKNOWN, 0x000000, 0x0, "PHY_TYPE_UNKNOWN"}, | 95 | {PHY_TYPE_UNKNOWN, 0x000000, 0x0, "PHY_TYPE_UNKNOWN"}, |
96 | {PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"}, | 96 | {PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"}, |
97 | {PHY_AGERE_ET1011C, 0x00a0bc, 0x1, "PHY_AGERE_ET1011C"}, | 97 | {PHY_AGERE_ET1011C, 0x00a0bc, 0x1, "PHY_AGERE_ET1011C"}, |
98 | }; | 98 | }; |
99 | 99 | ||
100 | 100 | ||
@@ -104,7 +104,8 @@ static const PHY_DEVICE_INFO_t PHY_DEVICES[] =
104 | static int ql_sem_spinlock(struct ql3_adapter *qdev, | 104 | static int ql_sem_spinlock(struct ql3_adapter *qdev, |
105 | u32 sem_mask, u32 sem_bits) | 105 | u32 sem_mask, u32 sem_bits) |
106 | { | 106 | { |
107 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 107 | struct ql3xxx_port_registers __iomem *port_regs = |
108 | qdev->mem_map_registers; | ||
108 | u32 value; | 109 | u32 value; |
109 | unsigned int seconds = 3; | 110 | unsigned int seconds = 3; |
110 | 111 | ||
@@ -115,20 +116,22 @@ static int ql_sem_spinlock(struct ql3_adapter *qdev,
115 | if ((value & (sem_mask >> 16)) == sem_bits) | 116 | if ((value & (sem_mask >> 16)) == sem_bits) |
116 | return 0; | 117 | return 0; |
117 | ssleep(1); | 118 | ssleep(1); |
118 | } while(--seconds); | 119 | } while (--seconds); |
119 | return -1; | 120 | return -1; |
120 | } | 121 | } |
121 | 122 | ||
122 | static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask) | 123 | static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask) |
123 | { | 124 | { |
124 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 125 | struct ql3xxx_port_registers __iomem *port_regs = |
126 | qdev->mem_map_registers; | ||
125 | writel(sem_mask, &port_regs->CommonRegs.semaphoreReg); | 127 | writel(sem_mask, &port_regs->CommonRegs.semaphoreReg); |
126 | readl(&port_regs->CommonRegs.semaphoreReg); | 128 | readl(&port_regs->CommonRegs.semaphoreReg); |
127 | } | 129 | } |
128 | 130 | ||
129 | static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits) | 131 | static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits) |
130 | { | 132 | { |
131 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 133 | struct ql3xxx_port_registers __iomem *port_regs = |
134 | qdev->mem_map_registers; | ||
132 | u32 value; | 135 | u32 value; |
133 | 136 | ||
134 | writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg); | 137 | writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg); |
@@ -163,7 +166,8 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
163 | 166 | ||
164 | static void ql_set_register_page(struct ql3_adapter *qdev, u32 page) | 167 | static void ql_set_register_page(struct ql3_adapter *qdev, u32 page) |
165 | { | 168 | { |
166 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 169 | struct ql3xxx_port_registers __iomem *port_regs = |
170 | qdev->mem_map_registers; | ||
167 | 171 | ||
168 | writel(((ISP_CONTROL_NP_MASK << 16) | page), | 172 | writel(((ISP_CONTROL_NP_MASK << 16) | page), |
169 | &port_regs->CommonRegs.ispControlStatus); | 173 | &port_regs->CommonRegs.ispControlStatus); |
@@ -171,8 +175,7 @@ static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
171 | qdev->current_page = page; | 175 | qdev->current_page = page; |
172 | } | 176 | } |
173 | 177 | ||
174 | static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, | 178 | static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg) |
175 | u32 __iomem * reg) | ||
176 | { | 179 | { |
177 | u32 value; | 180 | u32 value; |
178 | unsigned long hw_flags; | 181 | unsigned long hw_flags; |
@@ -184,8 +187,7 @@ static u32 ql_read_common_reg_l(struct ql3_adapter *qdev,
184 | return value; | 187 | return value; |
185 | } | 188 | } |
186 | 189 | ||
187 | static u32 ql_read_common_reg(struct ql3_adapter *qdev, | 190 | static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg) |
188 | u32 __iomem * reg) | ||
189 | { | 191 | { |
190 | return readl(reg); | 192 | return readl(reg); |
191 | } | 193 | } |
@@ -198,7 +200,7 @@ static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
198 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | 200 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); |
199 | 201 | ||
200 | if (qdev->current_page != 0) | 202 | if (qdev->current_page != 0) |
201 | ql_set_register_page(qdev,0); | 203 | ql_set_register_page(qdev, 0); |
202 | value = readl(reg); | 204 | value = readl(reg); |
203 | 205 | ||
204 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | 206 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); |
@@ -208,7 +210,7 @@ static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
208 | static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg) | 210 | static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg) |
209 | { | 211 | { |
210 | if (qdev->current_page != 0) | 212 | if (qdev->current_page != 0) |
211 | ql_set_register_page(qdev,0); | 213 | ql_set_register_page(qdev, 0); |
212 | return readl(reg); | 214 | return readl(reg); |
213 | } | 215 | } |
214 | 216 | ||
@@ -242,7 +244,7 @@ static void ql_write_page0_reg(struct ql3_adapter *qdev,
242 | u32 __iomem *reg, u32 value) | 244 | u32 __iomem *reg, u32 value) |
243 | { | 245 | { |
244 | if (qdev->current_page != 0) | 246 | if (qdev->current_page != 0) |
245 | ql_set_register_page(qdev,0); | 247 | ql_set_register_page(qdev, 0); |
246 | writel(value, reg); | 248 | writel(value, reg); |
247 | readl(reg); | 249 | readl(reg); |
248 | } | 250 | } |
@@ -254,7 +256,7 @@ static void ql_write_page1_reg(struct ql3_adapter *qdev,
254 | u32 __iomem *reg, u32 value) | 256 | u32 __iomem *reg, u32 value) |
255 | { | 257 | { |
256 | if (qdev->current_page != 1) | 258 | if (qdev->current_page != 1) |
257 | ql_set_register_page(qdev,1); | 259 | ql_set_register_page(qdev, 1); |
258 | writel(value, reg); | 260 | writel(value, reg); |
259 | readl(reg); | 261 | readl(reg); |
260 | } | 262 | } |
@@ -266,14 +268,15 @@ static void ql_write_page2_reg(struct ql3_adapter *qdev,
266 | u32 __iomem *reg, u32 value) | 268 | u32 __iomem *reg, u32 value) |
267 | { | 269 | { |
268 | if (qdev->current_page != 2) | 270 | if (qdev->current_page != 2) |
269 | ql_set_register_page(qdev,2); | 271 | ql_set_register_page(qdev, 2); |
270 | writel(value, reg); | 272 | writel(value, reg); |
271 | readl(reg); | 273 | readl(reg); |
272 | } | 274 | } |
273 | 275 | ||
274 | static void ql_disable_interrupts(struct ql3_adapter *qdev) | 276 | static void ql_disable_interrupts(struct ql3_adapter *qdev) |
275 | { | 277 | { |
276 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 278 | struct ql3xxx_port_registers __iomem *port_regs = |
279 | qdev->mem_map_registers; | ||
277 | 280 | ||
278 | ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, | 281 | ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, |
279 | (ISP_IMR_ENABLE_INT << 16)); | 282 | (ISP_IMR_ENABLE_INT << 16)); |
@@ -282,7 +285,8 @@ static void ql_disable_interrupts(struct ql3_adapter *qdev)
282 | 285 | ||
283 | static void ql_enable_interrupts(struct ql3_adapter *qdev) | 286 | static void ql_enable_interrupts(struct ql3_adapter *qdev) |
284 | { | 287 | { |
285 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 288 | struct ql3xxx_port_registers __iomem *port_regs = |
289 | qdev->mem_map_registers; | ||
286 | 290 | ||
287 | ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, | 291 | ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, |
288 | ((0xff << 16) | ISP_IMR_ENABLE_INT)); | 292 | ((0xff << 16) | ISP_IMR_ENABLE_INT)); |
@@ -321,7 +325,7 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
321 | QL_HEADER_SPACE, | 325 | QL_HEADER_SPACE, |
322 | PCI_DMA_FROMDEVICE); | 326 | PCI_DMA_FROMDEVICE); |
323 | err = pci_dma_mapping_error(qdev->pdev, map); | 327 | err = pci_dma_mapping_error(qdev->pdev, map); |
324 | if(err) { | 328 | if (err) { |
325 | netdev_err(qdev->ndev, | 329 | netdev_err(qdev->ndev, |
326 | "PCI mapping failed with error: %d\n", | 330 | "PCI mapping failed with error: %d\n", |
327 | err); | 331 | err); |
@@ -349,10 +353,11 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
349 | static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter | 353 | static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter |
350 | *qdev) | 354 | *qdev) |
351 | { | 355 | { |
352 | struct ql_rcv_buf_cb *lrg_buf_cb; | 356 | struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head; |
353 | 357 | ||
354 | if ((lrg_buf_cb = qdev->lrg_buf_free_head) != NULL) { | 358 | if (lrg_buf_cb != NULL) { |
355 | if ((qdev->lrg_buf_free_head = lrg_buf_cb->next) == NULL) | 359 | qdev->lrg_buf_free_head = lrg_buf_cb->next; |
360 | if (qdev->lrg_buf_free_head == NULL) | ||
356 | qdev->lrg_buf_free_tail = NULL; | 361 | qdev->lrg_buf_free_tail = NULL; |
357 | qdev->lrg_buf_free_count--; | 362 | qdev->lrg_buf_free_count--; |
358 | } | 363 | } |
@@ -373,13 +378,13 @@ static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
373 | static void fm93c56a_select(struct ql3_adapter *qdev) | 378 | static void fm93c56a_select(struct ql3_adapter *qdev) |
374 | { | 379 | { |
375 | struct ql3xxx_port_registers __iomem *port_regs = | 380 | struct ql3xxx_port_registers __iomem *port_regs = |
376 | qdev->mem_map_registers; | 381 | qdev->mem_map_registers; |
382 | u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; | ||
377 | 383 | ||
378 | qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1; | 384 | qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1; |
379 | ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, | 385 | ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); |
380 | ISP_NVRAM_MASK | qdev->eeprom_cmd_data); | 386 | ql_write_nvram_reg(qdev, spir, |
381 | ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, | 387 | ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data)); |
382 | ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data)); | ||
383 | } | 388 | } |
384 | 389 | ||
385 | /* | 390 | /* |
@@ -392,51 +397,40 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
392 | u32 dataBit; | 397 | u32 dataBit; |
393 | u32 previousBit; | 398 | u32 previousBit; |
394 | struct ql3xxx_port_registers __iomem *port_regs = | 399 | struct ql3xxx_port_registers __iomem *port_regs = |
395 | qdev->mem_map_registers; | 400 | qdev->mem_map_registers; |
401 | u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; | ||
396 | 402 | ||
397 | /* Clock in a zero, then do the start bit */ | 403 | /* Clock in a zero, then do the start bit */ |
398 | ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, | 404 | ql_write_nvram_reg(qdev, spir, |
399 | ISP_NVRAM_MASK | qdev->eeprom_cmd_data | | 405 | (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | |
400 | AUBURN_EEPROM_DO_1); | 406 | AUBURN_EEPROM_DO_1)); |
401 | ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, | 407 | ql_write_nvram_reg(qdev, spir, |
402 | ISP_NVRAM_MASK | qdev-> | 408 | (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | |
403 | eeprom_cmd_data | AUBURN_EEPROM_DO_1 | | 409 | AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE)); |
404 | AUBURN_EEPROM_CLK_RISE); | 410 | ql_write_nvram_reg(qdev, spir, |
405 | ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, | 411 | (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | |
406 | ISP_NVRAM_MASK | qdev-> | 412 | AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL)); |
407 | eeprom_cmd_data | AUBURN_EEPROM_DO_1 | | ||
408 | AUBURN_EEPROM_CLK_FALL); | ||
409 | 413 | ||
410 | mask = 1 << (FM93C56A_CMD_BITS - 1); | 414 | mask = 1 << (FM93C56A_CMD_BITS - 1); |
411 | /* Force the previous data bit to be different */ | 415 | /* Force the previous data bit to be different */ |
412 | previousBit = 0xffff; | 416 | previousBit = 0xffff; |
413 | for (i = 0; i < FM93C56A_CMD_BITS; i++) { | 417 | for (i = 0; i < FM93C56A_CMD_BITS; i++) { |
414 | dataBit = | 418 | dataBit = (cmd & mask) |
415 | (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0; | 419 | ? AUBURN_EEPROM_DO_1 |
420 | : AUBURN_EEPROM_DO_0; | ||
416 | if (previousBit != dataBit) { | 421 | if (previousBit != dataBit) { |
417 | /* | 422 | /* If the bit changed, change the DO state to match */ |
418 | * If the bit changed, then change the DO state to | 423 | ql_write_nvram_reg(qdev, spir, |
419 | * match | 424 | (ISP_NVRAM_MASK | |
420 | */ | 425 | qdev->eeprom_cmd_data | dataBit)); |
421 | ql_write_nvram_reg(qdev, | ||
422 | &port_regs->CommonRegs. | ||
423 | serialPortInterfaceReg, | ||
424 | ISP_NVRAM_MASK | qdev-> | ||
425 | eeprom_cmd_data | dataBit); | ||
426 | previousBit = dataBit; | 426 | previousBit = dataBit; |
427 | } | 427 | } |
428 | ql_write_nvram_reg(qdev, | 428 | ql_write_nvram_reg(qdev, spir, |
429 | &port_regs->CommonRegs. | 429 | (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | |
430 | serialPortInterfaceReg, | 430 | dataBit | AUBURN_EEPROM_CLK_RISE)); |
431 | ISP_NVRAM_MASK | qdev-> | 431 | ql_write_nvram_reg(qdev, spir, |
432 | eeprom_cmd_data | dataBit | | 432 | (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | |
433 | AUBURN_EEPROM_CLK_RISE); | 433 | dataBit | AUBURN_EEPROM_CLK_FALL)); |
434 | ql_write_nvram_reg(qdev, | ||
435 | &port_regs->CommonRegs. | ||
436 | serialPortInterfaceReg, | ||
437 | ISP_NVRAM_MASK | qdev-> | ||
438 | eeprom_cmd_data | dataBit | | ||
439 | AUBURN_EEPROM_CLK_FALL); | ||
440 | cmd = cmd << 1; | 434 | cmd = cmd << 1; |
441 | } | 435 | } |
442 | 436 | ||
@@ -444,33 +438,24 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
444 | /* Force the previous data bit to be different */ | 438 | /* Force the previous data bit to be different */ |
445 | previousBit = 0xffff; | 439 | previousBit = 0xffff; |
446 | for (i = 0; i < addrBits; i++) { | 440 | for (i = 0; i < addrBits; i++) { |
447 | dataBit = | 441 | dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 |
448 | (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 : | 442 | : AUBURN_EEPROM_DO_0; |
449 | AUBURN_EEPROM_DO_0; | ||
450 | if (previousBit != dataBit) { | 443 | if (previousBit != dataBit) { |
451 | /* | 444 | /* |
452 | * If the bit changed, then change the DO state to | 445 | * If the bit changed, then change the DO state to |
453 | * match | 446 | * match |
454 | */ | 447 | */ |
455 | ql_write_nvram_reg(qdev, | 448 | ql_write_nvram_reg(qdev, spir, |
456 | &port_regs->CommonRegs. | 449 | (ISP_NVRAM_MASK | |
457 | serialPortInterfaceReg, | 450 | qdev->eeprom_cmd_data | dataBit)); |
458 | ISP_NVRAM_MASK | qdev-> | ||
459 | eeprom_cmd_data | dataBit); | ||
460 | previousBit = dataBit; | 451 | previousBit = dataBit; |
461 | } | 452 | } |
462 | ql_write_nvram_reg(qdev, | 453 | ql_write_nvram_reg(qdev, spir, |
463 | &port_regs->CommonRegs. | 454 | (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | |
464 | serialPortInterfaceReg, | 455 | dataBit | AUBURN_EEPROM_CLK_RISE)); |
465 | ISP_NVRAM_MASK | qdev-> | 456 | ql_write_nvram_reg(qdev, spir, |
466 | eeprom_cmd_data | dataBit | | 457 | (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | |
467 | AUBURN_EEPROM_CLK_RISE); | 458 | dataBit | AUBURN_EEPROM_CLK_FALL)); |
468 | ql_write_nvram_reg(qdev, | ||
469 | &port_regs->CommonRegs. | ||
470 | serialPortInterfaceReg, | ||
471 | ISP_NVRAM_MASK | qdev-> | ||
472 | eeprom_cmd_data | dataBit | | ||
473 | AUBURN_EEPROM_CLK_FALL); | ||
474 | eepromAddr = eepromAddr << 1; | 459 | eepromAddr = eepromAddr << 1; |
475 | } | 460 | } |
476 | } | 461 | } |
@@ -481,10 +466,11 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
481 | static void fm93c56a_deselect(struct ql3_adapter *qdev) | 466 | static void fm93c56a_deselect(struct ql3_adapter *qdev) |
482 | { | 467 | { |
483 | struct ql3xxx_port_registers __iomem *port_regs = | 468 | struct ql3xxx_port_registers __iomem *port_regs = |
484 | qdev->mem_map_registers; | 469 | qdev->mem_map_registers; |
470 | u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; | ||
471 | |||
485 | qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0; | 472 | qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0; |
486 | ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, | 473 | ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); |
487 | ISP_NVRAM_MASK | qdev->eeprom_cmd_data); | ||
488 | } | 474 | } |
489 | 475 | ||
490 | /* | 476 | /* |
@@ -496,29 +482,23 @@ static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
496 | u32 data = 0; | 482 | u32 data = 0; |
497 | u32 dataBit; | 483 | u32 dataBit; |
498 | struct ql3xxx_port_registers __iomem *port_regs = | 484 | struct ql3xxx_port_registers __iomem *port_regs = |
499 | qdev->mem_map_registers; | 485 | qdev->mem_map_registers; |
486 | u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; | ||
500 | 487 | ||
501 | /* Read the data bits */ | 488 | /* Read the data bits */ |
502 | /* The first bit is a dummy. Clock right over it. */ | 489 | /* The first bit is a dummy. Clock right over it. */ |
503 | for (i = 0; i < dataBits; i++) { | 490 | for (i = 0; i < dataBits; i++) { |
504 | ql_write_nvram_reg(qdev, | 491 | ql_write_nvram_reg(qdev, spir, |
505 | &port_regs->CommonRegs. | 492 | ISP_NVRAM_MASK | qdev->eeprom_cmd_data | |
506 | serialPortInterfaceReg, | 493 | AUBURN_EEPROM_CLK_RISE); |
507 | ISP_NVRAM_MASK | qdev->eeprom_cmd_data | | 494 | ql_write_nvram_reg(qdev, spir, |
508 | AUBURN_EEPROM_CLK_RISE); | 495 | ISP_NVRAM_MASK | qdev->eeprom_cmd_data | |
509 | ql_write_nvram_reg(qdev, | 496 | AUBURN_EEPROM_CLK_FALL); |
510 | &port_regs->CommonRegs. | 497 | dataBit = (ql_read_common_reg(qdev, spir) & |
511 | serialPortInterfaceReg, | 498 | AUBURN_EEPROM_DI_1) ? 1 : 0; |
512 | ISP_NVRAM_MASK | qdev->eeprom_cmd_data | | ||
513 | AUBURN_EEPROM_CLK_FALL); | ||
514 | dataBit = | ||
515 | (ql_read_common_reg | ||
516 | (qdev, | ||
517 | &port_regs->CommonRegs. | ||
518 | serialPortInterfaceReg) & AUBURN_EEPROM_DI_1) ? 1 : 0; | ||
519 | data = (data << 1) | dataBit; | 499 | data = (data << 1) | dataBit; |
520 | } | 500 | } |
521 | *value = (u16) data; | 501 | *value = (u16)data; |
522 | } | 502 | } |
523 | 503 | ||
524 | /* | 504 | /* |
@@ -550,9 +530,9 @@ static int ql_get_nvram_params(struct ql3_adapter *qdev)
550 | 530 | ||
551 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | 531 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); |
552 | 532 | ||
553 | pEEPROMData = (u16 *) & qdev->nvram_data; | 533 | pEEPROMData = (u16 *)&qdev->nvram_data; |
554 | qdev->eeprom_cmd_data = 0; | 534 | qdev->eeprom_cmd_data = 0; |
555 | if(ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK, | 535 | if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK, |
556 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * | 536 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * |
557 | 2) << 10)) { | 537 | 2) << 10)) { |
558 | pr_err("%s: Failed ql_sem_spinlock()\n", __func__); | 538 | pr_err("%s: Failed ql_sem_spinlock()\n", __func__); |
@@ -585,7 +565,7 @@ static const u32 PHYAddr[2] = {
585 | static int ql_wait_for_mii_ready(struct ql3_adapter *qdev) | 565 | static int ql_wait_for_mii_ready(struct ql3_adapter *qdev) |
586 | { | 566 | { |
587 | struct ql3xxx_port_registers __iomem *port_regs = | 567 | struct ql3xxx_port_registers __iomem *port_regs = |
588 | qdev->mem_map_registers; | 568 | qdev->mem_map_registers; |
589 | u32 temp; | 569 | u32 temp; |
590 | int count = 1000; | 570 | int count = 1000; |
591 | 571 | ||
@@ -602,7 +582,7 @@ static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
602 | static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev) | 582 | static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev) |
603 | { | 583 | { |
604 | struct ql3xxx_port_registers __iomem *port_regs = | 584 | struct ql3xxx_port_registers __iomem *port_regs = |
605 | qdev->mem_map_registers; | 585 | qdev->mem_map_registers; |
606 | u32 scanControl; | 586 | u32 scanControl; |
607 | 587 | ||
608 | if (qdev->numPorts > 1) { | 588 | if (qdev->numPorts > 1) { |
@@ -630,7 +610,7 @@ static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
630 | { | 610 | { |
631 | u8 ret; | 611 | u8 ret; |
632 | struct ql3xxx_port_registers __iomem *port_regs = | 612 | struct ql3xxx_port_registers __iomem *port_regs = |
633 | qdev->mem_map_registers; | 613 | qdev->mem_map_registers; |
634 | 614 | ||
635 | /* See if scan mode is enabled before we turn it off */ | 615 | /* See if scan mode is enabled before we turn it off */ |
636 | if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) & | 616 | if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) & |
@@ -660,7 +640,7 @@ static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
660 | u16 regAddr, u16 value, u32 phyAddr) | 640 | u16 regAddr, u16 value, u32 phyAddr) |
661 | { | 641 | { |
662 | struct ql3xxx_port_registers __iomem *port_regs = | 642 | struct ql3xxx_port_registers __iomem *port_regs = |
663 | qdev->mem_map_registers; | 643 | qdev->mem_map_registers; |
664 | u8 scanWasEnabled; | 644 | u8 scanWasEnabled; |
665 | 645 | ||
666 | scanWasEnabled = ql_mii_disable_scan_mode(qdev); | 646 | scanWasEnabled = ql_mii_disable_scan_mode(qdev); |
@@ -688,10 +668,10 @@ static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
688 | } | 668 | } |
689 | 669 | ||
690 | static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr, | 670 | static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr, |
691 | u16 * value, u32 phyAddr) | 671 | u16 *value, u32 phyAddr) |
692 | { | 672 | { |
693 | struct ql3xxx_port_registers __iomem *port_regs = | 673 | struct ql3xxx_port_registers __iomem *port_regs = |
694 | qdev->mem_map_registers; | 674 | qdev->mem_map_registers; |
695 | u8 scanWasEnabled; | 675 | u8 scanWasEnabled; |
696 | u32 temp; | 676 | u32 temp; |
697 | 677 | ||
@@ -729,7 +709,7 @@ static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
729 | static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value) | 709 | static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value) |
730 | { | 710 | { |
731 | struct ql3xxx_port_registers __iomem *port_regs = | 711 | struct ql3xxx_port_registers __iomem *port_regs = |
732 | qdev->mem_map_registers; | 712 | qdev->mem_map_registers; |
733 | 713 | ||
734 | ql_mii_disable_scan_mode(qdev); | 714 | ql_mii_disable_scan_mode(qdev); |
735 | 715 | ||
@@ -758,7 +738,7 @@ static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
758 | { | 738 | { |
759 | u32 temp; | 739 | u32 temp; |
760 | struct ql3xxx_port_registers __iomem *port_regs = | 740 | struct ql3xxx_port_registers __iomem *port_regs = |
761 | qdev->mem_map_registers; | 741 | qdev->mem_map_registers; |
762 | 742 | ||
763 | ql_mii_disable_scan_mode(qdev); | 743 | ql_mii_disable_scan_mode(qdev); |
764 | 744 | ||
@@ -884,7 +864,8 @@ static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
884 | /* point to hidden reg 0x2806 */ | 864 | /* point to hidden reg 0x2806 */ |
885 | ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr); | 865 | ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr); |
886 | /* Write new PHYAD w/bit 5 set */ | 866 | /* Write new PHYAD w/bit 5 set */ |
887 | ql_mii_write_reg_ex(qdev, 0x11, 0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr); | 867 | ql_mii_write_reg_ex(qdev, 0x11, |
868 | 0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr); | ||
888 | /* | 869 | /* |
889 | * Disable diagnostic mode bit 2 = 0 | 870 | * Disable diagnostic mode bit 2 = 0 |
890 | * Power up device bit 11 = 0 | 871 | * Power up device bit 11 = 0 |
@@ -895,21 +876,19 @@ static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
895 | ql_mii_write_reg(qdev, 0x1c, 0xfaf0); | 876 | ql_mii_write_reg(qdev, 0x1c, 0xfaf0); |
896 | } | 877 | } |
897 | 878 | ||
898 | static PHY_DEVICE_et getPhyType (struct ql3_adapter *qdev, | 879 | static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev, |
899 | u16 phyIdReg0, u16 phyIdReg1) | 880 | u16 phyIdReg0, u16 phyIdReg1) |
900 | { | 881 | { |
901 | PHY_DEVICE_et result = PHY_TYPE_UNKNOWN; | 882 | enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN; |
902 | u32 oui; | 883 | u32 oui; |
903 | u16 model; | 884 | u16 model; |
904 | int i; | 885 | int i; |
905 | 886 | ||
906 | if (phyIdReg0 == 0xffff) { | 887 | if (phyIdReg0 == 0xffff) |
907 | return result; | 888 | return result; |
908 | } | ||
909 | 889 | ||
910 | if (phyIdReg1 == 0xffff) { | 890 | if (phyIdReg1 == 0xffff) |
911 | return result; | 891 | return result; |
912 | } | ||
913 | 892 | ||
914 | /* oui is split between two registers */ | 893 | /* oui is split between two registers */ |
915 | oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10); | 894 | oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10); |
@@ -917,15 +896,13 @@ static PHY_DEVICE_et getPhyType (struct ql3_adapter *qdev,
917 | model = (phyIdReg1 & PHY_MODEL_MASK) >> 4; | 896 | model = (phyIdReg1 & PHY_MODEL_MASK) >> 4; |
918 | 897 | ||
919 | /* Scan table for this PHY */ | 898 | /* Scan table for this PHY */ |
920 | for(i = 0; i < MAX_PHY_DEV_TYPES; i++) { | 899 | for (i = 0; i < MAX_PHY_DEV_TYPES; i++) { |
921 | if ((oui == PHY_DEVICES[i].phyIdOUI) && | 900 | if ((oui == PHY_DEVICES[i].phyIdOUI) && |
922 | (model == PHY_DEVICES[i].phyIdModel)) { | 901 | (model == PHY_DEVICES[i].phyIdModel)) { |
923 | result = PHY_DEVICES[i].phyDevice; | ||
924 | |||
925 | netdev_info(qdev->ndev, "Phy: %s\n", | 902 | netdev_info(qdev->ndev, "Phy: %s\n", |
926 | PHY_DEVICES[i].name); | 903 | PHY_DEVICES[i].name); |
927 | 904 | result = PHY_DEVICES[i].phyDevice; | |
928 | break; | 905 | break; |
929 | } | 906 | } |
930 | } | 907 | } |
931 | 908 | ||
@@ -936,9 +913,8 @@ static int ql_phy_get_speed(struct ql3_adapter *qdev)
936 | { | 913 | { |
937 | u16 reg; | 914 | u16 reg; |
938 | 915 | ||
939 | switch(qdev->phyType) { | 916 | switch (qdev->phyType) { |
940 | case PHY_AGERE_ET1011C: | 917 | case PHY_AGERE_ET1011C: { |
941 | { | ||
942 | if (ql_mii_read_reg(qdev, 0x1A, ®) < 0) | 918 | if (ql_mii_read_reg(qdev, 0x1A, ®) < 0) |
943 | return 0; | 919 | return 0; |
944 | 920 | ||
@@ -946,20 +922,20 @@ static int ql_phy_get_speed(struct ql3_adapter *qdev) | |||
946 | break; | 922 | break; |
947 | } | 923 | } |
948 | default: | 924 | default: |
949 | if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, ®) < 0) | 925 | if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, ®) < 0) |
950 | return 0; | 926 | return 0; |
951 | 927 | ||
952 | reg = (((reg & 0x18) >> 3) & 3); | 928 | reg = (((reg & 0x18) >> 3) & 3); |
953 | } | 929 | } |
954 | 930 | ||
955 | switch(reg) { | 931 | switch (reg) { |
956 | case 2: | 932 | case 2: |
957 | return SPEED_1000; | 933 | return SPEED_1000; |
958 | case 1: | 934 | case 1: |
959 | return SPEED_100; | 935 | return SPEED_100; |
960 | case 0: | 936 | case 0: |
961 | return SPEED_10; | 937 | return SPEED_10; |
962 | default: | 938 | default: |
963 | return -1; | 939 | return -1; |
964 | } | 940 | } |
965 | } | 941 | } |
@@ -968,17 +944,15 @@ static int ql_is_full_dup(struct ql3_adapter *qdev)
968 | { | 944 | { |
969 | u16 reg; | 945 | u16 reg; |
970 | 946 | ||
971 | switch(qdev->phyType) { | 947 | switch (qdev->phyType) { |
972 | case PHY_AGERE_ET1011C: | 948 | case PHY_AGERE_ET1011C: { |
973 | { | ||
974 | if (ql_mii_read_reg(qdev, 0x1A, ®)) | 949 | if (ql_mii_read_reg(qdev, 0x1A, ®)) |
975 | return 0; | 950 | return 0; |
976 | 951 | ||
977 | return ((reg & 0x0080) && (reg & 0x1000)) != 0; | 952 | return ((reg & 0x0080) && (reg & 0x1000)) != 0; |
978 | } | 953 | } |
979 | case PHY_VITESSE_VSC8211: | 954 | case PHY_VITESSE_VSC8211: |
980 | default: | 955 | default: { |
981 | { | ||
982 | if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, ®) < 0) | 956 | if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, ®) < 0) |
983 | return 0; | 957 | return 0; |
984 | return (reg & PHY_AUX_DUPLEX_STAT) != 0; | 958 | return (reg & PHY_AUX_DUPLEX_STAT) != 0; |
@@ -1006,15 +980,15 @@ static int PHY_Setup(struct ql3_adapter *qdev)
1006 | 980 | ||
1007 | /* Determine the PHY we are using by reading the ID's */ | 981 | /* Determine the PHY we are using by reading the ID's */ |
1008 | err = ql_mii_read_reg(qdev, PHY_ID_0_REG, ®1); | 982 | err = ql_mii_read_reg(qdev, PHY_ID_0_REG, ®1); |
1009 | if(err != 0) { | 983 | if (err != 0) { |
1010 | netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n"); | 984 | netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n"); |
1011 | return err; | 985 | return err; |
1012 | } | 986 | } |
1013 | 987 | ||
1014 | err = ql_mii_read_reg(qdev, PHY_ID_1_REG, ®2); | 988 | err = ql_mii_read_reg(qdev, PHY_ID_1_REG, ®2); |
1015 | if(err != 0) { | 989 | if (err != 0) { |
1016 | netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n"); | 990 | netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n"); |
1017 | return err; | 991 | return err; |
1018 | } | 992 | } |
1019 | 993 | ||
1020 | /* Check if we have a Agere PHY */ | 994 | /* Check if we have a Agere PHY */ |
@@ -1022,23 +996,22 @@ static int PHY_Setup(struct ql3_adapter *qdev)
1022 | 996 | ||
1023 | /* Determine which MII address we should be using | 997 | /* Determine which MII address we should be using |
1024 | determined by the index of the card */ | 998 | determined by the index of the card */ |
1025 | if (qdev->mac_index == 0) { | 999 | if (qdev->mac_index == 0) |
1026 | miiAddr = MII_AGERE_ADDR_1; | 1000 | miiAddr = MII_AGERE_ADDR_1; |
1027 | } else { | 1001 | else |
1028 | miiAddr = MII_AGERE_ADDR_2; | 1002 | miiAddr = MII_AGERE_ADDR_2; |
1029 | } | ||
1030 | 1003 | ||
1031 | err =ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, ®1, miiAddr); | 1004 | err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, ®1, miiAddr); |
1032 | if(err != 0) { | 1005 | if (err != 0) { |
1033 | netdev_err(qdev->ndev, | 1006 | netdev_err(qdev->ndev, |
1034 | "Could not read from reg PHY_ID_0_REG after Agere detected\n"); | 1007 | "Could not read from reg PHY_ID_0_REG after Agere detected\n"); |
1035 | return err; | 1008 | return err; |
1036 | } | 1009 | } |
1037 | 1010 | ||
1038 | err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, ®2, miiAddr); | 1011 | err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, ®2, miiAddr); |
1039 | if(err != 0) { | 1012 | if (err != 0) { |
1040 | netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n"); | 1013 | netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n"); |
1041 | return err; | 1014 | return err; |
1042 | } | 1015 | } |
1043 | 1016 | ||
1044 | /* We need to remember to initialize the Agere PHY */ | 1017 | /* We need to remember to initialize the Agere PHY */ |
@@ -1066,7 +1039,7 @@ static int PHY_Setup(struct ql3_adapter *qdev)
1066 | static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable) | 1039 | static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable) |
1067 | { | 1040 | { |
1068 | struct ql3xxx_port_registers __iomem *port_regs = | 1041 | struct ql3xxx_port_registers __iomem *port_regs = |
1069 | qdev->mem_map_registers; | 1042 | qdev->mem_map_registers; |
1070 | u32 value; | 1043 | u32 value; |
1071 | 1044 | ||
1072 | if (enable) | 1045 | if (enable) |
@@ -1086,7 +1059,7 @@ static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
1086 | static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable) | 1059 | static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable) |
1087 | { | 1060 | { |
1088 | struct ql3xxx_port_registers __iomem *port_regs = | 1061 | struct ql3xxx_port_registers __iomem *port_regs = |
1089 | qdev->mem_map_registers; | 1062 | qdev->mem_map_registers; |
1090 | u32 value; | 1063 | u32 value; |
1091 | 1064 | ||
1092 | if (enable) | 1065 | if (enable) |
@@ -1106,7 +1079,7 @@ static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
1106 | static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable) | 1079 | static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable) |
1107 | { | 1080 | { |
1108 | struct ql3xxx_port_registers __iomem *port_regs = | 1081 | struct ql3xxx_port_registers __iomem *port_regs = |
1109 | qdev->mem_map_registers; | 1082 | qdev->mem_map_registers; |
1110 | u32 value; | 1083 | u32 value; |
1111 | 1084 | ||
1112 | if (enable) | 1085 | if (enable) |
@@ -1126,7 +1099,7 @@ static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
1126 | static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable) | 1099 | static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable) |
1127 | { | 1100 | { |
1128 | struct ql3xxx_port_registers __iomem *port_regs = | 1101 | struct ql3xxx_port_registers __iomem *port_regs = |
1129 | qdev->mem_map_registers; | 1102 | qdev->mem_map_registers; |
1130 | u32 value; | 1103 | u32 value; |
1131 | 1104 | ||
1132 | if (enable) | 1105 | if (enable) |
@@ -1146,7 +1119,7 @@ static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
1146 | static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable) | 1119 | static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable) |
1147 | { | 1120 | { |
1148 | struct ql3xxx_port_registers __iomem *port_regs = | 1121 | struct ql3xxx_port_registers __iomem *port_regs = |
1149 | qdev->mem_map_registers; | 1122 | qdev->mem_map_registers; |
1150 | u32 value; | 1123 | u32 value; |
1151 | 1124 | ||
1152 | if (enable) | 1125 | if (enable) |
@@ -1168,7 +1141,7 @@ static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
1168 | static int ql_is_fiber(struct ql3_adapter *qdev) | 1141 | static int ql_is_fiber(struct ql3_adapter *qdev) |
1169 | { | 1142 | { |
1170 | struct ql3xxx_port_registers __iomem *port_regs = | 1143 | struct ql3xxx_port_registers __iomem *port_regs = |
1171 | qdev->mem_map_registers; | 1144 | qdev->mem_map_registers; |
1172 | u32 bitToCheck = 0; | 1145 | u32 bitToCheck = 0; |
1173 | u32 temp; | 1146 | u32 temp; |
1174 | 1147 | ||
@@ -1198,7 +1171,7 @@ static int ql_is_auto_cfg(struct ql3_adapter *qdev)
1198 | static int ql_is_auto_neg_complete(struct ql3_adapter *qdev) | 1171 | static int ql_is_auto_neg_complete(struct ql3_adapter *qdev) |
1199 | { | 1172 | { |
1200 | struct ql3xxx_port_registers __iomem *port_regs = | 1173 | struct ql3xxx_port_registers __iomem *port_regs = |
1201 | qdev->mem_map_registers; | 1174 | qdev->mem_map_registers; |
1202 | u32 bitToCheck = 0; | 1175 | u32 bitToCheck = 0; |
1203 | u32 temp; | 1176 | u32 temp; |
1204 | 1177 | ||
@@ -1234,7 +1207,7 @@ static int ql_is_neg_pause(struct ql3_adapter *qdev)
1234 | static int ql_auto_neg_error(struct ql3_adapter *qdev) | 1207 | static int ql_auto_neg_error(struct ql3_adapter *qdev) |
1235 | { | 1208 | { |
1236 | struct ql3xxx_port_registers __iomem *port_regs = | 1209 | struct ql3xxx_port_registers __iomem *port_regs = |
1237 | qdev->mem_map_registers; | 1210 | qdev->mem_map_registers; |
1238 | u32 bitToCheck = 0; | 1211 | u32 bitToCheck = 0; |
1239 | u32 temp; | 1212 | u32 temp; |
1240 | 1213 | ||
@@ -1272,7 +1245,7 @@ static int ql_is_link_full_dup(struct ql3_adapter *qdev)
1272 | static int ql_link_down_detect(struct ql3_adapter *qdev) | 1245 | static int ql_link_down_detect(struct ql3_adapter *qdev) |
1273 | { | 1246 | { |
1274 | struct ql3xxx_port_registers __iomem *port_regs = | 1247 | struct ql3xxx_port_registers __iomem *port_regs = |
1275 | qdev->mem_map_registers; | 1248 | qdev->mem_map_registers; |
1276 | u32 bitToCheck = 0; | 1249 | u32 bitToCheck = 0; |
1277 | u32 temp; | 1250 | u32 temp; |
1278 | 1251 | ||
@@ -1296,7 +1269,7 @@ static int ql_link_down_detect(struct ql3_adapter *qdev)
1296 | static int ql_link_down_detect_clear(struct ql3_adapter *qdev) | 1269 | static int ql_link_down_detect_clear(struct ql3_adapter *qdev) |
1297 | { | 1270 | { |
1298 | struct ql3xxx_port_registers __iomem *port_regs = | 1271 | struct ql3xxx_port_registers __iomem *port_regs = |
1299 | qdev->mem_map_registers; | 1272 | qdev->mem_map_registers; |
1300 | 1273 | ||
1301 | switch (qdev->mac_index) { | 1274 | switch (qdev->mac_index) { |
1302 | case 0: | 1275 | case 0: |
@@ -1326,7 +1299,7 @@ static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
1326 | static int ql_this_adapter_controls_port(struct ql3_adapter *qdev) | 1299 | static int ql_this_adapter_controls_port(struct ql3_adapter *qdev) |
1327 | { | 1300 | { |
1328 | struct ql3xxx_port_registers __iomem *port_regs = | 1301 | struct ql3xxx_port_registers __iomem *port_regs = |
1329 | qdev->mem_map_registers; | 1302 | qdev->mem_map_registers; |
1330 | u32 bitToCheck = 0; | 1303 | u32 bitToCheck = 0; |
1331 | u32 temp; | 1304 | u32 temp; |
1332 | 1305 | ||
@@ -1363,19 +1336,20 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
1363 | u16 reg; | 1336 | u16 reg; |
1364 | u16 portConfiguration; | 1337 | u16 portConfiguration; |
1365 | 1338 | ||
1366 | if(qdev->phyType == PHY_AGERE_ET1011C) { | 1339 | if (qdev->phyType == PHY_AGERE_ET1011C) |
1367 | /* turn off external loopback */ | ||
1368 | ql_mii_write_reg(qdev, 0x13, 0x0000); | 1340 | ql_mii_write_reg(qdev, 0x13, 0x0000); |
1369 | } | 1341 | /* turn off external loopback */ |
1370 | 1342 | ||
1371 | if(qdev->mac_index == 0) | 1343 | if (qdev->mac_index == 0) |
1372 | portConfiguration = qdev->nvram_data.macCfg_port0.portConfiguration; | 1344 | portConfiguration = |
1345 | qdev->nvram_data.macCfg_port0.portConfiguration; | ||
1373 | else | 1346 | else |
1374 | portConfiguration = qdev->nvram_data.macCfg_port1.portConfiguration; | 1347 | portConfiguration = |
1348 | qdev->nvram_data.macCfg_port1.portConfiguration; | ||
1375 | 1349 | ||
1376 | /* Some HBA's in the field are set to 0 and they need to | 1350 | /* Some HBA's in the field are set to 0 and they need to |
1377 | be reinterpreted with a default value */ | 1351 | be reinterpreted with a default value */ |
1378 | if(portConfiguration == 0) | 1352 | if (portConfiguration == 0) |
1379 | portConfiguration = PORT_CONFIG_DEFAULT; | 1353 | portConfiguration = PORT_CONFIG_DEFAULT; |
1380 | 1354 | ||
1381 | /* Set the 1000 advertisements */ | 1355 | /* Set the 1000 advertisements */ |
@@ -1383,8 +1357,8 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
1383 | PHYAddr[qdev->mac_index]); | 1357 | PHYAddr[qdev->mac_index]); |
1384 | reg &= ~PHY_GIG_ALL_PARAMS; | 1358 | reg &= ~PHY_GIG_ALL_PARAMS; |
1385 | 1359 | ||
1386 | if(portConfiguration & PORT_CONFIG_1000MB_SPEED) { | 1360 | if (portConfiguration & PORT_CONFIG_1000MB_SPEED) { |
1387 | if(portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) | 1361 | if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) |
1388 | reg |= PHY_GIG_ADV_1000F; | 1362 | reg |= PHY_GIG_ADV_1000F; |
1389 | else | 1363 | else |
1390 | reg |= PHY_GIG_ADV_1000H; | 1364 | reg |= PHY_GIG_ADV_1000H; |
@@ -1398,29 +1372,27 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
1398 | PHYAddr[qdev->mac_index]); | 1372 | PHYAddr[qdev->mac_index]); |
1399 | reg &= ~PHY_NEG_ALL_PARAMS; | 1373 | reg &= ~PHY_NEG_ALL_PARAMS; |
1400 | 1374 | ||
1401 | if(portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED) | 1375 | if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED) |
1402 | reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE; | 1376 | reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE; |
1403 | 1377 | ||
1404 | if(portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) { | 1378 | if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) { |
1405 | if(portConfiguration & PORT_CONFIG_100MB_SPEED) | 1379 | if (portConfiguration & PORT_CONFIG_100MB_SPEED) |
1406 | reg |= PHY_NEG_ADV_100F; | 1380 | reg |= PHY_NEG_ADV_100F; |
1407 | 1381 | ||
1408 | if(portConfiguration & PORT_CONFIG_10MB_SPEED) | 1382 | if (portConfiguration & PORT_CONFIG_10MB_SPEED) |
1409 | reg |= PHY_NEG_ADV_10F; | 1383 | reg |= PHY_NEG_ADV_10F; |
1410 | } | 1384 | } |
1411 | 1385 | ||
1412 | if(portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) { | 1386 | if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) { |
1413 | if(portConfiguration & PORT_CONFIG_100MB_SPEED) | 1387 | if (portConfiguration & PORT_CONFIG_100MB_SPEED) |
1414 | reg |= PHY_NEG_ADV_100H; | 1388 | reg |= PHY_NEG_ADV_100H; |
1415 | 1389 | ||
1416 | if(portConfiguration & PORT_CONFIG_10MB_SPEED) | 1390 | if (portConfiguration & PORT_CONFIG_10MB_SPEED) |
1417 | reg |= PHY_NEG_ADV_10H; | 1391 | reg |= PHY_NEG_ADV_10H; |
1418 | } | 1392 | } |
1419 | 1393 | ||
1420 | if(portConfiguration & | 1394 | if (portConfiguration & PORT_CONFIG_1000MB_SPEED) |
1421 | PORT_CONFIG_1000MB_SPEED) { | ||
1422 | reg |= 1; | 1395 | reg |= 1; |
1423 | } | ||
1424 | 1396 | ||
1425 | ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg, | 1397 | ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg, |
1426 | PHYAddr[qdev->mac_index]); | 1398 | PHYAddr[qdev->mac_index]); |
@@ -1445,7 +1417,7 @@ static void ql_phy_init_ex(struct ql3_adapter *qdev)
1445 | static u32 ql_get_link_state(struct ql3_adapter *qdev) | 1417 | static u32 ql_get_link_state(struct ql3_adapter *qdev) |
1446 | { | 1418 | { |
1447 | struct ql3xxx_port_registers __iomem *port_regs = | 1419 | struct ql3xxx_port_registers __iomem *port_regs = |
1448 | qdev->mem_map_registers; | 1420 | qdev->mem_map_registers; |
1449 | u32 bitToCheck = 0; | 1421 | u32 bitToCheck = 0; |
1450 | u32 temp, linkState; | 1422 | u32 temp, linkState; |
1451 | 1423 | ||
@@ -1457,18 +1429,19 @@ static u32 ql_get_link_state(struct ql3_adapter *qdev)
1457 | bitToCheck = PORT_STATUS_UP1; | 1429 | bitToCheck = PORT_STATUS_UP1; |
1458 | break; | 1430 | break; |
1459 | } | 1431 | } |
1432 | |||
1460 | temp = ql_read_page0_reg(qdev, &port_regs->portStatus); | 1433 | temp = ql_read_page0_reg(qdev, &port_regs->portStatus); |
1461 | if (temp & bitToCheck) { | 1434 | if (temp & bitToCheck) |
1462 | linkState = LS_UP; | 1435 | linkState = LS_UP; |
1463 | } else { | 1436 | else |
1464 | linkState = LS_DOWN; | 1437 | linkState = LS_DOWN; |
1465 | } | 1438 | |
1466 | return linkState; | 1439 | return linkState; |
1467 | } | 1440 | } |
1468 | 1441 | ||
1469 | static int ql_port_start(struct ql3_adapter *qdev) | 1442 | static int ql_port_start(struct ql3_adapter *qdev) |
1470 | { | 1443 | { |
1471 | if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, | 1444 | if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, |
1472 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * | 1445 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * |
1473 | 2) << 7)) { | 1446 | 2) << 7)) { |
1474 | netdev_err(qdev->ndev, "Could not get hw lock for GIO\n"); | 1447 | netdev_err(qdev->ndev, "Could not get hw lock for GIO\n"); |
@@ -1489,13 +1462,13 @@ static int ql_port_start(struct ql3_adapter *qdev)
1489 | static int ql_finish_auto_neg(struct ql3_adapter *qdev) | 1462 | static int ql_finish_auto_neg(struct ql3_adapter *qdev) |
1490 | { | 1463 | { |
1491 | 1464 | ||
1492 | if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, | 1465 | if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, |
1493 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * | 1466 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * |
1494 | 2) << 7)) | 1467 | 2) << 7)) |
1495 | return -1; | 1468 | return -1; |
1496 | 1469 | ||
1497 | if (!ql_auto_neg_error(qdev)) { | 1470 | if (!ql_auto_neg_error(qdev)) { |
1498 | if (test_bit(QL_LINK_MASTER,&qdev->flags)) { | 1471 | if (test_bit(QL_LINK_MASTER, &qdev->flags)) { |
1499 | /* configure the MAC */ | 1472 | /* configure the MAC */ |
1500 | netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, | 1473 | netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, |
1501 | "Configuring link\n"); | 1474 | "Configuring link\n"); |
@@ -1528,7 +1501,7 @@ static int ql_finish_auto_neg(struct ql3_adapter *qdev)
1528 | 1501 | ||
1529 | } else { /* Remote error detected */ | 1502 | } else { /* Remote error detected */ |
1530 | 1503 | ||
1531 | if (test_bit(QL_LINK_MASTER,&qdev->flags)) { | 1504 | if (test_bit(QL_LINK_MASTER, &qdev->flags)) { |
1532 | netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, | 1505 | netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, |
1533 | "Remote error detected. Calling ql_port_start()\n"); | 1506 | "Remote error detected. Calling ql_port_start()\n"); |
1534 | /* | 1507 | /* |
@@ -1536,10 +1509,9 @@ static int ql_finish_auto_neg(struct ql3_adapter *qdev)
1536 | * to lock the PHY on it's own. | 1509 | * to lock the PHY on it's own. |
1537 | */ | 1510 | */ |
1538 | ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); | 1511 | ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); |
1539 | if(ql_port_start(qdev)) {/* Restart port */ | 1512 | if (ql_port_start(qdev)) /* Restart port */ |
1540 | return -1; | 1513 | return -1; |
1541 | } else | 1514 | return 0; |
1542 | return 0; | ||
1543 | } | 1515 | } |
1544 | } | 1516 | } |
1545 | ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); | 1517 | ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); |
@@ -1558,7 +1530,7 @@ static void ql_link_state_machine_work(struct work_struct *work)
1558 | 1530 | ||
1559 | curr_link_state = ql_get_link_state(qdev); | 1531 | curr_link_state = ql_get_link_state(qdev); |
1560 | 1532 | ||
1561 | if (test_bit(QL_RESET_ACTIVE,&qdev->flags)) { | 1533 | if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) { |
1562 | netif_info(qdev, link, qdev->ndev, | 1534 | netif_info(qdev, link, qdev->ndev, |
1563 | "Reset in progress, skip processing link state\n"); | 1535 | "Reset in progress, skip processing link state\n"); |
1564 | 1536 | ||
@@ -1572,9 +1544,8 @@ static void ql_link_state_machine_work(struct work_struct *work)
1572 | 1544 | ||
1573 | switch (qdev->port_link_state) { | 1545 | switch (qdev->port_link_state) { |
1574 | default: | 1546 | default: |
1575 | if (test_bit(QL_LINK_MASTER,&qdev->flags)) { | 1547 | if (test_bit(QL_LINK_MASTER, &qdev->flags)) |
1576 | ql_port_start(qdev); | 1548 | ql_port_start(qdev); |
1577 | } | ||
1578 | qdev->port_link_state = LS_DOWN; | 1549 | qdev->port_link_state = LS_DOWN; |
1579 | /* Fall Through */ | 1550 | /* Fall Through */ |
1580 | 1551 | ||
@@ -1616,9 +1587,9 @@ static void ql_link_state_machine_work(struct work_struct *work)
1616 | static void ql_get_phy_owner(struct ql3_adapter *qdev) | 1587 | static void ql_get_phy_owner(struct ql3_adapter *qdev) |
1617 | { | 1588 | { |
1618 | if (ql_this_adapter_controls_port(qdev)) | 1589 | if (ql_this_adapter_controls_port(qdev)) |
1619 | set_bit(QL_LINK_MASTER,&qdev->flags); | 1590 | set_bit(QL_LINK_MASTER, &qdev->flags); |
1620 | else | 1591 | else |
1621 | clear_bit(QL_LINK_MASTER,&qdev->flags); | 1592 | clear_bit(QL_LINK_MASTER, &qdev->flags); |
1622 | } | 1593 | } |
1623 | 1594 | ||
1624 | /* | 1595 | /* |
@@ -1628,7 +1599,7 @@ static void ql_init_scan_mode(struct ql3_adapter *qdev)
1628 | { | 1599 | { |
1629 | ql_mii_enable_scan_mode(qdev); | 1600 | ql_mii_enable_scan_mode(qdev); |
1630 | 1601 | ||
1631 | if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) { | 1602 | if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { |
1632 | if (ql_this_adapter_controls_port(qdev)) | 1603 | if (ql_this_adapter_controls_port(qdev)) |
1633 | ql_petbi_init_ex(qdev); | 1604 | ql_petbi_init_ex(qdev); |
1634 | } else { | 1605 | } else { |
@@ -1638,18 +1609,18 @@ static void ql_init_scan_mode(struct ql3_adapter *qdev)
1638 | } | 1609 | } |
1639 | 1610 | ||
1640 | /* | 1611 | /* |
1641 | * MII_Setup needs to be called before taking the PHY out of reset so that the | 1612 | * MII_Setup needs to be called before taking the PHY out of reset |
1642 | * management interface clock speed can be set properly. It would be better if | 1613 | * so that the management interface clock speed can be set properly. |
1643 | * we had a way to disable MDC until after the PHY is out of reset, but we | 1614 | * It would be better if we had a way to disable MDC until after the |
1644 | * don't have that capability. | 1615 | * PHY is out of reset, but we don't have that capability. |
1645 | */ | 1616 | */ |
1646 | static int ql_mii_setup(struct ql3_adapter *qdev) | 1617 | static int ql_mii_setup(struct ql3_adapter *qdev) |
1647 | { | 1618 | { |
1648 | u32 reg; | 1619 | u32 reg; |
1649 | struct ql3xxx_port_registers __iomem *port_regs = | 1620 | struct ql3xxx_port_registers __iomem *port_regs = |
1650 | qdev->mem_map_registers; | 1621 | qdev->mem_map_registers; |
1651 | 1622 | ||
1652 | if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, | 1623 | if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, |
1653 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * | 1624 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * |
1654 | 2) << 7)) | 1625 | 2) << 7)) |
1655 | return -1; | 1626 | return -1; |
@@ -1668,24 +1639,24 @@ static int ql_mii_setup(struct ql3_adapter *qdev)
1668 | return 0; | 1639 | return 0; |
1669 | } | 1640 | } |
1670 | 1641 | ||
1642 | #define SUPPORTED_OPTICAL_MODES (SUPPORTED_1000baseT_Full | \ | ||
1643 | SUPPORTED_FIBRE | \ | ||
1644 | SUPPORTED_Autoneg) | ||
1645 | #define SUPPORTED_TP_MODES (SUPPORTED_10baseT_Half | \ | ||
1646 | SUPPORTED_10baseT_Full | \ | ||
1647 | SUPPORTED_100baseT_Half | \ | ||
1648 | SUPPORTED_100baseT_Full | \ | ||
1649 | SUPPORTED_1000baseT_Half | \ | ||
1650 | SUPPORTED_1000baseT_Full | \ | ||
1651 | SUPPORTED_Autoneg | \ | ||
1652 | SUPPORTED_TP); \ | ||
1653 | |||
1671 | static u32 ql_supported_modes(struct ql3_adapter *qdev) | 1654 | static u32 ql_supported_modes(struct ql3_adapter *qdev) |
1672 | { | 1655 | { |
1673 | u32 supported; | 1656 | if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) |
1674 | 1657 | return SUPPORTED_OPTICAL_MODES; | |
1675 | if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) { | ||
1676 | supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE | ||
1677 | | SUPPORTED_Autoneg; | ||
1678 | } else { | ||
1679 | supported = SUPPORTED_10baseT_Half | ||
1680 | | SUPPORTED_10baseT_Full | ||
1681 | | SUPPORTED_100baseT_Half | ||
1682 | | SUPPORTED_100baseT_Full | ||
1683 | | SUPPORTED_1000baseT_Half | ||
1684 | | SUPPORTED_1000baseT_Full | ||
1685 | | SUPPORTED_Autoneg | SUPPORTED_TP; | ||
1686 | } | ||
1687 | 1658 | ||
1688 | return supported; | 1659 | return SUPPORTED_TP_MODES; |
1689 | } | 1660 | } |
1690 | 1661 | ||
1691 | static int ql_get_auto_cfg_status(struct ql3_adapter *qdev) | 1662 | static int ql_get_auto_cfg_status(struct ql3_adapter *qdev) |
@@ -1693,9 +1664,9 @@ static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
1693 | int status; | 1664 | int status; |
1694 | unsigned long hw_flags; | 1665 | unsigned long hw_flags; |
1695 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | 1666 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); |
1696 | if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, | 1667 | if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, |
1697 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * | 1668 | (QL_RESOURCE_BITS_BASE_CODE | |
1698 | 2) << 7)) { | 1669 | (qdev->mac_index) * 2) << 7)) { |
1699 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | 1670 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); |
1700 | return 0; | 1671 | return 0; |
1701 | } | 1672 | } |
@@ -1710,9 +1681,9 @@ static u32 ql_get_speed(struct ql3_adapter *qdev)
1710 | u32 status; | 1681 | u32 status; |
1711 | unsigned long hw_flags; | 1682 | unsigned long hw_flags; |
1712 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | 1683 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); |
1713 | if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, | 1684 | if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, |
1714 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * | 1685 | (QL_RESOURCE_BITS_BASE_CODE | |
1715 | 2) << 7)) { | 1686 | (qdev->mac_index) * 2) << 7)) { |
1716 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | 1687 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); |
1717 | return 0; | 1688 | return 0; |
1718 | } | 1689 | } |
@@ -1727,9 +1698,9 @@ static int ql_get_full_dup(struct ql3_adapter *qdev)
1727 | int status; | 1698 | int status; |
1728 | unsigned long hw_flags; | 1699 | unsigned long hw_flags; |
1729 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | 1700 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); |
1730 | if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, | 1701 | if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, |
1731 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * | 1702 | (QL_RESOURCE_BITS_BASE_CODE | |
1732 | 2) << 7)) { | 1703 | (qdev->mac_index) * 2) << 7)) { |
1733 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | 1704 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); |
1734 | return 0; | 1705 | return 0; |
1735 | } | 1706 | } |
@@ -1739,7 +1710,6 @@ static int ql_get_full_dup(struct ql3_adapter *qdev)
1739 | return status; | 1710 | return status; |
1740 | } | 1711 | } |
1741 | 1712 | ||
1742 | |||
1743 | static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) | 1713 | static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) |
1744 | { | 1714 | { |
1745 | struct ql3_adapter *qdev = netdev_priv(ndev); | 1715 | struct ql3_adapter *qdev = netdev_priv(ndev); |
@@ -1747,7 +1717,7 @@ static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
1747 | ecmd->transceiver = XCVR_INTERNAL; | 1717 | ecmd->transceiver = XCVR_INTERNAL; |
1748 | ecmd->supported = ql_supported_modes(qdev); | 1718 | ecmd->supported = ql_supported_modes(qdev); |
1749 | 1719 | ||
1750 | if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) { | 1720 | if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { |
1751 | ecmd->port = PORT_FIBRE; | 1721 | ecmd->port = PORT_FIBRE; |
1752 | } else { | 1722 | } else { |
1753 | ecmd->port = PORT_TP; | 1723 | ecmd->port = PORT_TP; |
@@ -1788,10 +1758,11 @@ static void ql_get_pauseparam(struct net_device *ndev,
1788 | struct ethtool_pauseparam *pause) | 1758 | struct ethtool_pauseparam *pause) |
1789 | { | 1759 | { |
1790 | struct ql3_adapter *qdev = netdev_priv(ndev); | 1760 | struct ql3_adapter *qdev = netdev_priv(ndev); |
1791 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 1761 | struct ql3xxx_port_registers __iomem *port_regs = |
1762 | qdev->mem_map_registers; | ||
1792 | 1763 | ||
1793 | u32 reg; | 1764 | u32 reg; |
1794 | if(qdev->mac_index == 0) | 1765 | if (qdev->mac_index == 0) |
1795 | reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg); | 1766 | reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg); |
1796 | else | 1767 | else |
1797 | reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg); | 1768 | reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg); |
@@ -1818,8 +1789,9 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev) | |||
1818 | 1789 | ||
1819 | while (lrg_buf_cb) { | 1790 | while (lrg_buf_cb) { |
1820 | if (!lrg_buf_cb->skb) { | 1791 | if (!lrg_buf_cb->skb) { |
1821 | lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev, | 1792 | lrg_buf_cb->skb = |
1822 | qdev->lrg_buffer_len); | 1793 | netdev_alloc_skb(qdev->ndev, |
1794 | qdev->lrg_buffer_len); | ||
1823 | if (unlikely(!lrg_buf_cb->skb)) { | 1795 | if (unlikely(!lrg_buf_cb->skb)) { |
1824 | netdev_printk(KERN_DEBUG, qdev->ndev, | 1796 | netdev_printk(KERN_DEBUG, qdev->ndev, |
1825 | "Failed netdev_alloc_skb()\n"); | 1797 | "Failed netdev_alloc_skb()\n"); |
@@ -1837,7 +1809,7 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev) | |||
1837 | PCI_DMA_FROMDEVICE); | 1809 | PCI_DMA_FROMDEVICE); |
1838 | 1810 | ||
1839 | err = pci_dma_mapping_error(qdev->pdev, map); | 1811 | err = pci_dma_mapping_error(qdev->pdev, map); |
1840 | if(err) { | 1812 | if (err) { |
1841 | netdev_err(qdev->ndev, | 1813 | netdev_err(qdev->ndev, |
1842 | "PCI mapping failed with error: %d\n", | 1814 | "PCI mapping failed with error: %d\n", |
1843 | err); | 1815 | err); |
@@ -1848,9 +1820,9 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev) | |||
1848 | 1820 | ||
1849 | 1821 | ||
1850 | lrg_buf_cb->buf_phy_addr_low = | 1822 | lrg_buf_cb->buf_phy_addr_low = |
1851 | cpu_to_le32(LS_64BITS(map)); | 1823 | cpu_to_le32(LS_64BITS(map)); |
1852 | lrg_buf_cb->buf_phy_addr_high = | 1824 | lrg_buf_cb->buf_phy_addr_high = |
1853 | cpu_to_le32(MS_64BITS(map)); | 1825 | cpu_to_le32(MS_64BITS(map)); |
1854 | dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); | 1826 | dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); |
1855 | dma_unmap_len_set(lrg_buf_cb, maplen, | 1827 | dma_unmap_len_set(lrg_buf_cb, maplen, |
1856 | qdev->lrg_buffer_len - | 1828 | qdev->lrg_buffer_len - |
@@ -1870,7 +1842,9 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev) | |||
1870 | */ | 1842 | */ |
1871 | static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev) | 1843 | static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev) |
1872 | { | 1844 | { |
1873 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 1845 | struct ql3xxx_port_registers __iomem *port_regs = |
1846 | qdev->mem_map_registers; | ||
1847 | |||
1874 | if (qdev->small_buf_release_cnt >= 16) { | 1848 | if (qdev->small_buf_release_cnt >= 16) { |
1875 | while (qdev->small_buf_release_cnt >= 16) { | 1849 | while (qdev->small_buf_release_cnt >= 16) { |
1876 | qdev->small_buf_q_producer_index++; | 1850 | qdev->small_buf_q_producer_index++; |
@@ -1894,7 +1868,8 @@ static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev) | |||
1894 | struct bufq_addr_element *lrg_buf_q_ele; | 1868 | struct bufq_addr_element *lrg_buf_q_ele; |
1895 | int i; | 1869 | int i; |
1896 | struct ql_rcv_buf_cb *lrg_buf_cb; | 1870 | struct ql_rcv_buf_cb *lrg_buf_cb; |
1897 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 1871 | struct ql3xxx_port_registers __iomem *port_regs = |
1872 | qdev->mem_map_registers; | ||
1898 | 1873 | ||
1899 | if ((qdev->lrg_buf_free_count >= 8) && | 1874 | if ((qdev->lrg_buf_free_count >= 8) && |
1900 | (qdev->lrg_buf_release_cnt >= 16)) { | 1875 | (qdev->lrg_buf_release_cnt >= 16)) { |
@@ -1922,7 +1897,8 @@ static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev) | |||
1922 | 1897 | ||
1923 | qdev->lrg_buf_q_producer_index++; | 1898 | qdev->lrg_buf_q_producer_index++; |
1924 | 1899 | ||
1925 | if (qdev->lrg_buf_q_producer_index == qdev->num_lbufq_entries) | 1900 | if (qdev->lrg_buf_q_producer_index == |
1901 | qdev->num_lbufq_entries) | ||
1926 | qdev->lrg_buf_q_producer_index = 0; | 1902 | qdev->lrg_buf_q_producer_index = 0; |
1927 | 1903 | ||
1928 | if (qdev->lrg_buf_q_producer_index == | 1904 | if (qdev->lrg_buf_q_producer_index == |
@@ -1944,7 +1920,7 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev, | |||
1944 | int i; | 1920 | int i; |
1945 | int retval = 0; | 1921 | int retval = 0; |
1946 | 1922 | ||
1947 | if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) { | 1923 | if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) { |
1948 | netdev_warn(qdev->ndev, | 1924 | netdev_warn(qdev->ndev, |
1949 | "Frame too short but it was padded and sent\n"); | 1925 | "Frame too short but it was padded and sent\n"); |
1950 | } | 1926 | } |
@@ -1952,7 +1928,7 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev, | |||
1952 | tx_cb = &qdev->tx_buf[mac_rsp->transaction_id]; | 1928 | tx_cb = &qdev->tx_buf[mac_rsp->transaction_id]; |
1953 | 1929 | ||
1954 | /* Check the transmit response flags for any errors */ | 1930 | /* Check the transmit response flags for any errors */ |
1955 | if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) { | 1931 | if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) { |
1956 | netdev_err(qdev->ndev, | 1932 | netdev_err(qdev->ndev, |
1957 | "Frame too short to be legal, frame not sent\n"); | 1933 | "Frame too short to be legal, frame not sent\n"); |
1958 | 1934 | ||
@@ -1961,7 +1937,7 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev, | |||
1961 | goto frame_not_sent; | 1937 | goto frame_not_sent; |
1962 | } | 1938 | } |
1963 | 1939 | ||
1964 | if(tx_cb->seg_count == 0) { | 1940 | if (tx_cb->seg_count == 0) { |
1965 | netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n", | 1941 | netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n", |
1966 | mac_rsp->transaction_id); | 1942 | mac_rsp->transaction_id); |
1967 | 1943 | ||
@@ -2009,7 +1985,7 @@ static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev) | |||
2009 | qdev->lrg_buf_release_cnt++; | 1985 | qdev->lrg_buf_release_cnt++; |
2010 | if (++qdev->lrg_buf_index == qdev->num_large_buffers) | 1986 | if (++qdev->lrg_buf_index == qdev->num_large_buffers) |
2011 | qdev->lrg_buf_index = 0; | 1987 | qdev->lrg_buf_index = 0; |
2012 | return(lrg_buf_cb); | 1988 | return lrg_buf_cb; |
2013 | } | 1989 | } |
2014 | 1990 | ||
2015 | /* | 1991 | /* |
@@ -2150,8 +2126,8 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev, | |||
2150 | net_rsp = qdev->rsp_current; | 2126 | net_rsp = qdev->rsp_current; |
2151 | rmb(); | 2127 | rmb(); |
2152 | /* | 2128 | /* |
2153 | * Fix 4032 chipe undocumented "feature" where bit-8 is set if the | 2129 | * Fix 4032 chip's undocumented "feature" where bit-8 is set |
2154 | * inbound completion is for a VLAN. | 2130 | * if the inbound completion is for a VLAN. |
2155 | */ | 2131 | */ |
2156 | if (qdev->device_id == QL3032_DEVICE_ID) | 2132 | if (qdev->device_id == QL3032_DEVICE_ID) |
2157 | net_rsp->opcode &= 0x7f; | 2133 | net_rsp->opcode &= 0x7f; |
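On the mask itself: 0x7f is ~0x80 for an 8-bit opcode, so the statement clears only the top bit, which the 4032 sets when the inbound completion is for a VLAN. A named constant would make that self-documenting; a sketch with a hypothetical macro name:

	#define IB_VLAN_OPCODE_BIT	0x80	/* hypothetical name: bit 7 of the opcode */

	if (qdev->device_id == QL3032_DEVICE_ID)
		net_rsp->opcode &= ~IB_VLAN_OPCODE_BIT;	/* identical to &= 0x7f */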
@@ -2177,19 +2153,18 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev, | |||
2177 | net_rsp); | 2153 | net_rsp); |
2178 | (*rx_cleaned)++; | 2154 | (*rx_cleaned)++; |
2179 | break; | 2155 | break; |
2180 | default: | 2156 | default: { |
2181 | { | 2157 | u32 *tmp = (u32 *)net_rsp; |
2182 | u32 *tmp = (u32 *) net_rsp; | 2158 | netdev_err(ndev, |
2183 | netdev_err(ndev, | 2159 | "Hit default case, not handled!\n" |
2184 | "Hit default case, not handled!\n" | 2160 | " dropping the packet, opcode = %x\n" |
2185 | " dropping the packet, opcode = %x\n" | 2161 | "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", |
2186 | "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", | 2162 | net_rsp->opcode, |
2187 | net_rsp->opcode, | 2163 | (unsigned long int)tmp[0], |
2188 | (unsigned long int)tmp[0], | 2164 | (unsigned long int)tmp[1], |
2189 | (unsigned long int)tmp[1], | 2165 | (unsigned long int)tmp[2], |
2190 | (unsigned long int)tmp[2], | 2166 | (unsigned long int)tmp[3]); |
2191 | (unsigned long int)tmp[3]); | 2167 | } |
2192 | } | ||
2193 | } | 2168 | } |
2194 | 2169 | ||
2195 | qdev->rsp_consumer_index++; | 2170 | qdev->rsp_consumer_index++; |
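The tightened default case still dumps the first four 32-bit words of the response by hand. Assuming a raw dump of the whole IOCB is the goal, print_hex_dump() is the idiomatic kernel helper; a sketch of that alternative (not what this patch does):

	default:
		netdev_err(ndev,
			   "Hit default case, not handled!\n"
			   " dropping the packet, opcode = %x\n",
			   net_rsp->opcode);
		print_hex_dump(KERN_ERR, "qla3xxx: ", DUMP_PREFIX_OFFSET,
			       16, 4, net_rsp, sizeof(*net_rsp), false);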
@@ -2212,7 +2187,8 @@ static int ql_poll(struct napi_struct *napi, int budget) | |||
2212 | struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi); | 2187 | struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi); |
2213 | int rx_cleaned = 0, tx_cleaned = 0; | 2188 | int rx_cleaned = 0, tx_cleaned = 0; |
2214 | unsigned long hw_flags; | 2189 | unsigned long hw_flags; |
2215 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 2190 | struct ql3xxx_port_registers __iomem *port_regs = |
2191 | qdev->mem_map_registers; | ||
2216 | 2192 | ||
2217 | ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget); | 2193 | ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget); |
2218 | 2194 | ||
@@ -2235,15 +2211,14 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id) | |||
2235 | 2211 | ||
2236 | struct net_device *ndev = dev_id; | 2212 | struct net_device *ndev = dev_id; |
2237 | struct ql3_adapter *qdev = netdev_priv(ndev); | 2213 | struct ql3_adapter *qdev = netdev_priv(ndev); |
2238 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 2214 | struct ql3xxx_port_registers __iomem *port_regs = |
2215 | qdev->mem_map_registers; | ||
2239 | u32 value; | 2216 | u32 value; |
2240 | int handled = 1; | 2217 | int handled = 1; |
2241 | u32 var; | 2218 | u32 var; |
2242 | 2219 | ||
2243 | port_regs = qdev->mem_map_registers; | 2220 | value = ql_read_common_reg_l(qdev, |
2244 | 2221 | &port_regs->CommonRegs.ispControlStatus); | |
2245 | value = | ||
2246 | ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus); | ||
2247 | 2222 | ||
2248 | if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) { | 2223 | if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) { |
2249 | spin_lock(&qdev->adapter_lock); | 2224 | spin_lock(&qdev->adapter_lock); |
@@ -2251,7 +2226,7 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id) | |||
2251 | netif_carrier_off(qdev->ndev); | 2226 | netif_carrier_off(qdev->ndev); |
2252 | ql_disable_interrupts(qdev); | 2227 | ql_disable_interrupts(qdev); |
2253 | qdev->port_link_state = LS_DOWN; | 2228 | qdev->port_link_state = LS_DOWN; |
2254 | set_bit(QL_RESET_ACTIVE,&qdev->flags) ; | 2229 | set_bit(QL_RESET_ACTIVE, &qdev->flags) ; |
2255 | 2230 | ||
2256 | if (value & ISP_CONTROL_FE) { | 2231 | if (value & ISP_CONTROL_FE) { |
2257 | /* | 2232 | /* |
@@ -2263,12 +2238,12 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id) | |||
2263 | netdev_warn(ndev, | 2238 | netdev_warn(ndev, |
2264 | "Resetting chip. PortFatalErrStatus register = 0x%x\n", | 2239 | "Resetting chip. PortFatalErrStatus register = 0x%x\n", |
2265 | var); | 2240 | var); |
2266 | set_bit(QL_RESET_START,&qdev->flags) ; | 2241 | set_bit(QL_RESET_START, &qdev->flags) ; |
2267 | } else { | 2242 | } else { |
2268 | /* | 2243 | /* |
2269 | * Soft Reset Requested. | 2244 | * Soft Reset Requested. |
2270 | */ | 2245 | */ |
2271 | set_bit(QL_RESET_PER_SCSI,&qdev->flags) ; | 2246 | set_bit(QL_RESET_PER_SCSI, &qdev->flags) ; |
2272 | netdev_err(ndev, | 2247 | netdev_err(ndev, |
2273 | "Another function issued a reset to the chip. ISR value = %x\n", | 2248 | "Another function issued a reset to the chip. ISR value = %x\n", |
2274 | value); | 2249 | value); |
@@ -2277,52 +2252,36 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id) | |||
2277 | spin_unlock(&qdev->adapter_lock); | 2252 | spin_unlock(&qdev->adapter_lock); |
2278 | } else if (value & ISP_IMR_DISABLE_CMPL_INT) { | 2253 | } else if (value & ISP_IMR_DISABLE_CMPL_INT) { |
2279 | ql_disable_interrupts(qdev); | 2254 | ql_disable_interrupts(qdev); |
2280 | if (likely(napi_schedule_prep(&qdev->napi))) { | 2255 | if (likely(napi_schedule_prep(&qdev->napi))) |
2281 | __napi_schedule(&qdev->napi); | 2256 | __napi_schedule(&qdev->napi); |
2282 | } | 2257 | } else |
2283 | } else { | ||
2284 | return IRQ_NONE; | 2258 | return IRQ_NONE; |
2285 | } | ||
2286 | 2259 | ||
2287 | return IRQ_RETVAL(handled); | 2260 | return IRQ_RETVAL(handled); |
2288 | } | 2261 | } |
2289 | 2262 | ||
2290 | /* | 2263 | /* |
2291 | * Get the total number of segments needed for the | 2264 | * Get the total number of segments needed for the given number of fragments. |
2292 | * given number of fragments. This is necessary because | 2265 | * This is necessary because outbound address lists (OAL) will be used when |
2293 | * outbound address lists (OAL) will be used when more than | 2266 | * more than two frags are given. Each address list has 5 addr/len pairs. |
2294 | * two frags are given. Each address list has 5 addr/len | 2267 | * The 5th pair in each OAL is used to point to the next OAL if more frags |
2295 | * pairs. The 5th pair in each AOL is used to point to | 2268 | * are coming. That is why the frags:segment count ratio is not linear. |
2296 | * the next AOL if more frags are coming. | ||
2297 | * That is why the frags:segment count ratio is not linear. | ||
2298 | */ | 2269 | */ |
2299 | static int ql_get_seg_count(struct ql3_adapter *qdev, | 2270 | static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags) |
2300 | unsigned short frags) | ||
2301 | { | 2271 | { |
2302 | if (qdev->device_id == QL3022_DEVICE_ID) | 2272 | if (qdev->device_id == QL3022_DEVICE_ID) |
2303 | return 1; | 2273 | return 1; |
2304 | 2274 | ||
2305 | switch(frags) { | 2275 | if (frags <= 2) |
2306 | case 0: return 1; /* just the skb->data seg */ | 2276 | return frags + 1; |
2307 | case 1: return 2; /* skb->data + 1 frag */ | 2277 | else if (frags <= 6) |
2308 | case 2: return 3; /* skb->data + 2 frags */ | 2278 | return frags + 2; |
2309 | case 3: return 5; /* skb->data + 1 frag + 1 AOL containting 2 frags */ | 2279 | else if (frags <= 10) |
2310 | case 4: return 6; | 2280 | return frags + 3; |
2311 | case 5: return 7; | 2281 | else if (frags <= 14) |
2312 | case 6: return 8; | 2282 | return frags + 4; |
2313 | case 7: return 10; | 2283 | else if (frags <= 18) |
2314 | case 8: return 11; | 2284 | return frags + 5; |
2315 | case 9: return 12; | ||
2316 | case 10: return 13; | ||
2317 | case 11: return 15; | ||
2318 | case 12: return 16; | ||
2319 | case 13: return 17; | ||
2320 | case 14: return 18; | ||
2321 | case 15: return 20; | ||
2322 | case 16: return 21; | ||
2323 | case 17: return 22; | ||
2324 | case 18: return 23; | ||
2325 | } | ||
2326 | return -1; | 2285 | return -1; |
2327 | } | 2286 | } |
2328 | 2287 | ||
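The switch-to-arithmetic conversion is behavior-preserving, and the steps follow from the layout the rewritten comment describes: the IOCB itself carries three addr/len pairs (the third doubles as a chain pointer when an OAL is needed), each intermediate OAL carries four payload pairs plus a chaining fifth, and the final OAL may use all five. That yields one extra continuation entry at frags = 3, 7, 11 and 15, i.e. seg_cnt = frags + 1 through frags + 5. A throwaway userspace check that the new formula reproduces the deleted table:

	#include <stdio.h>

	static int old_seg_count(int frags)	/* the deleted switch, as a table */
	{
		static const int tbl[] = { 1, 2, 3, 5, 6, 7, 8, 10, 11, 12,
					   13, 15, 16, 17, 18, 20, 21, 22, 23 };
		return tbl[frags];
	}

	static int new_seg_count(int frags)	/* the replacement arithmetic */
	{
		if (frags <= 2)
			return frags + 1;
		else if (frags <= 6)
			return frags + 2;
		else if (frags <= 10)
			return frags + 3;
		else if (frags <= 14)
			return frags + 4;
		else if (frags <= 18)
			return frags + 5;
		return -1;
	}

	int main(void)
	{
		int i;

		for (i = 0; i <= 18; i++)
			if (old_seg_count(i) != new_seg_count(i))
				printf("mismatch at %d frags\n", i);
		return 0;	/* prints nothing: the two agree for 0..18 */
	}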
@@ -2345,8 +2304,8 @@ static void ql_hw_csum_setup(const struct sk_buff *skb, | |||
2345 | } | 2304 | } |
2346 | 2305 | ||
2347 | /* | 2306 | /* |
2348 | * Map the buffers for this transmit. This will return | 2307 | * Map the buffers for this transmit. |
2349 | * NETDEV_TX_BUSY or NETDEV_TX_OK based on success. | 2308 | * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success. |
2350 | */ | 2309 | */ |
2351 | static int ql_send_map(struct ql3_adapter *qdev, | 2310 | static int ql_send_map(struct ql3_adapter *qdev, |
2352 | struct ob_mac_iocb_req *mac_iocb_ptr, | 2311 | struct ob_mac_iocb_req *mac_iocb_ptr, |
@@ -2369,7 +2328,7 @@ static int ql_send_map(struct ql3_adapter *qdev, | |||
2369 | map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); | 2328 | map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); |
2370 | 2329 | ||
2371 | err = pci_dma_mapping_error(qdev->pdev, map); | 2330 | err = pci_dma_mapping_error(qdev->pdev, map); |
2372 | if(err) { | 2331 | if (err) { |
2373 | netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n", | 2332 | netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n", |
2374 | err); | 2333 | err); |
2375 | 2334 | ||
@@ -2387,67 +2346,67 @@ static int ql_send_map(struct ql3_adapter *qdev, | |||
2387 | if (seg_cnt == 1) { | 2346 | if (seg_cnt == 1) { |
2388 | /* Terminate the last segment. */ | 2347 | /* Terminate the last segment. */ |
2389 | oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); | 2348 | oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); |
2390 | } else { | 2349 | return NETDEV_TX_OK; |
2391 | oal = tx_cb->oal; | 2350 | } |
2392 | for (completed_segs=0; completed_segs<frag_cnt; completed_segs++,seg++) { | 2351 | oal = tx_cb->oal; |
2393 | skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs]; | 2352 | for (completed_segs = 0; |
2394 | oal_entry++; | 2353 | completed_segs < frag_cnt; |
2395 | if ((seg == 2 && seg_cnt > 3) || /* Check for continuation */ | 2354 | completed_segs++, seg++) { |
2396 | (seg == 7 && seg_cnt > 8) || /* requirements. It's strange */ | 2355 | skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs]; |
2397 | (seg == 12 && seg_cnt > 13) || /* but necessary. */ | 2356 | oal_entry++; |
2398 | (seg == 17 && seg_cnt > 18)) { | 2357 | /* |
2399 | /* Continuation entry points to outbound address list. */ | 2358 | * Check for continuation requirements. |
2400 | map = pci_map_single(qdev->pdev, oal, | 2359 | * It's strange but necessary. |
2401 | sizeof(struct oal), | 2360 | * Continuation entry points to outbound address list. |
2402 | PCI_DMA_TODEVICE); | 2361 | */ |
2403 | 2362 | if ((seg == 2 && seg_cnt > 3) || | |
2404 | err = pci_dma_mapping_error(qdev->pdev, map); | 2363 | (seg == 7 && seg_cnt > 8) || |
2405 | if(err) { | 2364 | (seg == 12 && seg_cnt > 13) || |
2406 | 2365 | (seg == 17 && seg_cnt > 18)) { | |
2407 | netdev_err(qdev->ndev, | 2366 | map = pci_map_single(qdev->pdev, oal, |
2408 | "PCI mapping outbound address list with error: %d\n", | 2367 | sizeof(struct oal), |
2409 | err); | 2368 | PCI_DMA_TODEVICE); |
2410 | goto map_error; | ||
2411 | } | ||
2412 | |||
2413 | oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); | ||
2414 | oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); | ||
2415 | oal_entry->len = | ||
2416 | cpu_to_le32(sizeof(struct oal) | | ||
2417 | OAL_CONT_ENTRY); | ||
2418 | dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, | ||
2419 | map); | ||
2420 | dma_unmap_len_set(&tx_cb->map[seg], maplen, | ||
2421 | sizeof(struct oal)); | ||
2422 | oal_entry = (struct oal_entry *)oal; | ||
2423 | oal++; | ||
2424 | seg++; | ||
2425 | } | ||
2426 | |||
2427 | map = | ||
2428 | pci_map_page(qdev->pdev, frag->page, | ||
2429 | frag->page_offset, frag->size, | ||
2430 | PCI_DMA_TODEVICE); | ||
2431 | 2369 | ||
2432 | err = pci_dma_mapping_error(qdev->pdev, map); | 2370 | err = pci_dma_mapping_error(qdev->pdev, map); |
2433 | if(err) { | 2371 | if (err) { |
2434 | netdev_err(qdev->ndev, | 2372 | netdev_err(qdev->ndev, |
2435 | "PCI mapping frags failed with error: %d\n", | 2373 | "PCI mapping outbound address list with error: %d\n", |
2436 | err); | 2374 | err); |
2437 | goto map_error; | 2375 | goto map_error; |
2438 | } | 2376 | } |
2439 | 2377 | ||
2440 | oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); | 2378 | oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); |
2441 | oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); | 2379 | oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); |
2442 | oal_entry->len = cpu_to_le32(frag->size); | 2380 | oal_entry->len = cpu_to_le32(sizeof(struct oal) | |
2381 | OAL_CONT_ENTRY); | ||
2443 | dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); | 2382 | dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); |
2444 | dma_unmap_len_set(&tx_cb->map[seg], maplen, | 2383 | dma_unmap_len_set(&tx_cb->map[seg], maplen, |
2445 | frag->size); | 2384 | sizeof(struct oal)); |
2385 | oal_entry = (struct oal_entry *)oal; | ||
2386 | oal++; | ||
2387 | seg++; | ||
2446 | } | 2388 | } |
2447 | /* Terminate the last segment. */ | ||
2448 | oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); | ||
2449 | } | ||
2450 | 2389 | ||
2390 | map = pci_map_page(qdev->pdev, frag->page, | ||
2391 | frag->page_offset, frag->size, | ||
2392 | PCI_DMA_TODEVICE); | ||
2393 | |||
2394 | err = pci_dma_mapping_error(qdev->pdev, map); | ||
2395 | if (err) { | ||
2396 | netdev_err(qdev->ndev, | ||
2397 | "PCI mapping frags failed with error: %d\n", | ||
2398 | err); | ||
2399 | goto map_error; | ||
2400 | } | ||
2401 | |||
2402 | oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); | ||
2403 | oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); | ||
2404 | oal_entry->len = cpu_to_le32(frag->size); | ||
2405 | dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); | ||
2406 | dma_unmap_len_set(&tx_cb->map[seg], maplen, frag->size); | ||
2407 | } | ||
2408 | /* Terminate the last segment. */ | ||
2409 | oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); | ||
2451 | return NETDEV_TX_OK; | 2410 | return NETDEV_TX_OK; |
2452 | 2411 | ||
2453 | map_error: | 2412 | map_error: |
@@ -2459,13 +2418,18 @@ map_error: | |||
2459 | seg = 1; | 2418 | seg = 1; |
2460 | oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; | 2419 | oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; |
2461 | oal = tx_cb->oal; | 2420 | oal = tx_cb->oal; |
2462 | for (i=0; i<completed_segs; i++,seg++) { | 2421 | for (i = 0; i < completed_segs; i++, seg++) { |
2463 | oal_entry++; | 2422 | oal_entry++; |
2464 | 2423 | ||
2465 | if((seg == 2 && seg_cnt > 3) || /* Check for continuation */ | 2424 | /* |
2466 | (seg == 7 && seg_cnt > 8) || /* requirements. It's strange */ | 2425 | * Check for continuation requirements. |
2467 | (seg == 12 && seg_cnt > 13) || /* but necessary. */ | 2426 | * It's strange but necessary. |
2468 | (seg == 17 && seg_cnt > 18)) { | 2427 | */ |
2428 | |||
2429 | if ((seg == 2 && seg_cnt > 3) || | ||
2430 | (seg == 7 && seg_cnt > 8) || | ||
2431 | (seg == 12 && seg_cnt > 13) || | ||
2432 | (seg == 17 && seg_cnt > 18)) { | ||
2469 | pci_unmap_single(qdev->pdev, | 2433 | pci_unmap_single(qdev->pdev, |
2470 | dma_unmap_addr(&tx_cb->map[seg], mapaddr), | 2434 | dma_unmap_addr(&tx_cb->map[seg], mapaddr), |
2471 | dma_unmap_len(&tx_cb->map[seg], maplen), | 2435 | dma_unmap_len(&tx_cb->map[seg], maplen), |
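The four boundary tests now appear twice, once in the mapping loop above and once in this unwind path, and they encode the same fact: seg 2 is the last inline IOCB slot, and segs 7, 12 and 17 are the chaining fifth slots of successive OALs. A hypothetical predicate (not in the patch) would name the magic numbers and keep the two copies in sync:

	/* true when this slot must become a pointer to the next OAL */
	static bool ql_oal_needs_continuation(int seg, int seg_cnt)
	{
		return (seg == 2 && seg_cnt > 3) ||
		       (seg == 7 && seg_cnt > 8) ||
		       (seg == 12 && seg_cnt > 13) ||
		       (seg == 17 && seg_cnt > 18);
	}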
@@ -2504,18 +2468,19 @@ static netdev_tx_t ql3xxx_send(struct sk_buff *skb, | |||
2504 | struct net_device *ndev) | 2468 | struct net_device *ndev) |
2505 | { | 2469 | { |
2506 | struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); | 2470 | struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); |
2507 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 2471 | struct ql3xxx_port_registers __iomem *port_regs = |
2472 | qdev->mem_map_registers; | ||
2508 | struct ql_tx_buf_cb *tx_cb; | 2473 | struct ql_tx_buf_cb *tx_cb; |
2509 | u32 tot_len = skb->len; | 2474 | u32 tot_len = skb->len; |
2510 | struct ob_mac_iocb_req *mac_iocb_ptr; | 2475 | struct ob_mac_iocb_req *mac_iocb_ptr; |
2511 | 2476 | ||
2512 | if (unlikely(atomic_read(&qdev->tx_count) < 2)) { | 2477 | if (unlikely(atomic_read(&qdev->tx_count) < 2)) |
2513 | return NETDEV_TX_BUSY; | 2478 | return NETDEV_TX_BUSY; |
2514 | } | ||
2515 | 2479 | ||
2516 | tx_cb = &qdev->tx_buf[qdev->req_producer_index] ; | 2480 | tx_cb = &qdev->tx_buf[qdev->req_producer_index]; |
2517 | if((tx_cb->seg_count = ql_get_seg_count(qdev, | 2481 | tx_cb->seg_count = ql_get_seg_count(qdev, |
2518 | (skb_shinfo(skb)->nr_frags))) == -1) { | 2482 | skb_shinfo(skb)->nr_frags); |
2483 | if (tx_cb->seg_count == -1) { | ||
2519 | netdev_err(ndev, "%s: invalid segment count!\n", __func__); | 2484 | netdev_err(ndev, "%s: invalid segment count!\n", __func__); |
2520 | return NETDEV_TX_OK; | 2485 | return NETDEV_TX_OK; |
2521 | } | 2486 | } |
@@ -2532,7 +2497,7 @@ static netdev_tx_t ql3xxx_send(struct sk_buff *skb, | |||
2532 | skb->ip_summed == CHECKSUM_PARTIAL) | 2497 | skb->ip_summed == CHECKSUM_PARTIAL) |
2533 | ql_hw_csum_setup(skb, mac_iocb_ptr); | 2498 | ql_hw_csum_setup(skb, mac_iocb_ptr); |
2534 | 2499 | ||
2535 | if(ql_send_map(qdev,mac_iocb_ptr,tx_cb,skb) != NETDEV_TX_OK) { | 2500 | if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) { |
2536 | netdev_err(ndev, "%s: Could not map the segments!\n", __func__); | 2501 | netdev_err(ndev, "%s: Could not map the segments!\n", __func__); |
2537 | return NETDEV_TX_BUSY; | 2502 | return NETDEV_TX_BUSY; |
2538 | } | 2503 | } |
@@ -2586,14 +2551,14 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev) | |||
2586 | return -ENOMEM; | 2551 | return -ENOMEM; |
2587 | } | 2552 | } |
2588 | 2553 | ||
2589 | set_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags); | 2554 | set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); |
2590 | 2555 | ||
2591 | return 0; | 2556 | return 0; |
2592 | } | 2557 | } |
2593 | 2558 | ||
2594 | static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev) | 2559 | static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev) |
2595 | { | 2560 | { |
2596 | if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags)) { | 2561 | if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) { |
2597 | netdev_info(qdev->ndev, "Already done\n"); | 2562 | netdev_info(qdev->ndev, "Already done\n"); |
2598 | return; | 2563 | return; |
2599 | } | 2564 | } |
@@ -2610,29 +2575,31 @@ static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev) | |||
2610 | 2575 | ||
2611 | qdev->rsp_q_virt_addr = NULL; | 2576 | qdev->rsp_q_virt_addr = NULL; |
2612 | 2577 | ||
2613 | clear_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags); | 2578 | clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); |
2614 | } | 2579 | } |
2615 | 2580 | ||
2616 | static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) | 2581 | static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) |
2617 | { | 2582 | { |
2618 | /* Create Large Buffer Queue */ | 2583 | /* Create Large Buffer Queue */ |
2619 | qdev->lrg_buf_q_size = | 2584 | qdev->lrg_buf_q_size = |
2620 | qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry); | 2585 | qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry); |
2621 | if (qdev->lrg_buf_q_size < PAGE_SIZE) | 2586 | if (qdev->lrg_buf_q_size < PAGE_SIZE) |
2622 | qdev->lrg_buf_q_alloc_size = PAGE_SIZE; | 2587 | qdev->lrg_buf_q_alloc_size = PAGE_SIZE; |
2623 | else | 2588 | else |
2624 | qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2; | 2589 | qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2; |
2625 | 2590 | ||
2626 | qdev->lrg_buf = kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb),GFP_KERNEL); | 2591 | qdev->lrg_buf = |
2592 | kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb), | ||
2593 | GFP_KERNEL); | ||
2627 | if (qdev->lrg_buf == NULL) { | 2594 | if (qdev->lrg_buf == NULL) { |
2628 | netdev_err(qdev->ndev, "qdev->lrg_buf alloc failed\n"); | 2595 | netdev_err(qdev->ndev, "qdev->lrg_buf alloc failed\n"); |
2629 | return -ENOMEM; | 2596 | return -ENOMEM; |
2630 | } | 2597 | } |
2631 | 2598 | ||
2632 | qdev->lrg_buf_q_alloc_virt_addr = | 2599 | qdev->lrg_buf_q_alloc_virt_addr = |
2633 | pci_alloc_consistent(qdev->pdev, | 2600 | pci_alloc_consistent(qdev->pdev, |
2634 | qdev->lrg_buf_q_alloc_size, | 2601 | qdev->lrg_buf_q_alloc_size, |
2635 | &qdev->lrg_buf_q_alloc_phy_addr); | 2602 | &qdev->lrg_buf_q_alloc_phy_addr); |
2636 | 2603 | ||
2637 | if (qdev->lrg_buf_q_alloc_virt_addr == NULL) { | 2604 | if (qdev->lrg_buf_q_alloc_virt_addr == NULL) { |
2638 | netdev_err(qdev->ndev, "lBufQ failed\n"); | 2605 | netdev_err(qdev->ndev, "lBufQ failed\n"); |
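The re-wrapped kmalloc() is an array allocation, num_large_buffers elements of struct ql_rcv_buf_cb. kcalloc() states that directly and adds multiply-overflow checking (it also zeroes the buffer, which kmalloc does not); a sketch of that form, which the patch itself does not adopt:

	qdev->lrg_buf = kcalloc(qdev->num_large_buffers,
				sizeof(struct ql_rcv_buf_cb), GFP_KERNEL);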
@@ -2643,16 +2610,16 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) | |||
2643 | 2610 | ||
2644 | /* Create Small Buffer Queue */ | 2611 | /* Create Small Buffer Queue */ |
2645 | qdev->small_buf_q_size = | 2612 | qdev->small_buf_q_size = |
2646 | NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry); | 2613 | NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry); |
2647 | if (qdev->small_buf_q_size < PAGE_SIZE) | 2614 | if (qdev->small_buf_q_size < PAGE_SIZE) |
2648 | qdev->small_buf_q_alloc_size = PAGE_SIZE; | 2615 | qdev->small_buf_q_alloc_size = PAGE_SIZE; |
2649 | else | 2616 | else |
2650 | qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2; | 2617 | qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2; |
2651 | 2618 | ||
2652 | qdev->small_buf_q_alloc_virt_addr = | 2619 | qdev->small_buf_q_alloc_virt_addr = |
2653 | pci_alloc_consistent(qdev->pdev, | 2620 | pci_alloc_consistent(qdev->pdev, |
2654 | qdev->small_buf_q_alloc_size, | 2621 | qdev->small_buf_q_alloc_size, |
2655 | &qdev->small_buf_q_alloc_phy_addr); | 2622 | &qdev->small_buf_q_alloc_phy_addr); |
2656 | 2623 | ||
2657 | if (qdev->small_buf_q_alloc_virt_addr == NULL) { | 2624 | if (qdev->small_buf_q_alloc_virt_addr == NULL) { |
2658 | netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n"); | 2625 | netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n"); |
@@ -2664,17 +2631,17 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) | |||
2664 | 2631 | ||
2665 | qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr; | 2632 | qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr; |
2666 | qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr; | 2633 | qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr; |
2667 | set_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags); | 2634 | set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); |
2668 | return 0; | 2635 | return 0; |
2669 | } | 2636 | } |
2670 | 2637 | ||
2671 | static void ql_free_buffer_queues(struct ql3_adapter *qdev) | 2638 | static void ql_free_buffer_queues(struct ql3_adapter *qdev) |
2672 | { | 2639 | { |
2673 | if (!test_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags)) { | 2640 | if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) { |
2674 | netdev_info(qdev->ndev, "Already done\n"); | 2641 | netdev_info(qdev->ndev, "Already done\n"); |
2675 | return; | 2642 | return; |
2676 | } | 2643 | } |
2677 | if(qdev->lrg_buf) kfree(qdev->lrg_buf); | 2644 | kfree(qdev->lrg_buf); |
2678 | pci_free_consistent(qdev->pdev, | 2645 | pci_free_consistent(qdev->pdev, |
2679 | qdev->lrg_buf_q_alloc_size, | 2646 | qdev->lrg_buf_q_alloc_size, |
2680 | qdev->lrg_buf_q_alloc_virt_addr, | 2647 | qdev->lrg_buf_q_alloc_virt_addr, |
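The dropped `if (qdev->lrg_buf)` guard was redundant because kfree(NULL) is defined to be a no-op; ql_free_send_free_list() below gets the same treatment. In sketch form:

	kfree(qdev->lrg_buf);	/* safe even when the pointer is NULL */
	qdev->lrg_buf = NULL;	/* optional double-free hardening, not in the patch */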
@@ -2689,7 +2656,7 @@ static void ql_free_buffer_queues(struct ql3_adapter *qdev) | |||
2689 | 2656 | ||
2690 | qdev->small_buf_q_virt_addr = NULL; | 2657 | qdev->small_buf_q_virt_addr = NULL; |
2691 | 2658 | ||
2692 | clear_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags); | 2659 | clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); |
2693 | } | 2660 | } |
2694 | 2661 | ||
2695 | static int ql_alloc_small_buffers(struct ql3_adapter *qdev) | 2662 | static int ql_alloc_small_buffers(struct ql3_adapter *qdev) |
@@ -2699,13 +2666,13 @@ static int ql_alloc_small_buffers(struct ql3_adapter *qdev) | |||
2699 | 2666 | ||
2700 | /* Currently we allocate on one of memory and use it for smallbuffers */ | 2667 | /* Currently we allocate on one of memory and use it for smallbuffers */ |
2701 | qdev->small_buf_total_size = | 2668 | qdev->small_buf_total_size = |
2702 | (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES * | 2669 | (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES * |
2703 | QL_SMALL_BUFFER_SIZE); | 2670 | QL_SMALL_BUFFER_SIZE); |
2704 | 2671 | ||
2705 | qdev->small_buf_virt_addr = | 2672 | qdev->small_buf_virt_addr = |
2706 | pci_alloc_consistent(qdev->pdev, | 2673 | pci_alloc_consistent(qdev->pdev, |
2707 | qdev->small_buf_total_size, | 2674 | qdev->small_buf_total_size, |
2708 | &qdev->small_buf_phy_addr); | 2675 | &qdev->small_buf_phy_addr); |
2709 | 2676 | ||
2710 | if (qdev->small_buf_virt_addr == NULL) { | 2677 | if (qdev->small_buf_virt_addr == NULL) { |
2711 | netdev_err(qdev->ndev, "Failed to get small buffer memory\n"); | 2678 | netdev_err(qdev->ndev, "Failed to get small buffer memory\n"); |
@@ -2727,13 +2694,13 @@ static int ql_alloc_small_buffers(struct ql3_adapter *qdev) | |||
2727 | small_buf_q_entry++; | 2694 | small_buf_q_entry++; |
2728 | } | 2695 | } |
2729 | qdev->small_buf_index = 0; | 2696 | qdev->small_buf_index = 0; |
2730 | set_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags); | 2697 | set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags); |
2731 | return 0; | 2698 | return 0; |
2732 | } | 2699 | } |
2733 | 2700 | ||
2734 | static void ql_free_small_buffers(struct ql3_adapter *qdev) | 2701 | static void ql_free_small_buffers(struct ql3_adapter *qdev) |
2735 | { | 2702 | { |
2736 | if (!test_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags)) { | 2703 | if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) { |
2737 | netdev_info(qdev->ndev, "Already done\n"); | 2704 | netdev_info(qdev->ndev, "Already done\n"); |
2738 | return; | 2705 | return; |
2739 | } | 2706 | } |
@@ -2819,7 +2786,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev) | |||
2819 | PCI_DMA_FROMDEVICE); | 2786 | PCI_DMA_FROMDEVICE); |
2820 | 2787 | ||
2821 | err = pci_dma_mapping_error(qdev->pdev, map); | 2788 | err = pci_dma_mapping_error(qdev->pdev, map); |
2822 | if(err) { | 2789 | if (err) { |
2823 | netdev_err(qdev->ndev, | 2790 | netdev_err(qdev->ndev, |
2824 | "PCI mapping failed with error: %d\n", | 2791 | "PCI mapping failed with error: %d\n", |
2825 | err); | 2792 | err); |
@@ -2847,10 +2814,8 @@ static void ql_free_send_free_list(struct ql3_adapter *qdev) | |||
2847 | 2814 | ||
2848 | tx_cb = &qdev->tx_buf[0]; | 2815 | tx_cb = &qdev->tx_buf[0]; |
2849 | for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { | 2816 | for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { |
2850 | if (tx_cb->oal) { | 2817 | kfree(tx_cb->oal); |
2851 | kfree(tx_cb->oal); | 2818 | tx_cb->oal = NULL; |
2852 | tx_cb->oal = NULL; | ||
2853 | } | ||
2854 | tx_cb++; | 2819 | tx_cb++; |
2855 | } | 2820 | } |
2856 | } | 2821 | } |
@@ -2859,8 +2824,7 @@ static int ql_create_send_free_list(struct ql3_adapter *qdev) | |||
2859 | { | 2824 | { |
2860 | struct ql_tx_buf_cb *tx_cb; | 2825 | struct ql_tx_buf_cb *tx_cb; |
2861 | int i; | 2826 | int i; |
2862 | struct ob_mac_iocb_req *req_q_curr = | 2827 | struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr; |
2863 | qdev->req_q_virt_addr; | ||
2864 | 2828 | ||
2865 | /* Create free list of transmit buffers */ | 2829 | /* Create free list of transmit buffers */ |
2866 | for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { | 2830 | for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { |
@@ -2881,8 +2845,7 @@ static int ql_alloc_mem_resources(struct ql3_adapter *qdev) | |||
2881 | if (qdev->ndev->mtu == NORMAL_MTU_SIZE) { | 2845 | if (qdev->ndev->mtu == NORMAL_MTU_SIZE) { |
2882 | qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES; | 2846 | qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES; |
2883 | qdev->lrg_buffer_len = NORMAL_MTU_SIZE; | 2847 | qdev->lrg_buffer_len = NORMAL_MTU_SIZE; |
2884 | } | 2848 | } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) { |
2885 | else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) { | ||
2886 | /* | 2849 | /* |
2887 | * Bigger buffers, so less of them. | 2850 | * Bigger buffers, so less of them. |
2888 | */ | 2851 | */ |
@@ -2893,10 +2856,11 @@ static int ql_alloc_mem_resources(struct ql3_adapter *qdev) | |||
2893 | qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE); | 2856 | qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE); |
2894 | return -ENOMEM; | 2857 | return -ENOMEM; |
2895 | } | 2858 | } |
2896 | qdev->num_large_buffers = qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY; | 2859 | qdev->num_large_buffers = |
2860 | qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY; | ||
2897 | qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE; | 2861 | qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE; |
2898 | qdev->max_frame_size = | 2862 | qdev->max_frame_size = |
2899 | (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE; | 2863 | (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE; |
2900 | 2864 | ||
2901 | /* | 2865 | /* |
2902 | * First allocate a page of shared memory and use it for shadow | 2866 | * First allocate a page of shared memory and use it for shadow |
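The two buffer-size assignments above interact: substituting the lrg_buffer_len update into max_frame_size shows the header-space term cancels, leaving MTU plus the VLAN and CRC overheads.

	/*
	 * max_frame_size = (mtu + VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE)
	 *                  - QL_HEADER_SPACE + ETHERNET_CRC_SIZE
	 *                =  mtu + VLAN_ETH_HLEN + VLAN_ID_LEN + ETHERNET_CRC_SIZE
	 */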
@@ -2904,22 +2868,22 @@ static int ql_alloc_mem_resources(struct ql3_adapter *qdev) | |||
2904 | * Network Completion Queue Producer Index Register | 2868 | * Network Completion Queue Producer Index Register |
2905 | */ | 2869 | */ |
2906 | qdev->shadow_reg_virt_addr = | 2870 | qdev->shadow_reg_virt_addr = |
2907 | pci_alloc_consistent(qdev->pdev, | 2871 | pci_alloc_consistent(qdev->pdev, |
2908 | PAGE_SIZE, &qdev->shadow_reg_phy_addr); | 2872 | PAGE_SIZE, &qdev->shadow_reg_phy_addr); |
2909 | 2873 | ||
2910 | if (qdev->shadow_reg_virt_addr != NULL) { | 2874 | if (qdev->shadow_reg_virt_addr != NULL) { |
2911 | qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr; | 2875 | qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr; |
2912 | qdev->req_consumer_index_phy_addr_high = | 2876 | qdev->req_consumer_index_phy_addr_high = |
2913 | MS_64BITS(qdev->shadow_reg_phy_addr); | 2877 | MS_64BITS(qdev->shadow_reg_phy_addr); |
2914 | qdev->req_consumer_index_phy_addr_low = | 2878 | qdev->req_consumer_index_phy_addr_low = |
2915 | LS_64BITS(qdev->shadow_reg_phy_addr); | 2879 | LS_64BITS(qdev->shadow_reg_phy_addr); |
2916 | 2880 | ||
2917 | qdev->prsp_producer_index = | 2881 | qdev->prsp_producer_index = |
2918 | (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8); | 2882 | (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8); |
2919 | qdev->rsp_producer_index_phy_addr_high = | 2883 | qdev->rsp_producer_index_phy_addr_high = |
2920 | qdev->req_consumer_index_phy_addr_high; | 2884 | qdev->req_consumer_index_phy_addr_high; |
2921 | qdev->rsp_producer_index_phy_addr_low = | 2885 | qdev->rsp_producer_index_phy_addr_low = |
2922 | qdev->req_consumer_index_phy_addr_low + 8; | 2886 | qdev->req_consumer_index_phy_addr_low + 8; |
2923 | } else { | 2887 | } else { |
2924 | netdev_err(qdev->ndev, "shadowReg Alloc failed\n"); | 2888 | netdev_err(qdev->ndev, "shadowReg Alloc failed\n"); |
2925 | return -ENOMEM; | 2889 | return -ENOMEM; |
@@ -2989,7 +2953,7 @@ static int ql_init_misc_registers(struct ql3_adapter *qdev) | |||
2989 | struct ql3xxx_local_ram_registers __iomem *local_ram = | 2953 | struct ql3xxx_local_ram_registers __iomem *local_ram = |
2990 | (void __iomem *)qdev->mem_map_registers; | 2954 | (void __iomem *)qdev->mem_map_registers; |
2991 | 2955 | ||
2992 | if(ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK, | 2956 | if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK, |
2993 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * | 2957 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * |
2994 | 2) << 4)) | 2958 | 2) << 4)) |
2995 | return -1; | 2959 | return -1; |
@@ -3045,18 +3009,20 @@ static int ql_init_misc_registers(struct ql3_adapter *qdev) | |||
3045 | static int ql_adapter_initialize(struct ql3_adapter *qdev) | 3009 | static int ql_adapter_initialize(struct ql3_adapter *qdev) |
3046 | { | 3010 | { |
3047 | u32 value; | 3011 | u32 value; |
3048 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 3012 | struct ql3xxx_port_registers __iomem *port_regs = |
3013 | qdev->mem_map_registers; | ||
3014 | u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; | ||
3049 | struct ql3xxx_host_memory_registers __iomem *hmem_regs = | 3015 | struct ql3xxx_host_memory_registers __iomem *hmem_regs = |
3050 | (void __iomem *)port_regs; | 3016 | (void __iomem *)port_regs; |
3051 | u32 delay = 10; | 3017 | u32 delay = 10; |
3052 | int status = 0; | 3018 | int status = 0; |
3053 | unsigned long hw_flags = 0; | 3019 | unsigned long hw_flags = 0; |
3054 | 3020 | ||
3055 | if(ql_mii_setup(qdev)) | 3021 | if (ql_mii_setup(qdev)) |
3056 | return -1; | 3022 | return -1; |
3057 | 3023 | ||
3058 | /* Bring out PHY out of reset */ | 3024 | /* Bring out PHY out of reset */ |
3059 | ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, | 3025 | ql_write_common_reg(qdev, spir, |
3060 | (ISP_SERIAL_PORT_IF_WE | | 3026 | (ISP_SERIAL_PORT_IF_WE | |
3061 | (ISP_SERIAL_PORT_IF_WE << 16))); | 3027 | (ISP_SERIAL_PORT_IF_WE << 16))); |
3062 | /* Give the PHY time to come out of reset. */ | 3028 | /* Give the PHY time to come out of reset. */ |
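The new spir temporary shortens two write sites that share the `bits | (bits << 16)` idiom, where the upper halfword appears to act as a write-enable mask for the lower one. A hypothetical wrapper, assuming that mask semantics holds (sketch only, not part of the patch):

	static void ql_write_spir_bits(struct ql3_adapter *qdev,
				       u32 __iomem *spir, u16 bits)
	{
		/* upper 16 bits select which of the lower 16 are written */
		ql_write_common_reg(qdev, spir, bits | ((u32)bits << 16));
	}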
@@ -3065,13 +3031,13 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev) | |||
3065 | netif_carrier_off(qdev->ndev); | 3031 | netif_carrier_off(qdev->ndev); |
3066 | 3032 | ||
3067 | /* V2 chip fix for ARS-39168. */ | 3033 | /* V2 chip fix for ARS-39168. */ |
3068 | ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, | 3034 | ql_write_common_reg(qdev, spir, |
3069 | (ISP_SERIAL_PORT_IF_SDE | | 3035 | (ISP_SERIAL_PORT_IF_SDE | |
3070 | (ISP_SERIAL_PORT_IF_SDE << 16))); | 3036 | (ISP_SERIAL_PORT_IF_SDE << 16))); |
3071 | 3037 | ||
3072 | /* Request Queue Registers */ | 3038 | /* Request Queue Registers */ |
3073 | *((u32 *) (qdev->preq_consumer_index)) = 0; | 3039 | *((u32 *)(qdev->preq_consumer_index)) = 0; |
3074 | atomic_set(&qdev->tx_count,NUM_REQ_Q_ENTRIES); | 3040 | atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES); |
3075 | qdev->req_producer_index = 0; | 3041 | qdev->req_producer_index = 0; |
3076 | 3042 | ||
3077 | ql_write_page1_reg(qdev, | 3043 | ql_write_page1_reg(qdev, |
@@ -3121,7 +3087,9 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev) | |||
3121 | &hmem_regs->rxLargeQBaseAddrLow, | 3087 | &hmem_regs->rxLargeQBaseAddrLow, |
3122 | LS_64BITS(qdev->lrg_buf_q_phy_addr)); | 3088 | LS_64BITS(qdev->lrg_buf_q_phy_addr)); |
3123 | 3089 | ||
3124 | ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, qdev->num_lbufq_entries); | 3090 | ql_write_page1_reg(qdev, |
3091 | &hmem_regs->rxLargeQLength, | ||
3092 | qdev->num_lbufq_entries); | ||
3125 | 3093 | ||
3126 | ql_write_page1_reg(qdev, | 3094 | ql_write_page1_reg(qdev, |
3127 | &hmem_regs->rxLargeBufferLength, | 3095 | &hmem_regs->rxLargeBufferLength, |
@@ -3171,7 +3139,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev) | |||
3171 | if ((value & PORT_STATUS_IC) == 0) { | 3139 | if ((value & PORT_STATUS_IC) == 0) { |
3172 | 3140 | ||
3173 | /* Chip has not been configured yet, so let it rip. */ | 3141 | /* Chip has not been configured yet, so let it rip. */ |
3174 | if(ql_init_misc_registers(qdev)) { | 3142 | if (ql_init_misc_registers(qdev)) { |
3175 | status = -1; | 3143 | status = -1; |
3176 | goto out; | 3144 | goto out; |
3177 | } | 3145 | } |
@@ -3181,7 +3149,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev) | |||
3181 | 3149 | ||
3182 | value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig; | 3150 | value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig; |
3183 | 3151 | ||
3184 | if(ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK, | 3152 | if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK, |
3185 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) | 3153 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) |
3186 | * 2) << 13)) { | 3154 | * 2) << 13)) { |
3187 | status = -1; | 3155 | status = -1; |
@@ -3204,7 +3172,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev) | |||
3204 | &port_regs->mac0MaxFrameLengthReg, | 3172 | &port_regs->mac0MaxFrameLengthReg, |
3205 | qdev->max_frame_size); | 3173 | qdev->max_frame_size); |
3206 | 3174 | ||
3207 | if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, | 3175 | if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, |
3208 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * | 3176 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * |
3209 | 2) << 7)) { | 3177 | 2) << 7)) { |
3210 | status = -1; | 3178 | status = -1; |
@@ -3297,7 +3265,8 @@ out: | |||
3297 | */ | 3265 | */ |
3298 | static int ql_adapter_reset(struct ql3_adapter *qdev) | 3266 | static int ql_adapter_reset(struct ql3_adapter *qdev) |
3299 | { | 3267 | { |
3300 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 3268 | struct ql3xxx_port_registers __iomem *port_regs = |
3269 | qdev->mem_map_registers; | ||
3301 | int status = 0; | 3270 | int status = 0; |
3302 | u16 value; | 3271 | u16 value; |
3303 | int max_wait_time; | 3272 | int max_wait_time; |
@@ -3357,13 +3326,11 @@ static int ql_adapter_reset(struct ql3_adapter *qdev) | |||
3357 | */ | 3326 | */ |
3358 | max_wait_time = 5; | 3327 | max_wait_time = 5; |
3359 | do { | 3328 | do { |
3360 | value = | 3329 | value = ql_read_common_reg(qdev, |
3361 | ql_read_common_reg(qdev, | 3330 | &port_regs->CommonRegs. |
3362 | &port_regs->CommonRegs. | 3331 | ispControlStatus); |
3363 | ispControlStatus); | 3332 | if ((value & ISP_CONTROL_FSR) == 0) |
3364 | if ((value & ISP_CONTROL_FSR) == 0) { | ||
3365 | break; | 3333 | break; |
3366 | } | ||
3367 | ssleep(1); | 3334 | ssleep(1); |
3368 | } while ((--max_wait_time)); | 3335 | } while ((--max_wait_time)); |
3369 | } | 3336 | } |
@@ -3377,7 +3344,8 @@ static int ql_adapter_reset(struct ql3_adapter *qdev) | |||
3377 | 3344 | ||
3378 | static void ql_set_mac_info(struct ql3_adapter *qdev) | 3345 | static void ql_set_mac_info(struct ql3_adapter *qdev) |
3379 | { | 3346 | { |
3380 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 3347 | struct ql3xxx_port_registers __iomem *port_regs = |
3348 | qdev->mem_map_registers; | ||
3381 | u32 value, port_status; | 3349 | u32 value, port_status; |
3382 | u8 func_number; | 3350 | u8 func_number; |
3383 | 3351 | ||
@@ -3393,9 +3361,9 @@ static void ql_set_mac_info(struct ql3_adapter *qdev) | |||
3393 | qdev->mb_bit_mask = FN0_MA_BITS_MASK; | 3361 | qdev->mb_bit_mask = FN0_MA_BITS_MASK; |
3394 | qdev->PHYAddr = PORT0_PHY_ADDRESS; | 3362 | qdev->PHYAddr = PORT0_PHY_ADDRESS; |
3395 | if (port_status & PORT_STATUS_SM0) | 3363 | if (port_status & PORT_STATUS_SM0) |
3396 | set_bit(QL_LINK_OPTICAL,&qdev->flags); | 3364 | set_bit(QL_LINK_OPTICAL, &qdev->flags); |
3397 | else | 3365 | else |
3398 | clear_bit(QL_LINK_OPTICAL,&qdev->flags); | 3366 | clear_bit(QL_LINK_OPTICAL, &qdev->flags); |
3399 | break; | 3367 | break; |
3400 | 3368 | ||
3401 | case ISP_CONTROL_FN1_NET: | 3369 | case ISP_CONTROL_FN1_NET: |
@@ -3404,9 +3372,9 @@ static void ql_set_mac_info(struct ql3_adapter *qdev) | |||
3404 | qdev->mb_bit_mask = FN1_MA_BITS_MASK; | 3372 | qdev->mb_bit_mask = FN1_MA_BITS_MASK; |
3405 | qdev->PHYAddr = PORT1_PHY_ADDRESS; | 3373 | qdev->PHYAddr = PORT1_PHY_ADDRESS; |
3406 | if (port_status & PORT_STATUS_SM1) | 3374 | if (port_status & PORT_STATUS_SM1) |
3407 | set_bit(QL_LINK_OPTICAL,&qdev->flags); | 3375 | set_bit(QL_LINK_OPTICAL, &qdev->flags); |
3408 | else | 3376 | else |
3409 | clear_bit(QL_LINK_OPTICAL,&qdev->flags); | 3377 | clear_bit(QL_LINK_OPTICAL, &qdev->flags); |
3410 | break; | 3378 | break; |
3411 | 3379 | ||
3412 | case ISP_CONTROL_FN0_SCSI: | 3380 | case ISP_CONTROL_FN0_SCSI: |
@@ -3428,7 +3396,7 @@ static void ql_display_dev_info(struct net_device *ndev) | |||
3428 | netdev_info(ndev, | 3396 | netdev_info(ndev, |
3429 | "%s Adapter %d RevisionID %d found %s on PCI slot %d\n", | 3397 | "%s Adapter %d RevisionID %d found %s on PCI slot %d\n", |
3430 | DRV_NAME, qdev->index, qdev->chip_rev_id, | 3398 | DRV_NAME, qdev->index, qdev->chip_rev_id, |
3431 | (qdev->device_id == QL3032_DEVICE_ID) ? "QLA3032" : "QLA3022", | 3399 | qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022", |
3432 | qdev->pci_slot); | 3400 | qdev->pci_slot); |
3433 | netdev_info(ndev, "%s Interface\n", | 3401 | netdev_info(ndev, "%s Interface\n", |
3434 | test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER"); | 3402 | test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER"); |
@@ -3455,16 +3423,16 @@ static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) | |||
3455 | netif_stop_queue(ndev); | 3423 | netif_stop_queue(ndev); |
3456 | netif_carrier_off(ndev); | 3424 | netif_carrier_off(ndev); |
3457 | 3425 | ||
3458 | clear_bit(QL_ADAPTER_UP,&qdev->flags); | 3426 | clear_bit(QL_ADAPTER_UP, &qdev->flags); |
3459 | clear_bit(QL_LINK_MASTER,&qdev->flags); | 3427 | clear_bit(QL_LINK_MASTER, &qdev->flags); |
3460 | 3428 | ||
3461 | ql_disable_interrupts(qdev); | 3429 | ql_disable_interrupts(qdev); |
3462 | 3430 | ||
3463 | free_irq(qdev->pdev->irq, ndev); | 3431 | free_irq(qdev->pdev->irq, ndev); |
3464 | 3432 | ||
3465 | if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) { | 3433 | if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { |
3466 | netdev_info(qdev->ndev, "calling pci_disable_msi()\n"); | 3434 | netdev_info(qdev->ndev, "calling pci_disable_msi()\n"); |
3467 | clear_bit(QL_MSI_ENABLED,&qdev->flags); | 3435 | clear_bit(QL_MSI_ENABLED, &qdev->flags); |
3468 | pci_disable_msi(qdev->pdev); | 3436 | pci_disable_msi(qdev->pdev); |
3469 | } | 3437 | } |
3470 | 3438 | ||
@@ -3478,7 +3446,8 @@ static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) | |||
3478 | 3446 | ||
3479 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | 3447 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); |
3480 | if (ql_wait_for_drvr_lock(qdev)) { | 3448 | if (ql_wait_for_drvr_lock(qdev)) { |
3481 | if ((soft_reset = ql_adapter_reset(qdev))) { | 3449 | soft_reset = ql_adapter_reset(qdev); |
3450 | if (soft_reset) { | ||
3482 | netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n", | 3451 | netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n", |
3483 | qdev->index); | 3452 | qdev->index); |
3484 | } | 3453 | } |
@@ -3514,24 +3483,26 @@ static int ql_adapter_up(struct ql3_adapter *qdev) | |||
3514 | qdev->msi = 0; | 3483 | qdev->msi = 0; |
3515 | } else { | 3484 | } else { |
3516 | netdev_info(ndev, "MSI Enabled...\n"); | 3485 | netdev_info(ndev, "MSI Enabled...\n"); |
3517 | set_bit(QL_MSI_ENABLED,&qdev->flags); | 3486 | set_bit(QL_MSI_ENABLED, &qdev->flags); |
3518 | irq_flags &= ~IRQF_SHARED; | 3487 | irq_flags &= ~IRQF_SHARED; |
3519 | } | 3488 | } |
3520 | } | 3489 | } |
3521 | 3490 | ||
3522 | if ((err = request_irq(qdev->pdev->irq, | 3491 | err = request_irq(qdev->pdev->irq, ql3xxx_isr, |
3523 | ql3xxx_isr, | 3492 | irq_flags, ndev->name, ndev); |
3524 | irq_flags, ndev->name, ndev))) { | 3493 | if (err) { |
3525 | netdev_err(ndev, | 3494 | netdev_err(ndev, |
3526 | "Failed to reserve interrupt %d already in use\n", | 3495 | "Failed to reserve interrupt %d - already in use\n", |
3527 | qdev->pdev->irq); | 3496 | qdev->pdev->irq); |
3528 | goto err_irq; | 3497 | goto err_irq; |
3529 | } | 3498 | } |
3530 | 3499 | ||
3531 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | 3500 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); |
3532 | 3501 | ||
3533 | if ((err = ql_wait_for_drvr_lock(qdev))) { | 3502 | err = ql_wait_for_drvr_lock(qdev); |
3534 | if ((err = ql_adapter_initialize(qdev))) { | 3503 | if (err) { |
3504 | err = ql_adapter_initialize(qdev); | ||
3505 | if (err) { | ||
3535 | netdev_err(ndev, "Unable to initialize adapter\n"); | 3506 | netdev_err(ndev, "Unable to initialize adapter\n"); |
3536 | goto err_init; | 3507 | goto err_init; |
3537 | } | 3508 | } |
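The request_irq() conversion above is the patch's standard recipe for checkpatch's "do not use assignment in if condition" error; the same two-line split is applied to ql_wait_for_drvr_lock() and ql_adapter_initialize() while keeping their original nesting. The pattern, side by side:

	/* before: assignment buried in the condition */
	if ((err = request_irq(qdev->pdev->irq, ql3xxx_isr,
			       irq_flags, ndev->name, ndev)))
		goto err_irq;

	/* after: assign first, then test */
	err = request_irq(qdev->pdev->irq, ql3xxx_isr,
			  irq_flags, ndev->name, ndev);
	if (err)
		goto err_irq;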
@@ -3544,7 +3515,7 @@ static int ql_adapter_up(struct ql3_adapter *qdev) | |||
3544 | 3515 | ||
3545 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | 3516 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); |
3546 | 3517 | ||
3547 | set_bit(QL_ADAPTER_UP,&qdev->flags); | 3518 | set_bit(QL_ADAPTER_UP, &qdev->flags); |
3548 | 3519 | ||
3549 | mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); | 3520 | mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); |
3550 | 3521 | ||
@@ -3558,9 +3529,9 @@ err_lock: | |||
3558 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | 3529 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); |
3559 | free_irq(qdev->pdev->irq, ndev); | 3530 | free_irq(qdev->pdev->irq, ndev); |
3560 | err_irq: | 3531 | err_irq: |
3561 | if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) { | 3532 | if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { |
3562 | netdev_info(ndev, "calling pci_disable_msi()\n"); | 3533 | netdev_info(ndev, "calling pci_disable_msi()\n"); |
3563 | clear_bit(QL_MSI_ENABLED,&qdev->flags); | 3534 | clear_bit(QL_MSI_ENABLED, &qdev->flags); |
3564 | pci_disable_msi(qdev->pdev); | 3535 | pci_disable_msi(qdev->pdev); |
3565 | } | 3536 | } |
3566 | return err; | 3537 | return err; |
@@ -3568,7 +3539,7 @@ err_irq: | |||
3568 | 3539 | ||
3569 | static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset) | 3540 | static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset) |
3570 | { | 3541 | { |
3571 | if( ql_adapter_down(qdev,reset) || ql_adapter_up(qdev)) { | 3542 | if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) { |
3572 | netdev_err(qdev->ndev, | 3543 | netdev_err(qdev->ndev, |
3573 | "Driver up/down cycle failed, closing device\n"); | 3544 | "Driver up/down cycle failed, closing device\n"); |
3574 | rtnl_lock(); | 3545 | rtnl_lock(); |
@@ -3587,24 +3558,24 @@ static int ql3xxx_close(struct net_device *ndev) | |||
3587 | * Wait for device to recover from a reset. | 3558 | * Wait for device to recover from a reset. |
3588 | * (Rarely happens, but possible.) | 3559 | * (Rarely happens, but possible.) |
3589 | */ | 3560 | */ |
3590 | while (!test_bit(QL_ADAPTER_UP,&qdev->flags)) | 3561 | while (!test_bit(QL_ADAPTER_UP, &qdev->flags)) |
3591 | msleep(50); | 3562 | msleep(50); |
3592 | 3563 | ||
3593 | ql_adapter_down(qdev,QL_DO_RESET); | 3564 | ql_adapter_down(qdev, QL_DO_RESET); |
3594 | return 0; | 3565 | return 0; |
3595 | } | 3566 | } |
3596 | 3567 | ||
3597 | static int ql3xxx_open(struct net_device *ndev) | 3568 | static int ql3xxx_open(struct net_device *ndev) |
3598 | { | 3569 | { |
3599 | struct ql3_adapter *qdev = netdev_priv(ndev); | 3570 | struct ql3_adapter *qdev = netdev_priv(ndev); |
3600 | return (ql_adapter_up(qdev)); | 3571 | return ql_adapter_up(qdev); |
3601 | } | 3572 | } |
3602 | 3573 | ||
3603 | static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) | 3574 | static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) |
3604 | { | 3575 | { |
3605 | struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); | 3576 | struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); |
3606 | struct ql3xxx_port_registers __iomem *port_regs = | 3577 | struct ql3xxx_port_registers __iomem *port_regs = |
3607 | qdev->mem_map_registers; | 3578 | qdev->mem_map_registers; |
3608 | struct sockaddr *addr = p; | 3579 | struct sockaddr *addr = p; |
3609 | unsigned long hw_flags; | 3580 | unsigned long hw_flags; |
3610 | 3581 | ||
@@ -3659,11 +3630,12 @@ static void ql_reset_work(struct work_struct *work) | |||
3659 | u32 value; | 3630 | u32 value; |
3660 | struct ql_tx_buf_cb *tx_cb; | 3631 | struct ql_tx_buf_cb *tx_cb; |
3661 | int max_wait_time, i; | 3632 | int max_wait_time, i; |
3662 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 3633 | struct ql3xxx_port_registers __iomem *port_regs = |
3634 | qdev->mem_map_registers; | ||
3663 | unsigned long hw_flags; | 3635 | unsigned long hw_flags; |
3664 | 3636 | ||
3665 | if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START),&qdev->flags)) { | 3637 | if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) { |
3666 | clear_bit(QL_LINK_MASTER,&qdev->flags); | 3638 | clear_bit(QL_LINK_MASTER, &qdev->flags); |
3667 | 3639 | ||
3668 | /* | 3640 | /* |
3669 | * Loop through the active list and return the skb. | 3641 | * Loop through the active list and return the skb. |
@@ -3675,13 +3647,16 @@ static void ql_reset_work(struct work_struct *work) | |||
3675 | netdev_printk(KERN_DEBUG, ndev, | 3647 | netdev_printk(KERN_DEBUG, ndev, |
3676 | "Freeing lost SKB\n"); | 3648 | "Freeing lost SKB\n"); |
3677 | pci_unmap_single(qdev->pdev, | 3649 | pci_unmap_single(qdev->pdev, |
3678 | dma_unmap_addr(&tx_cb->map[0], mapaddr), | 3650 | dma_unmap_addr(&tx_cb->map[0], |
3651 | mapaddr), | ||
3679 | dma_unmap_len(&tx_cb->map[0], maplen), | 3652 | dma_unmap_len(&tx_cb->map[0], maplen), |
3680 | PCI_DMA_TODEVICE); | 3653 | PCI_DMA_TODEVICE); |
3681 | for(j=1;j<tx_cb->seg_count;j++) { | 3654 | for (j = 1; j < tx_cb->seg_count; j++) { |
3682 | pci_unmap_page(qdev->pdev, | 3655 | pci_unmap_page(qdev->pdev, |
3683 | dma_unmap_addr(&tx_cb->map[j],mapaddr), | 3656 | dma_unmap_addr(&tx_cb->map[j], |
3684 | dma_unmap_len(&tx_cb->map[j],maplen), | 3657 | mapaddr), |
3658 | dma_unmap_len(&tx_cb->map[j], | ||
3659 | maplen), | ||
3685 | PCI_DMA_TODEVICE); | 3660 | PCI_DMA_TODEVICE); |
3686 | } | 3661 | } |
3687 | dev_kfree_skb(tx_cb->skb); | 3662 | dev_kfree_skb(tx_cb->skb); |
@@ -3736,16 +3711,16 @@ static void ql_reset_work(struct work_struct *work) | |||
3736 | netdev_err(ndev, | 3711 | netdev_err(ndev, |
3737 | "Timed out waiting for reset to complete\n"); | 3712 | "Timed out waiting for reset to complete\n"); |
3738 | netdev_err(ndev, "Do a reset\n"); | 3713 | netdev_err(ndev, "Do a reset\n"); |
3739 | clear_bit(QL_RESET_PER_SCSI,&qdev->flags); | 3714 | clear_bit(QL_RESET_PER_SCSI, &qdev->flags); |
3740 | clear_bit(QL_RESET_START,&qdev->flags); | 3715 | clear_bit(QL_RESET_START, &qdev->flags); |
3741 | ql_cycle_adapter(qdev,QL_DO_RESET); | 3716 | ql_cycle_adapter(qdev, QL_DO_RESET); |
3742 | return; | 3717 | return; |
3743 | } | 3718 | } |
3744 | 3719 | ||
3745 | clear_bit(QL_RESET_ACTIVE,&qdev->flags); | 3720 | clear_bit(QL_RESET_ACTIVE, &qdev->flags); |
3746 | clear_bit(QL_RESET_PER_SCSI,&qdev->flags); | 3721 | clear_bit(QL_RESET_PER_SCSI, &qdev->flags); |
3747 | clear_bit(QL_RESET_START,&qdev->flags); | 3722 | clear_bit(QL_RESET_START, &qdev->flags); |
3748 | ql_cycle_adapter(qdev,QL_NO_RESET); | 3723 | ql_cycle_adapter(qdev, QL_NO_RESET); |
3749 | } | 3724 | } |
3750 | } | 3725 | } |
3751 | 3726 | ||
@@ -3759,7 +3734,8 @@ static void ql_tx_timeout_work(struct work_struct *work) | |||
3759 | 3734 | ||
3760 | static void ql_get_board_info(struct ql3_adapter *qdev) | 3735 | static void ql_get_board_info(struct ql3_adapter *qdev) |
3761 | { | 3736 | { |
3762 | struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; | 3737 | struct ql3xxx_port_registers __iomem *port_regs = |
3738 | qdev->mem_map_registers; | ||
3763 | u32 value; | 3739 | u32 value; |
3764 | 3740 | ||
3765 | value = ql_read_page0_reg_l(qdev, &port_regs->portStatus); | 3741 | value = ql_read_page0_reg_l(qdev, &port_regs->portStatus); |
@@ -3798,7 +3774,7 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev, | |||
3798 | { | 3774 | { |
3799 | struct net_device *ndev = NULL; | 3775 | struct net_device *ndev = NULL; |
3800 | struct ql3_adapter *qdev = NULL; | 3776 | struct ql3_adapter *qdev = NULL; |
3801 | static int cards_found = 0; | 3777 | static int cards_found; |
3802 | int uninitialized_var(pci_using_dac), err; | 3778 | int uninitialized_var(pci_using_dac), err; |
3803 | 3779 | ||
3804 | err = pci_enable_device(pdev); | 3780 | err = pci_enable_device(pdev); |
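Dropping the `= 0` from cards_found is safe because C guarantees objects with static storage duration start out zero; checkpatch warns that the explicit initializer is redundant. In sketch form:

	static int cards_found;		/* guaranteed 0 at load time, no initializer needed */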
@@ -3903,9 +3879,8 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev, | |||
3903 | * Set the Maximum Memory Read Byte Count value. We do this to handle | 3879 | * Set the Maximum Memory Read Byte Count value. We do this to handle |
3904 | * jumbo frames. | 3880 | * jumbo frames. |
3905 | */ | 3881 | */ |
3906 | if (qdev->pci_x) { | 3882 | if (qdev->pci_x) |
3907 | pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036); | 3883 | pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036); |
3908 | } | ||
3909 | 3884 | ||
3910 | err = register_netdev(ndev); | 3885 | err = register_netdev(ndev); |
3911 | if (err) { | 3886 | if (err) { |