Diffstat (limited to 'drivers/net/bnx2x_main.c')
-rw-r--r-- | drivers/net/bnx2x_main.c | 3875 |
1 files changed, 2423 insertions, 1452 deletions
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index efa942688f84..90b54e4c5c3b 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -1,4 +1,4 @@ | |||
1 | /* bnx2x.c: Broadcom Everest network driver. | 1 | /* bnx2x_main.c: Broadcom Everest network driver. |
2 | * | 2 | * |
3 | * Copyright (c) 2007-2008 Broadcom Corporation | 3 | * Copyright (c) 2007-2008 Broadcom Corporation |
4 | * | 4 | * |
@@ -15,12 +15,6 @@ | |||
15 | * | 15 | * |
16 | */ | 16 | */ |
17 | 17 | ||
18 | /* define this to make the driver freeze on error | ||
19 | * to allow getting debug info | ||
20 | * (you will need to reboot afterwards) | ||
21 | */ | ||
22 | /*#define BNX2X_STOP_ON_ERROR*/ | ||
23 | |||
24 | #include <linux/module.h> | 18 | #include <linux/module.h> |
25 | #include <linux/moduleparam.h> | 19 | #include <linux/moduleparam.h> |
26 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
@@ -46,16 +40,17 @@ | |||
46 | #include <linux/mii.h> | 40 | #include <linux/mii.h> |
47 | #ifdef NETIF_F_HW_VLAN_TX | 41 | #ifdef NETIF_F_HW_VLAN_TX |
48 | #include <linux/if_vlan.h> | 42 | #include <linux/if_vlan.h> |
49 | #define BCM_VLAN 1 | ||
50 | #endif | 43 | #endif |
51 | #include <net/ip.h> | 44 | #include <net/ip.h> |
52 | #include <net/tcp.h> | 45 | #include <net/tcp.h> |
53 | #include <net/checksum.h> | 46 | #include <net/checksum.h> |
47 | #include <linux/version.h> | ||
48 | #include <net/ip6_checksum.h> | ||
54 | #include <linux/workqueue.h> | 49 | #include <linux/workqueue.h> |
55 | #include <linux/crc32.h> | 50 | #include <linux/crc32.h> |
51 | #include <linux/crc32c.h> | ||
56 | #include <linux/prefetch.h> | 52 | #include <linux/prefetch.h> |
57 | #include <linux/zlib.h> | 53 | #include <linux/zlib.h> |
58 | #include <linux/version.h> | ||
59 | #include <linux/io.h> | 54 | #include <linux/io.h> |
60 | 55 | ||
61 | #include "bnx2x_reg.h" | 56 | #include "bnx2x_reg.h" |
@@ -67,13 +62,13 @@ | |||
67 | 62 | ||
68 | #define DRV_MODULE_VERSION "1.42.4" | 63 | #define DRV_MODULE_VERSION "1.42.4" |
69 | #define DRV_MODULE_RELDATE "2008/4/9" | 64 | #define DRV_MODULE_RELDATE "2008/4/9" |
70 | #define BNX2X_BC_VER 0x040200 | 65 | #define BNX2X_BC_VER 0x040200 |
71 | 66 | ||
72 | /* Time in jiffies before concluding the transmitter is hung. */ | 67 | /* Time in jiffies before concluding the transmitter is hung */ |
73 | #define TX_TIMEOUT (5*HZ) | 68 | #define TX_TIMEOUT (5*HZ) |
74 | 69 | ||
75 | static char version[] __devinitdata = | 70 | static char version[] __devinitdata = |
76 | "Broadcom NetXtreme II 5771X 10Gigabit Ethernet Driver " | 71 | "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver " |
77 | DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; | 72 | DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; |
78 | 73 | ||
79 | MODULE_AUTHOR("Eliezer Tamir"); | 74 | MODULE_AUTHOR("Eliezer Tamir"); |
@@ -83,20 +78,19 @@ MODULE_VERSION(DRV_MODULE_VERSION); | |||
83 | 78 | ||
84 | static int use_inta; | 79 | static int use_inta; |
85 | static int poll; | 80 | static int poll; |
86 | static int onefunc; | ||
87 | static int nomcp; | ||
88 | static int debug; | 81 | static int debug; |
82 | static int nomcp; | ||
83 | static int load_count[3]; /* 0-common, 1-port0, 2-port1 */ | ||
89 | static int use_multi; | 84 | static int use_multi; |
90 | 85 | ||
91 | module_param(use_inta, int, 0); | 86 | module_param(use_inta, int, 0); |
92 | module_param(poll, int, 0); | 87 | module_param(poll, int, 0); |
93 | module_param(onefunc, int, 0); | ||
94 | module_param(debug, int, 0); | 88 | module_param(debug, int, 0); |
89 | module_param(nomcp, int, 0); | ||
95 | MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X"); | 90 | MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X"); |
96 | MODULE_PARM_DESC(poll, "use polling (for debug)"); | 91 | MODULE_PARM_DESC(poll, "use polling (for debug)"); |
97 | MODULE_PARM_DESC(onefunc, "enable only first function"); | ||
98 | MODULE_PARM_DESC(nomcp, "ignore management CPU (Implies onefunc)"); | ||
99 | MODULE_PARM_DESC(debug, "default debug msglevel"); | 92 | MODULE_PARM_DESC(debug, "default debug msglevel"); |
93 | MODULE_PARM_DESC(nomcp, "ignore management CPU"); | ||
100 | 94 | ||
101 | #ifdef BNX2X_MULTI | 95 | #ifdef BNX2X_MULTI |
102 | module_param(use_multi, int, 0); | 96 | module_param(use_multi, int, 0); |
@@ -105,18 +99,27 @@ MODULE_PARM_DESC(use_multi, "use per-CPU queues"); | |||
105 | 99 | ||
106 | enum bnx2x_board_type { | 100 | enum bnx2x_board_type { |
107 | BCM57710 = 0, | 101 | BCM57710 = 0, |
102 | BCM57711 = 1, | ||
103 | BCM57711E = 2, | ||
108 | }; | 104 | }; |
109 | 105 | ||
110 | /* indexed by board_t, above */ | 106 | /* indexed by board_type, above */ |
111 | static struct { | 107 | static struct { |
112 | char *name; | 108 | char *name; |
113 | } board_info[] __devinitdata = { | 109 | } board_info[] __devinitdata = { |
114 | { "Broadcom NetXtreme II BCM57710 XGb" } | 110 | { "Broadcom NetXtreme II BCM57710 XGb" }, |
111 | { "Broadcom NetXtreme II BCM57711 XGb" }, | ||
112 | { "Broadcom NetXtreme II BCM57711E XGb" } | ||
115 | }; | 113 | }; |
116 | 114 | ||
115 | |||
117 | static const struct pci_device_id bnx2x_pci_tbl[] = { | 116 | static const struct pci_device_id bnx2x_pci_tbl[] = { |
118 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710, | 117 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710, |
119 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 }, | 118 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 }, |
119 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711, | ||
120 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 }, | ||
121 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E, | ||
122 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E }, | ||
120 | { 0 } | 123 | { 0 } |
121 | }; | 124 | }; |
122 | 125 | ||
@@ -201,7 +204,8 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, | |||
201 | #else | 204 | #else |
202 | DMAE_CMD_ENDIANITY_DW_SWAP | | 205 | DMAE_CMD_ENDIANITY_DW_SWAP | |
203 | #endif | 206 | #endif |
204 | (bp->port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0)); | 207 | (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | |
208 | (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); | ||
205 | dmae->src_addr_lo = U64_LO(dma_addr); | 209 | dmae->src_addr_lo = U64_LO(dma_addr); |
206 | dmae->src_addr_hi = U64_HI(dma_addr); | 210 | dmae->src_addr_hi = U64_HI(dma_addr); |
207 | dmae->dst_addr_lo = dst_addr >> 2; | 211 | dmae->dst_addr_lo = dst_addr >> 2; |
@@ -224,7 +228,7 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, | |||
224 | 228 | ||
225 | *wb_comp = 0; | 229 | *wb_comp = 0; |
226 | 230 | ||
227 | bnx2x_post_dmae(bp, dmae, (bp->port)*MAX_DMAE_C_PER_PORT); | 231 | bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); |
228 | 232 | ||
229 | udelay(5); | 233 | udelay(5); |
230 | 234 | ||
@@ -277,7 +281,8 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) | |||
277 | #else | 281 | #else |
278 | DMAE_CMD_ENDIANITY_DW_SWAP | | 282 | DMAE_CMD_ENDIANITY_DW_SWAP | |
279 | #endif | 283 | #endif |
280 | (bp->port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0)); | 284 | (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | |
285 | (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); | ||
281 | dmae->src_addr_lo = src_addr >> 2; | 286 | dmae->src_addr_lo = src_addr >> 2; |
282 | dmae->src_addr_hi = 0; | 287 | dmae->src_addr_hi = 0; |
283 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data)); | 288 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data)); |
@@ -297,7 +302,7 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) | |||
297 | 302 | ||
298 | *wb_comp = 0; | 303 | *wb_comp = 0; |
299 | 304 | ||
300 | bnx2x_post_dmae(bp, dmae, (bp->port)*MAX_DMAE_C_PER_PORT); | 305 | bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); |
301 | 306 | ||
302 | udelay(5); | 307 | udelay(5); |
303 | 308 | ||
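Editor's note: the DMAE hunks above extend the command word so that, in addition to the port select bit, the function's E1H virtual-NIC number is shifted into the opcode, and the companion INIT_DMAE_C(bp) change picks the completion channel per function rather than per port. The standalone sketch below shows only the bit composition; the mask and shift values are illustrative stand-ins, not the real DMAE_CMD_* constants from the firmware headers.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; the real values live in the bnx2x HSI headers. */
#define DMAE_CMD_PORT_0       0x0u
#define DMAE_CMD_PORT_1       (1u << 7)
#define DMAE_CMD_E1HVN_SHIFT  12

/* Per-function part of a DMAE opcode: one bit selects the physical port,
 * and the E1H virtual-NIC number is shifted into its own field so that
 * functions sharing a port are distinguished by the hardware. */
static uint32_t dmae_opcode_func_bits(int port, int e1hvn)
{
    return (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
           ((uint32_t)e1hvn << DMAE_CMD_E1HVN_SHIFT);
}

int main(void)
{
    printf("0x%08x\n", dmae_opcode_func_bits(1, 2)); /* port 1, VN 2 */
    return 0;
}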
@@ -345,47 +350,122 @@ static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg) | |||
345 | 350 | ||
346 | static int bnx2x_mc_assert(struct bnx2x *bp) | 351 | static int bnx2x_mc_assert(struct bnx2x *bp) |
347 | { | 352 | { |
348 | int i, j, rc = 0; | ||
349 | char last_idx; | 353 | char last_idx; |
350 | const char storm[] = {"XTCU"}; | 354 | int i, rc = 0; |
351 | const u32 intmem_base[] = { | 355 | u32 row0, row1, row2, row3; |
352 | BAR_XSTRORM_INTMEM, | 356 | |
353 | BAR_TSTRORM_INTMEM, | 357 | /* XSTORM */ |
354 | BAR_CSTRORM_INTMEM, | 358 | last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM + |
355 | BAR_USTRORM_INTMEM | 359 | XSTORM_ASSERT_LIST_INDEX_OFFSET); |
356 | }; | 360 | if (last_idx) |
357 | 361 | BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); | |
358 | /* Go through all instances of all SEMIs */ | 362 | |
359 | for (i = 0; i < 4; i++) { | 363 | /* print the asserts */ |
360 | last_idx = REG_RD8(bp, XSTORM_ASSERT_LIST_INDEX_OFFSET + | 364 | for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { |
361 | intmem_base[i]); | 365 | |
362 | if (last_idx) | 366 | row0 = REG_RD(bp, BAR_XSTRORM_INTMEM + |
363 | BNX2X_LOG("DATA %cSTORM_ASSERT_LIST_INDEX 0x%x\n", | 367 | XSTORM_ASSERT_LIST_OFFSET(i)); |
364 | storm[i], last_idx); | 368 | row1 = REG_RD(bp, BAR_XSTRORM_INTMEM + |
365 | 369 | XSTORM_ASSERT_LIST_OFFSET(i) + 4); | |
366 | /* print the asserts */ | 370 | row2 = REG_RD(bp, BAR_XSTRORM_INTMEM + |
367 | for (j = 0; j < STROM_ASSERT_ARRAY_SIZE; j++) { | 371 | XSTORM_ASSERT_LIST_OFFSET(i) + 8); |
368 | u32 row0, row1, row2, row3; | 372 | row3 = REG_RD(bp, BAR_XSTRORM_INTMEM + |
369 | 373 | XSTORM_ASSERT_LIST_OFFSET(i) + 12); | |
370 | row0 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + | 374 | |
371 | intmem_base[i]); | 375 | if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { |
372 | row1 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 4 + | 376 | BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x" |
373 | intmem_base[i]); | 377 | " 0x%08x 0x%08x 0x%08x\n", |
374 | row2 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 8 + | 378 | i, row3, row2, row1, row0); |
375 | intmem_base[i]); | 379 | rc++; |
376 | row3 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 12 + | 380 | } else { |
377 | intmem_base[i]); | 381 | break; |
378 | 382 | } | |
379 | if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { | 383 | } |
380 | BNX2X_LOG("DATA %cSTORM_ASSERT_INDEX 0x%x =" | 384 | |
381 | " 0x%08x 0x%08x 0x%08x 0x%08x\n", | 385 | /* TSTORM */ |
382 | storm[i], j, row3, row2, row1, row0); | 386 | last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM + |
383 | rc++; | 387 | TSTORM_ASSERT_LIST_INDEX_OFFSET); |
384 | } else { | 388 | if (last_idx) |
385 | break; | 389 | BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); |
386 | } | 390 | |
391 | /* print the asserts */ | ||
392 | for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { | ||
393 | |||
394 | row0 = REG_RD(bp, BAR_TSTRORM_INTMEM + | ||
395 | TSTORM_ASSERT_LIST_OFFSET(i)); | ||
396 | row1 = REG_RD(bp, BAR_TSTRORM_INTMEM + | ||
397 | TSTORM_ASSERT_LIST_OFFSET(i) + 4); | ||
398 | row2 = REG_RD(bp, BAR_TSTRORM_INTMEM + | ||
399 | TSTORM_ASSERT_LIST_OFFSET(i) + 8); | ||
400 | row3 = REG_RD(bp, BAR_TSTRORM_INTMEM + | ||
401 | TSTORM_ASSERT_LIST_OFFSET(i) + 12); | ||
402 | |||
403 | if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { | ||
404 | BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x" | ||
405 | " 0x%08x 0x%08x 0x%08x\n", | ||
406 | i, row3, row2, row1, row0); | ||
407 | rc++; | ||
408 | } else { | ||
409 | break; | ||
410 | } | ||
411 | } | ||
412 | |||
413 | /* CSTORM */ | ||
414 | last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM + | ||
415 | CSTORM_ASSERT_LIST_INDEX_OFFSET); | ||
416 | if (last_idx) | ||
417 | BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); | ||
418 | |||
419 | /* print the asserts */ | ||
420 | for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { | ||
421 | |||
422 | row0 = REG_RD(bp, BAR_CSTRORM_INTMEM + | ||
423 | CSTORM_ASSERT_LIST_OFFSET(i)); | ||
424 | row1 = REG_RD(bp, BAR_CSTRORM_INTMEM + | ||
425 | CSTORM_ASSERT_LIST_OFFSET(i) + 4); | ||
426 | row2 = REG_RD(bp, BAR_CSTRORM_INTMEM + | ||
427 | CSTORM_ASSERT_LIST_OFFSET(i) + 8); | ||
428 | row3 = REG_RD(bp, BAR_CSTRORM_INTMEM + | ||
429 | CSTORM_ASSERT_LIST_OFFSET(i) + 12); | ||
430 | |||
431 | if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { | ||
432 | BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x" | ||
433 | " 0x%08x 0x%08x 0x%08x\n", | ||
434 | i, row3, row2, row1, row0); | ||
435 | rc++; | ||
436 | } else { | ||
437 | break; | ||
387 | } | 438 | } |
388 | } | 439 | } |
440 | |||
441 | /* USTORM */ | ||
442 | last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM + | ||
443 | USTORM_ASSERT_LIST_INDEX_OFFSET); | ||
444 | if (last_idx) | ||
445 | BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); | ||
446 | |||
447 | /* print the asserts */ | ||
448 | for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { | ||
449 | |||
450 | row0 = REG_RD(bp, BAR_USTRORM_INTMEM + | ||
451 | USTORM_ASSERT_LIST_OFFSET(i)); | ||
452 | row1 = REG_RD(bp, BAR_USTRORM_INTMEM + | ||
453 | USTORM_ASSERT_LIST_OFFSET(i) + 4); | ||
454 | row2 = REG_RD(bp, BAR_USTRORM_INTMEM + | ||
455 | USTORM_ASSERT_LIST_OFFSET(i) + 8); | ||
456 | row3 = REG_RD(bp, BAR_USTRORM_INTMEM + | ||
457 | USTORM_ASSERT_LIST_OFFSET(i) + 12); | ||
458 | |||
459 | if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { | ||
460 | BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x" | ||
461 | " 0x%08x 0x%08x 0x%08x\n", | ||
462 | i, row3, row2, row1, row0); | ||
463 | rc++; | ||
464 | } else { | ||
465 | break; | ||
466 | } | ||
467 | } | ||
468 | |||
389 | return rc; | 469 | return rc; |
390 | } | 470 | } |
391 | 471 | ||
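Editor's note: the rewritten bnx2x_mc_assert() unrolls the old four-iteration storm loop into explicit XSTORM/TSTORM/CSTORM/USTORM blocks because each storm now uses its own BAR_*STRORM_INTMEM window and *_ASSERT_LIST_OFFSET() macro, which cannot be indexed generically. The walk itself is the same in all four blocks; the standalone model below reproduces it with placeholder constants standing in for STROM_ASSERT_ARRAY_SIZE and COMMON_ASM_INVALID_ASSERT_OPCODE, and a plain array standing in for the register reads.

#include <stdint.h>
#include <stdio.h>

#define ASSERT_ARRAY_SIZE  50          /* stands in for STROM_ASSERT_ARRAY_SIZE */
#define INVALID_ASSERT_OP  0xffffffffu /* stands in for COMMON_ASM_INVALID_ASSERT_OPCODE */

/* Each assert entry is four 32-bit rows; the list ends at the first entry
 * whose row0 holds the "invalid opcode" sentinel.  The driver repeats this
 * walk once per storm, reading from that storm's internal-memory window. */
static int dump_assert_list(uint32_t rows[][4], const char *storm)
{
    int i, rc = 0;

    for (i = 0; i < ASSERT_ARRAY_SIZE; i++) {
        if (rows[i][0] == INVALID_ASSERT_OP)
            break;
        printf("%sSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
               storm, i, rows[i][3], rows[i][2], rows[i][1], rows[i][0]);
        rc++;
    }
    return rc;
}

int main(void)
{
    uint32_t rows[ASSERT_ARRAY_SIZE][4] = { { 0x1, 0x2, 0x3, 0x4 } };

    rows[1][0] = INVALID_ASSERT_OP;    /* terminate the list after one entry */
    printf("asserts: %d\n", dump_assert_list(rows, "X"));
    return 0;
}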
@@ -428,14 +508,16 @@ static void bnx2x_panic_dump(struct bnx2x *bp) | |||
428 | struct eth_tx_db_data *hw_prods = fp->hw_tx_prods; | 508 | struct eth_tx_db_data *hw_prods = fp->hw_tx_prods; |
429 | 509 | ||
430 | BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)" | 510 | BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)" |
431 | " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)" | 511 | " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n", |
432 | " *rx_cons_sb(%x) rx_comp_prod(%x)" | ||
433 | " rx_comp_cons(%x) fp_c_idx(%x) fp_u_idx(%x)" | ||
434 | " bd data(%x,%x)\n", | ||
435 | i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod, | 512 | i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod, |
436 | fp->tx_bd_cons, *fp->tx_cons_sb, *fp->rx_cons_sb, | 513 | fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb)); |
437 | fp->rx_comp_prod, fp->rx_comp_cons, fp->fp_c_idx, | 514 | BNX2X_ERR(" rx_comp_prod(%x) rx_comp_cons(%x)" |
438 | fp->fp_u_idx, hw_prods->packets_prod, | 515 | " *rx_cons_sb(%x)\n", |
516 | fp->rx_comp_prod, fp->rx_comp_cons, | ||
517 | le16_to_cpu(*fp->rx_cons_sb)); | ||
518 | BNX2X_ERR(" fp_c_idx(%x) fp_u_idx(%x)" | ||
519 | " bd data(%x,%x)\n", | ||
520 | fp->fp_c_idx, fp->fp_u_idx, hw_prods->packets_prod, | ||
439 | hw_prods->bds_prod); | 521 | hw_prods->bds_prod); |
440 | 522 | ||
441 | start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10); | 523 | start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10); |
@@ -463,7 +545,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp) | |||
463 | struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j]; | 545 | struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j]; |
464 | 546 | ||
465 | BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n", | 547 | BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n", |
466 | j, rx_bd[0], rx_bd[1], sw_bd->skb); | 548 | j, rx_bd[1], rx_bd[0], sw_bd->skb); |
467 | } | 549 | } |
468 | 550 | ||
469 | start = RCQ_BD(fp->rx_comp_cons - 10); | 551 | start = RCQ_BD(fp->rx_comp_cons - 10); |
@@ -482,7 +564,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp) | |||
482 | bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx, | 564 | bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx, |
483 | bp->def_att_idx, bp->attn_state, bp->spq_prod_idx); | 565 | bp->def_att_idx, bp->attn_state, bp->spq_prod_idx); |
484 | 566 | ||
485 | 567 | bnx2x_fw_dump(bp); | |
486 | bnx2x_mc_assert(bp); | 568 | bnx2x_mc_assert(bp); |
487 | BNX2X_ERR("end crash dump -----------------\n"); | 569 | BNX2X_ERR("end crash dump -----------------\n"); |
488 | 570 | ||
@@ -492,7 +574,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp) | |||
492 | 574 | ||
493 | static void bnx2x_int_enable(struct bnx2x *bp) | 575 | static void bnx2x_int_enable(struct bnx2x *bp) |
494 | { | 576 | { |
495 | int port = bp->port; | 577 | int port = BP_PORT(bp); |
496 | u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; | 578 | u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; |
497 | u32 val = REG_RD(bp, addr); | 579 | u32 val = REG_RD(bp, addr); |
498 | int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; | 580 | int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; |
@@ -507,7 +589,6 @@ static void bnx2x_int_enable(struct bnx2x *bp) | |||
507 | HC_CONFIG_0_REG_INT_LINE_EN_0 | | 589 | HC_CONFIG_0_REG_INT_LINE_EN_0 | |
508 | HC_CONFIG_0_REG_ATTN_BIT_EN_0); | 590 | HC_CONFIG_0_REG_ATTN_BIT_EN_0); |
509 | 591 | ||
510 | /* Errata A0.158 workaround */ | ||
511 | DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n", | 592 | DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n", |
512 | val, port, addr, msix); | 593 | val, port, addr, msix); |
513 | 594 | ||
@@ -520,11 +601,25 @@ static void bnx2x_int_enable(struct bnx2x *bp) | |||
520 | val, port, addr, msix); | 601 | val, port, addr, msix); |
521 | 602 | ||
522 | REG_WR(bp, addr, val); | 603 | REG_WR(bp, addr, val); |
604 | |||
605 | if (CHIP_IS_E1H(bp)) { | ||
606 | /* init leading/trailing edge */ | ||
607 | if (IS_E1HMF(bp)) { | ||
608 | val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4))); | ||
609 | if (bp->port.pmf) | ||
610 | /* enable nig attention */ | ||
611 | val |= 0x0100; | ||
612 | } else | ||
613 | val = 0xffff; | ||
614 | |||
615 | REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); | ||
616 | REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); | ||
617 | } | ||
523 | } | 618 | } |
524 | 619 | ||
525 | static void bnx2x_int_disable(struct bnx2x *bp) | 620 | static void bnx2x_int_disable(struct bnx2x *bp) |
526 | { | 621 | { |
527 | int port = bp->port; | 622 | int port = BP_PORT(bp); |
528 | u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; | 623 | u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; |
529 | u32 val = REG_RD(bp, addr); | 624 | u32 val = REG_RD(bp, addr); |
530 | 625 | ||
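Editor's note: the added E1H branch in bnx2x_int_enable() programs the HC leading/trailing edge registers so that only this function's attention sources pass. Below is a standalone sketch of just the mask construction; the meaning of the individual bits is partly an assumption (bit 8 is the NIG attention per the hunk's comment, and bit (4 + vn) is taken to be this VN's general-attention bit), and the HC_REG_* addressing is omitted.

#include <stdint.h>
#include <stdio.h>

/* Mask construction from the hunk above: in E1H multi-function mode start
 * from 0xfe0f, open bit (4 + vn), assumed to be this VN's general-attention
 * bit, and open bit 8 (NIG attention) only when this function is the port
 * management function (PMF).  Single-function mode enables everything. */
static uint16_t hc_edge_mask(int e1hmf, int vn, int pmf)
{
    uint16_t val;

    if (e1hmf) {
        val = 0xfe0f | (uint16_t)(1u << (vn + 4));
        if (pmf)
            val |= 0x0100;
    } else {
        val = 0xffff;
    }
    return val;
}

int main(void)
{
    printf("0x%04x\n", hc_edge_mask(1, 2, 1)); /* MF mode, VN 2, PMF */
    return 0;
}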
@@ -543,10 +638,10 @@ static void bnx2x_int_disable(struct bnx2x *bp) | |||
543 | 638 | ||
544 | static void bnx2x_int_disable_sync(struct bnx2x *bp) | 639 | static void bnx2x_int_disable_sync(struct bnx2x *bp) |
545 | { | 640 | { |
546 | |||
547 | int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; | 641 | int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; |
548 | int i; | 642 | int i; |
549 | 643 | ||
644 | /* disable interrupt handling */ | ||
550 | atomic_inc(&bp->intr_sem); | 645 | atomic_inc(&bp->intr_sem); |
551 | /* prevent the HW from sending interrupts */ | 646 | /* prevent the HW from sending interrupts */ |
552 | bnx2x_int_disable(bp); | 647 | bnx2x_int_disable(bp); |
@@ -563,30 +658,29 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp) | |||
563 | 658 | ||
564 | /* make sure sp_task is not running */ | 659 | /* make sure sp_task is not running */ |
565 | cancel_work_sync(&bp->sp_task); | 660 | cancel_work_sync(&bp->sp_task); |
566 | |||
567 | } | 661 | } |
568 | 662 | ||
569 | /* fast path code */ | 663 | /* fast path */ |
570 | 664 | ||
571 | /* | 665 | /* |
572 | * general service functions | 666 | * General service functions |
573 | */ | 667 | */ |
574 | 668 | ||
575 | static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 id, | 669 | static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id, |
576 | u8 storm, u16 index, u8 op, u8 update) | 670 | u8 storm, u16 index, u8 op, u8 update) |
577 | { | 671 | { |
578 | u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_PORT_BASE * bp->port) * 8; | 672 | u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8; |
579 | struct igu_ack_register igu_ack; | 673 | struct igu_ack_register igu_ack; |
580 | 674 | ||
581 | igu_ack.status_block_index = index; | 675 | igu_ack.status_block_index = index; |
582 | igu_ack.sb_id_and_flags = | 676 | igu_ack.sb_id_and_flags = |
583 | ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) | | 677 | ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) | |
584 | (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) | | 678 | (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) | |
585 | (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | | 679 | (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | |
586 | (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); | 680 | (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); |
587 | 681 | ||
588 | /* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n", | 682 | DP(BNX2X_MSG_OFF, "write 0x%08x to IGU addr 0x%x\n", |
589 | (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr); */ | 683 | (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr); |
590 | REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack)); | 684 | REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack)); |
591 | } | 685 | } |
592 | 686 | ||
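Editor's note: bnx2x_ack_sb() now addresses the IGU per function (IGU_FUNC_BASE * BP_FUNC(bp)) instead of per port, and the ack word it writes packs the status-block index together with the sb_id/storm/update/op flags. The sketch below shows only that packing with illustrative shift values; the real field widths and the position of the index relative to the flags are defined by struct igu_ack_register in the HSI headers, so treat the layout here as an assumption.

#include <stdint.h>
#include <stdio.h>

/* Illustrative shifts only; the real IGU_ACK_REGISTER_*_SHIFT values come
 * from the bnx2x hardware/firmware interface headers. */
#define SB_ID_SHIFT   0
#define STORM_SHIFT   5
#define UPDATE_SHIFT  7
#define OP_SHIFT      8

/* Build the 32-bit ack word: status-block index in one half (assumed low),
 * sb_id/storm/update/op OR-ed into the flags half, then written as a single
 * 32-bit store to the IGU window. */
static uint32_t igu_ack_word(uint16_t sb_index, uint8_t sb_id, uint8_t storm,
                             uint8_t update, uint8_t op)
{
    uint32_t flags = ((uint32_t)sb_id << SB_ID_SHIFT) |
                     ((uint32_t)storm << STORM_SHIFT) |
                     ((uint32_t)update << UPDATE_SHIFT) |
                     ((uint32_t)op << OP_SHIFT);

    return (flags << 16) | sb_index;
}

int main(void)
{
    printf("0x%08x\n", igu_ack_word(42, 3, 1, 1, 0));
    return 0;
}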
@@ -614,8 +708,9 @@ static inline int bnx2x_has_work(struct bnx2x_fastpath *fp) | |||
614 | if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) | 708 | if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) |
615 | rx_cons_sb++; | 709 | rx_cons_sb++; |
616 | 710 | ||
617 | if ((rx_cons_sb != fp->rx_comp_cons) || | 711 | if ((fp->rx_comp_cons != rx_cons_sb) || |
618 | (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons)) | 712 | (fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) || |
713 | (fp->tx_pkt_prod != fp->tx_pkt_cons)) | ||
619 | return 1; | 714 | return 1; |
620 | 715 | ||
621 | return 0; | 716 | return 0; |
@@ -623,11 +718,11 @@ static inline int bnx2x_has_work(struct bnx2x_fastpath *fp) | |||
623 | 718 | ||
624 | static u16 bnx2x_ack_int(struct bnx2x *bp) | 719 | static u16 bnx2x_ack_int(struct bnx2x *bp) |
625 | { | 720 | { |
626 | u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_PORT_BASE * bp->port) * 8; | 721 | u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8; |
627 | u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr); | 722 | u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr); |
628 | 723 | ||
629 | /* DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n", | 724 | DP(BNX2X_MSG_OFF, "read 0x%08x from IGU addr 0x%x\n", |
630 | result, BAR_IGU_INTMEM + igu_addr); */ | 725 | result, BAR_IGU_INTMEM + igu_addr); |
631 | 726 | ||
632 | #ifdef IGU_DEBUG | 727 | #ifdef IGU_DEBUG |
633 | #warning IGU_DEBUG active | 728 | #warning IGU_DEBUG active |
@@ -653,7 +748,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
653 | struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx]; | 748 | struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx]; |
654 | struct eth_tx_bd *tx_bd; | 749 | struct eth_tx_bd *tx_bd; |
655 | struct sk_buff *skb = tx_buf->skb; | 750 | struct sk_buff *skb = tx_buf->skb; |
656 | u16 bd_idx = tx_buf->first_bd; | 751 | u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; |
657 | int nbd; | 752 | int nbd; |
658 | 753 | ||
659 | DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n", | 754 | DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n", |
@@ -666,9 +761,10 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
666 | BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE); | 761 | BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE); |
667 | 762 | ||
668 | nbd = le16_to_cpu(tx_bd->nbd) - 1; | 763 | nbd = le16_to_cpu(tx_bd->nbd) - 1; |
764 | new_cons = nbd + tx_buf->first_bd; | ||
669 | #ifdef BNX2X_STOP_ON_ERROR | 765 | #ifdef BNX2X_STOP_ON_ERROR |
670 | if (nbd > (MAX_SKB_FRAGS + 2)) { | 766 | if (nbd > (MAX_SKB_FRAGS + 2)) { |
671 | BNX2X_ERR("bad nbd!\n"); | 767 | BNX2X_ERR("BAD nbd!\n"); |
672 | bnx2x_panic(); | 768 | bnx2x_panic(); |
673 | } | 769 | } |
674 | #endif | 770 | #endif |
@@ -708,32 +804,30 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
708 | tx_buf->first_bd = 0; | 804 | tx_buf->first_bd = 0; |
709 | tx_buf->skb = NULL; | 805 | tx_buf->skb = NULL; |
710 | 806 | ||
711 | return bd_idx; | 807 | return new_cons; |
712 | } | 808 | } |
713 | 809 | ||
714 | static inline u32 bnx2x_tx_avail(struct bnx2x_fastpath *fp) | 810 | static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp) |
715 | { | 811 | { |
716 | u16 used; | 812 | s16 used; |
717 | u32 prod; | 813 | u16 prod; |
718 | u32 cons; | 814 | u16 cons; |
719 | 815 | ||
720 | /* Tell compiler that prod and cons can change */ | 816 | barrier(); /* Tell compiler that prod and cons can change */ |
721 | barrier(); | ||
722 | prod = fp->tx_bd_prod; | 817 | prod = fp->tx_bd_prod; |
723 | cons = fp->tx_bd_cons; | 818 | cons = fp->tx_bd_cons; |
724 | 819 | ||
725 | used = (NUM_TX_BD - NUM_TX_RINGS + prod - cons + | 820 | /* NUM_TX_RINGS = number of "next-page" entries |
726 | (cons / TX_DESC_CNT) - (prod / TX_DESC_CNT)); | 821 | It will be used as a threshold */ |
727 | 822 | used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS; | |
728 | if (prod >= cons) { | ||
729 | /* used = prod - cons - prod/size + cons/size */ | ||
730 | used -= NUM_TX_BD - NUM_TX_RINGS; | ||
731 | } | ||
732 | 823 | ||
824 | #ifdef BNX2X_STOP_ON_ERROR | ||
825 | BUG_TRAP(used >= 0); | ||
733 | BUG_TRAP(used <= fp->bp->tx_ring_size); | 826 | BUG_TRAP(used <= fp->bp->tx_ring_size); |
734 | BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL); | 827 | BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL); |
828 | #endif | ||
735 | 829 | ||
736 | return (fp->bp->tx_ring_size - used); | 830 | return (s16)(fp->bp->tx_ring_size) - used; |
737 | } | 831 | } |
738 | 832 | ||
739 | static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work) | 833 | static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work) |
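Editor's note: the reworked bnx2x_tx_avail() above drops the old per-page correction terms and instead computes ring occupancy with a wrap-safe signed 16-bit subtraction (SUB_S16), then adds NUM_TX_RINGS as a reserve for the per-page "next page" BDs. A standalone model of that arithmetic follows, with SUB_S16 approximated as a cast of the unsigned 16-bit difference and NUM_TX_RINGS set to an illustrative value.

#include <stdint.h>
#include <stdio.h>

#define NUM_TX_RINGS  8   /* illustrative: number of BD pages, one "next page" BD each */

/* Wrap-safe occupancy: casting the 16-bit difference to signed keeps the
 * result correct even after the producer index wraps past 0xffff. */
static int16_t sub_s16(uint16_t a, uint16_t b)
{
    return (int16_t)(a - b);
}

static int16_t tx_avail(uint16_t prod, uint16_t cons, uint16_t ring_size)
{
    int16_t used = sub_s16(prod, cons) + (int16_t)NUM_TX_RINGS;

    return (int16_t)ring_size - used;
}

int main(void)
{
    /* producer has wrapped: prod = 5, cons = 0xfffb, so 10 BDs in flight */
    printf("%d\n", tx_avail(5, 0xfffb, 4096));
    return 0;
}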
@@ -757,10 +851,10 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work) | |||
757 | 851 | ||
758 | /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */ | 852 | /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */ |
759 | 853 | ||
760 | DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %d\n", | 854 | DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n", |
761 | hw_cons, sw_cons, pkt_cons); | 855 | hw_cons, sw_cons, pkt_cons); |
762 | 856 | ||
763 | /* if (NEXT_TX_IDX(sw_cons) != hw_cons) { | 857 | /* if (NEXT_TX_IDX(sw_cons) != hw_cons) { |
764 | rmb(); | 858 | rmb(); |
765 | prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb); | 859 | prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb); |
766 | } | 860 | } |
@@ -793,7 +887,6 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work) | |||
793 | netif_wake_queue(bp->dev); | 887 | netif_wake_queue(bp->dev); |
794 | 888 | ||
795 | netif_tx_unlock(bp->dev); | 889 | netif_tx_unlock(bp->dev); |
796 | |||
797 | } | 890 | } |
798 | } | 891 | } |
799 | 892 | ||
@@ -804,13 +897,14 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp, | |||
804 | int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); | 897 | int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); |
805 | int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); | 898 | int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); |
806 | 899 | ||
807 | DP(NETIF_MSG_RX_STATUS, | 900 | DP(BNX2X_MSG_SP, |
808 | "fp %d cid %d got ramrod #%d state is %x type is %d\n", | 901 | "fp %d cid %d got ramrod #%d state is %x type is %d\n", |
809 | fp->index, cid, command, bp->state, rr_cqe->ramrod_cqe.type); | 902 | FP_IDX(fp), cid, command, bp->state, |
903 | rr_cqe->ramrod_cqe.ramrod_type); | ||
810 | 904 | ||
811 | bp->spq_left++; | 905 | bp->spq_left++; |
812 | 906 | ||
813 | if (fp->index) { | 907 | if (FP_IDX(fp)) { |
814 | switch (command | fp->state) { | 908 | switch (command | fp->state) { |
815 | case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | | 909 | case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | |
816 | BNX2X_FP_STATE_OPENING): | 910 | BNX2X_FP_STATE_OPENING): |
@@ -826,10 +920,11 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp, | |||
826 | break; | 920 | break; |
827 | 921 | ||
828 | default: | 922 | default: |
829 | BNX2X_ERR("unexpected MC reply(%d) state is %x\n", | 923 | BNX2X_ERR("unexpected MC reply (%d) " |
830 | command, fp->state); | 924 | "fp->state is %x\n", command, fp->state); |
925 | break; | ||
831 | } | 926 | } |
832 | mb(); /* force bnx2x_wait_ramrod to see the change */ | 927 | mb(); /* force bnx2x_wait_ramrod() to see the change */ |
833 | return; | 928 | return; |
834 | } | 929 | } |
835 | 930 | ||
@@ -846,25 +941,25 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp, | |||
846 | break; | 941 | break; |
847 | 942 | ||
848 | case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT): | 943 | case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT): |
849 | DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", | 944 | DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid); |
850 | cid); | ||
851 | bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED; | 945 | bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED; |
852 | break; | 946 | break; |
853 | 947 | ||
854 | case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN): | 948 | case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN): |
949 | case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG): | ||
855 | DP(NETIF_MSG_IFUP, "got set mac ramrod\n"); | 950 | DP(NETIF_MSG_IFUP, "got set mac ramrod\n"); |
856 | break; | 951 | break; |
857 | 952 | ||
858 | case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT): | 953 | case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT): |
859 | DP(NETIF_MSG_IFUP, "got (un)set mac ramrod\n"); | 954 | DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n"); |
860 | break; | 955 | break; |
861 | 956 | ||
862 | default: | 957 | default: |
863 | BNX2X_ERR("unexpected ramrod (%d) state is %x\n", | 958 | BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n", |
864 | command, bp->state); | 959 | command, bp->state); |
960 | break; | ||
865 | } | 961 | } |
866 | 962 | mb(); /* force bnx2x_wait_ramrod() to see the change */ | |
867 | mb(); /* force bnx2x_wait_ramrod to see the change */ | ||
868 | } | 963 | } |
869 | 964 | ||
870 | static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp, | 965 | static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp, |
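Editor's note: bnx2x_sp_event() above matches each ramrod completion by switching on (command | fp->state) or (command | bp->state), so a single case label names both the command that completed and the state the driver expected to be in. For that to be unambiguous the command and state values must occupy disjoint bit ranges, which is an assumption of the sketch below; the real RAMROD_CMD_ID_* and BNX2X_*STATE* encodings live in the driver headers.

#include <stdio.h>

/* Illustrative encodings: commands in the low byte, states above it. */
enum { CMD_CLIENT_SETUP = 0x01, CMD_HALT = 0x02 };
enum { STATE_OPENING = 0x100, STATE_HALTING = 0x200, STATE_OPEN = 0x300 };

/* Keying a switch on (command | state) checks, in one comparison, both
 * which ramrod completed and that it arrived in the expected state. */
static const char *handle_completion(int command, int state)
{
    switch (command | state) {
    case (CMD_CLIENT_SETUP | STATE_OPENING):
        return "client setup done -> state OPEN";
    case (CMD_HALT | STATE_HALTING):
        return "halt done -> state HALTED";
    default:
        return "unexpected completion for this state";
    }
}

int main(void)
{
    printf("%s\n", handle_completion(CMD_CLIENT_SETUP, STATE_OPENING));
    printf("%s\n", handle_completion(CMD_HALT, STATE_OPEN));
    return 0;
}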
@@ -882,7 +977,6 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp, | |||
882 | mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size, | 977 | mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size, |
883 | PCI_DMA_FROMDEVICE); | 978 | PCI_DMA_FROMDEVICE); |
884 | if (unlikely(dma_mapping_error(mapping))) { | 979 | if (unlikely(dma_mapping_error(mapping))) { |
885 | |||
886 | dev_kfree_skb(skb); | 980 | dev_kfree_skb(skb); |
887 | return -ENOMEM; | 981 | return -ENOMEM; |
888 | } | 982 | } |
@@ -924,7 +1018,7 @@ static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp, | |||
924 | static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | 1018 | static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) |
925 | { | 1019 | { |
926 | struct bnx2x *bp = fp->bp; | 1020 | struct bnx2x *bp = fp->bp; |
927 | u16 bd_cons, bd_prod, comp_ring_cons; | 1021 | u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; |
928 | u16 hw_comp_cons, sw_comp_cons, sw_comp_prod; | 1022 | u16 hw_comp_cons, sw_comp_cons, sw_comp_prod; |
929 | int rx_pkt = 0; | 1023 | int rx_pkt = 0; |
930 | 1024 | ||
@@ -933,12 +1027,15 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | |||
933 | return 0; | 1027 | return 0; |
934 | #endif | 1028 | #endif |
935 | 1029 | ||
1030 | /* CQ "next element" is of the size of the regular element, | ||
1031 | that's why it's ok here */ | ||
936 | hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb); | 1032 | hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb); |
937 | if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) | 1033 | if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) |
938 | hw_comp_cons++; | 1034 | hw_comp_cons++; |
939 | 1035 | ||
940 | bd_cons = fp->rx_bd_cons; | 1036 | bd_cons = fp->rx_bd_cons; |
941 | bd_prod = fp->rx_bd_prod; | 1037 | bd_prod = fp->rx_bd_prod; |
1038 | bd_prod_fw = bd_prod; | ||
942 | sw_comp_cons = fp->rx_comp_cons; | 1039 | sw_comp_cons = fp->rx_comp_cons; |
943 | sw_comp_prod = fp->rx_comp_prod; | 1040 | sw_comp_prod = fp->rx_comp_prod; |
944 | 1041 | ||
@@ -949,34 +1046,31 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | |||
949 | 1046 | ||
950 | DP(NETIF_MSG_RX_STATUS, | 1047 | DP(NETIF_MSG_RX_STATUS, |
951 | "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n", | 1048 | "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n", |
952 | fp->index, hw_comp_cons, sw_comp_cons); | 1049 | FP_IDX(fp), hw_comp_cons, sw_comp_cons); |
953 | 1050 | ||
954 | while (sw_comp_cons != hw_comp_cons) { | 1051 | while (sw_comp_cons != hw_comp_cons) { |
955 | unsigned int len, pad; | 1052 | struct sw_rx_bd *rx_buf = NULL; |
956 | struct sw_rx_bd *rx_buf; | ||
957 | struct sk_buff *skb; | 1053 | struct sk_buff *skb; |
958 | union eth_rx_cqe *cqe; | 1054 | union eth_rx_cqe *cqe; |
1055 | u8 cqe_fp_flags; | ||
1056 | u16 len, pad; | ||
959 | 1057 | ||
960 | comp_ring_cons = RCQ_BD(sw_comp_cons); | 1058 | comp_ring_cons = RCQ_BD(sw_comp_cons); |
961 | bd_prod = RX_BD(bd_prod); | 1059 | bd_prod = RX_BD(bd_prod); |
962 | bd_cons = RX_BD(bd_cons); | 1060 | bd_cons = RX_BD(bd_cons); |
963 | 1061 | ||
964 | cqe = &fp->rx_comp_ring[comp_ring_cons]; | 1062 | cqe = &fp->rx_comp_ring[comp_ring_cons]; |
1063 | cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; | ||
965 | 1064 | ||
966 | DP(NETIF_MSG_RX_STATUS, "hw_comp_cons %u sw_comp_cons %u" | ||
967 | " comp_ring (%u) bd_ring (%u,%u)\n", | ||
968 | hw_comp_cons, sw_comp_cons, | ||
969 | comp_ring_cons, bd_prod, bd_cons); | ||
970 | DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x" | 1065 | DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x" |
971 | " queue %x vlan %x len %x\n", | 1066 | " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags), |
972 | cqe->fast_path_cqe.type, | 1067 | cqe_fp_flags, cqe->fast_path_cqe.status_flags, |
973 | cqe->fast_path_cqe.error_type_flags, | ||
974 | cqe->fast_path_cqe.status_flags, | ||
975 | cqe->fast_path_cqe.rss_hash_result, | 1068 | cqe->fast_path_cqe.rss_hash_result, |
976 | cqe->fast_path_cqe.vlan_tag, cqe->fast_path_cqe.pkt_len); | 1069 | le16_to_cpu(cqe->fast_path_cqe.vlan_tag), |
1070 | le16_to_cpu(cqe->fast_path_cqe.pkt_len)); | ||
977 | 1071 | ||
978 | /* is this a slowpath msg? */ | 1072 | /* is this a slowpath msg? */ |
979 | if (unlikely(cqe->fast_path_cqe.type)) { | 1073 | if (unlikely(CQE_TYPE(cqe_fp_flags))) { |
980 | bnx2x_sp_event(fp, cqe); | 1074 | bnx2x_sp_event(fp, cqe); |
981 | goto next_cqe; | 1075 | goto next_cqe; |
982 | 1076 | ||
@@ -984,7 +1078,6 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | |||
984 | } else { | 1078 | } else { |
985 | rx_buf = &fp->rx_buf_ring[bd_cons]; | 1079 | rx_buf = &fp->rx_buf_ring[bd_cons]; |
986 | skb = rx_buf->skb; | 1080 | skb = rx_buf->skb; |
987 | |||
988 | len = le16_to_cpu(cqe->fast_path_cqe.pkt_len); | 1081 | len = le16_to_cpu(cqe->fast_path_cqe.pkt_len); |
989 | pad = cqe->fast_path_cqe.placement_offset; | 1082 | pad = cqe->fast_path_cqe.placement_offset; |
990 | 1083 | ||
@@ -996,13 +1089,11 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | |||
996 | prefetch(((char *)(skb)) + 128); | 1089 | prefetch(((char *)(skb)) + 128); |
997 | 1090 | ||
998 | /* is this an error packet? */ | 1091 | /* is this an error packet? */ |
999 | if (unlikely(cqe->fast_path_cqe.error_type_flags & | 1092 | if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) { |
1000 | ETH_RX_ERROR_FALGS)) { | ||
1001 | /* do we sometimes forward error packets anyway? */ | 1093 | /* do we sometimes forward error packets anyway? */ |
1002 | DP(NETIF_MSG_RX_ERR, | 1094 | DP(NETIF_MSG_RX_ERR, |
1003 | "ERROR flags(%u) Rx packet(%u)\n", | 1095 | "ERROR flags %x rx packet %u\n", |
1004 | cqe->fast_path_cqe.error_type_flags, | 1096 | cqe_fp_flags, sw_comp_cons); |
1005 | sw_comp_cons); | ||
1006 | /* TBD make sure MC counts this as a drop */ | 1097 | /* TBD make sure MC counts this as a drop */ |
1007 | goto reuse_rx; | 1098 | goto reuse_rx; |
1008 | } | 1099 | } |
@@ -1018,7 +1109,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | |||
1018 | len + pad); | 1109 | len + pad); |
1019 | if (new_skb == NULL) { | 1110 | if (new_skb == NULL) { |
1020 | DP(NETIF_MSG_RX_ERR, | 1111 | DP(NETIF_MSG_RX_ERR, |
1021 | "ERROR packet dropped " | 1112 | "ERROR packet dropped " |
1022 | "because of alloc failure\n"); | 1113 | "because of alloc failure\n"); |
1023 | /* TBD count this as a drop? */ | 1114 | /* TBD count this as a drop? */ |
1024 | goto reuse_rx; | 1115 | goto reuse_rx; |
@@ -1044,7 +1135,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | |||
1044 | 1135 | ||
1045 | } else { | 1136 | } else { |
1046 | DP(NETIF_MSG_RX_ERR, | 1137 | DP(NETIF_MSG_RX_ERR, |
1047 | "ERROR packet dropped because " | 1138 | "ERROR packet dropped because " |
1048 | "of alloc failure\n"); | 1139 | "of alloc failure\n"); |
1049 | reuse_rx: | 1140 | reuse_rx: |
1050 | bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod); | 1141 | bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod); |
@@ -1061,14 +1152,14 @@ reuse_rx: | |||
1061 | } | 1152 | } |
1062 | 1153 | ||
1063 | #ifdef BCM_VLAN | 1154 | #ifdef BCM_VLAN |
1064 | if ((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) | 1155 | if ((bp->vlgrp != NULL) && |
1065 | & PARSING_FLAGS_NUMBER_OF_NESTED_VLANS) | 1156 | (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & |
1066 | && (bp->vlgrp != NULL)) | 1157 | PARSING_FLAGS_VLAN)) |
1067 | vlan_hwaccel_receive_skb(skb, bp->vlgrp, | 1158 | vlan_hwaccel_receive_skb(skb, bp->vlgrp, |
1068 | le16_to_cpu(cqe->fast_path_cqe.vlan_tag)); | 1159 | le16_to_cpu(cqe->fast_path_cqe.vlan_tag)); |
1069 | else | 1160 | else |
1070 | #endif | 1161 | #endif |
1071 | netif_receive_skb(skb); | 1162 | netif_receive_skb(skb); |
1072 | 1163 | ||
1073 | bp->dev->last_rx = jiffies; | 1164 | bp->dev->last_rx = jiffies; |
1074 | 1165 | ||
@@ -1077,22 +1168,25 @@ next_rx: | |||
1077 | 1168 | ||
1078 | bd_cons = NEXT_RX_IDX(bd_cons); | 1169 | bd_cons = NEXT_RX_IDX(bd_cons); |
1079 | bd_prod = NEXT_RX_IDX(bd_prod); | 1170 | bd_prod = NEXT_RX_IDX(bd_prod); |
1171 | bd_prod_fw = NEXT_RX_IDX(bd_prod_fw); | ||
1172 | rx_pkt++; | ||
1080 | next_cqe: | 1173 | next_cqe: |
1081 | sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod); | 1174 | sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod); |
1082 | sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons); | 1175 | sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons); |
1083 | rx_pkt++; | ||
1084 | 1176 | ||
1085 | if ((rx_pkt == budget)) | 1177 | if (rx_pkt == budget) |
1086 | break; | 1178 | break; |
1087 | } /* while */ | 1179 | } /* while */ |
1088 | 1180 | ||
1089 | fp->rx_bd_cons = bd_cons; | 1181 | fp->rx_bd_cons = bd_cons; |
1090 | fp->rx_bd_prod = bd_prod; | 1182 | fp->rx_bd_prod = bd_prod_fw; |
1091 | fp->rx_comp_cons = sw_comp_cons; | 1183 | fp->rx_comp_cons = sw_comp_cons; |
1092 | fp->rx_comp_prod = sw_comp_prod; | 1184 | fp->rx_comp_prod = sw_comp_prod; |
1093 | 1185 | ||
1094 | REG_WR(bp, BAR_TSTRORM_INTMEM + | 1186 | REG_WR(bp, BAR_TSTRORM_INTMEM + |
1095 | TSTORM_RCQ_PROD_OFFSET(bp->port, fp->index), sw_comp_prod); | 1187 | TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)), |
1188 | sw_comp_prod); | ||
1189 | |||
1096 | 1190 | ||
1097 | mmiowb(); /* keep prod updates ordered */ | 1191 | mmiowb(); /* keep prod updates ordered */ |
1098 | 1192 | ||
@@ -1107,10 +1201,11 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) | |||
1107 | struct bnx2x_fastpath *fp = fp_cookie; | 1201 | struct bnx2x_fastpath *fp = fp_cookie; |
1108 | struct bnx2x *bp = fp->bp; | 1202 | struct bnx2x *bp = fp->bp; |
1109 | struct net_device *dev = bp->dev; | 1203 | struct net_device *dev = bp->dev; |
1110 | int index = fp->index; | 1204 | int index = FP_IDX(fp); |
1111 | 1205 | ||
1112 | DP(NETIF_MSG_INTR, "got an msix interrupt on [%d]\n", index); | 1206 | DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n", |
1113 | bnx2x_ack_sb(bp, index, USTORM_ID, 0, IGU_INT_DISABLE, 0); | 1207 | index, FP_SB_ID(fp)); |
1208 | bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0); | ||
1114 | 1209 | ||
1115 | #ifdef BNX2X_STOP_ON_ERROR | 1210 | #ifdef BNX2X_STOP_ON_ERROR |
1116 | if (unlikely(bp->panic)) | 1211 | if (unlikely(bp->panic)) |
@@ -1123,6 +1218,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) | |||
1123 | prefetch(&fp->status_blk->u_status_block.status_block_index); | 1218 | prefetch(&fp->status_blk->u_status_block.status_block_index); |
1124 | 1219 | ||
1125 | netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi)); | 1220 | netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi)); |
1221 | |||
1126 | return IRQ_HANDLED; | 1222 | return IRQ_HANDLED; |
1127 | } | 1223 | } |
1128 | 1224 | ||
@@ -1131,26 +1227,28 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) | |||
1131 | struct net_device *dev = dev_instance; | 1227 | struct net_device *dev = dev_instance; |
1132 | struct bnx2x *bp = netdev_priv(dev); | 1228 | struct bnx2x *bp = netdev_priv(dev); |
1133 | u16 status = bnx2x_ack_int(bp); | 1229 | u16 status = bnx2x_ack_int(bp); |
1230 | u16 mask; | ||
1134 | 1231 | ||
1232 | /* Return here if interrupt is shared and it's not for us */ | ||
1135 | if (unlikely(status == 0)) { | 1233 | if (unlikely(status == 0)) { |
1136 | DP(NETIF_MSG_INTR, "not our interrupt!\n"); | 1234 | DP(NETIF_MSG_INTR, "not our interrupt!\n"); |
1137 | return IRQ_NONE; | 1235 | return IRQ_NONE; |
1138 | } | 1236 | } |
1139 | 1237 | DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status); | |
1140 | DP(NETIF_MSG_INTR, "got an interrupt status is %u\n", status); | ||
1141 | 1238 | ||
1142 | #ifdef BNX2X_STOP_ON_ERROR | 1239 | #ifdef BNX2X_STOP_ON_ERROR |
1143 | if (unlikely(bp->panic)) | 1240 | if (unlikely(bp->panic)) |
1144 | return IRQ_HANDLED; | 1241 | return IRQ_HANDLED; |
1145 | #endif | 1242 | #endif |
1146 | 1243 | ||
1147 | /* Return here if interrupt is shared and is disabled */ | 1244 | /* Return here if interrupt is disabled */ |
1148 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) { | 1245 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) { |
1149 | DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); | 1246 | DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); |
1150 | return IRQ_HANDLED; | 1247 | return IRQ_HANDLED; |
1151 | } | 1248 | } |
1152 | 1249 | ||
1153 | if (status & 0x2) { | 1250 | mask = 0x2 << bp->fp[0].sb_id; |
1251 | if (status & mask) { | ||
1154 | struct bnx2x_fastpath *fp = &bp->fp[0]; | 1252 | struct bnx2x_fastpath *fp = &bp->fp[0]; |
1155 | 1253 | ||
1156 | prefetch(fp->rx_cons_sb); | 1254 | prefetch(fp->rx_cons_sb); |
@@ -1160,13 +1258,11 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) | |||
1160 | 1258 | ||
1161 | netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi)); | 1259 | netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi)); |
1162 | 1260 | ||
1163 | status &= ~0x2; | 1261 | status &= ~mask; |
1164 | if (!status) | ||
1165 | return IRQ_HANDLED; | ||
1166 | } | 1262 | } |
1167 | 1263 | ||
1168 | if (unlikely(status & 0x1)) { | ||
1169 | 1264 | ||
1265 | if (unlikely(status & 0x1)) { | ||
1170 | schedule_work(&bp->sp_task); | 1266 | schedule_work(&bp->sp_task); |
1171 | 1267 | ||
1172 | status &= ~0x1; | 1268 | status &= ~0x1; |
@@ -1174,8 +1270,9 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) | |||
1174 | return IRQ_HANDLED; | 1270 | return IRQ_HANDLED; |
1175 | } | 1271 | } |
1176 | 1272 | ||
1177 | DP(NETIF_MSG_INTR, "got an unknown interrupt! (status is %u)\n", | 1273 | if (status) |
1178 | status); | 1274 | DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n", |
1275 | status); | ||
1179 | 1276 | ||
1180 | return IRQ_HANDLED; | 1277 | return IRQ_HANDLED; |
1181 | } | 1278 | } |
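Editor's note: with per-function status blocks the INTA handler above no longer hard-codes bit 1 for fast path 0; it derives the bit from the status-block id (0x2 << sb_id), keeps bit 0 for the slowpath task, and reports whatever remains as unknown. A standalone sketch of that dispatch follows; the bit assignments come from the hunk, and the printfs stand in for the NAPI schedule and work queueing.

#include <stdint.h>
#include <stdio.h>

/* Dispatch an INTA status word: bit 0 requests slowpath work, and the
 * fast-path status block owns bit (1 + sb_id), i.e. 0x2 << sb_id. */
static void dispatch_status(uint16_t status, uint8_t fp0_sb_id)
{
    uint16_t mask = 0x2u << fp0_sb_id;

    if (status & mask) {
        printf("schedule NAPI poll for fastpath 0\n");
        status &= ~mask;
    }
    if (status & 0x1) {
        printf("schedule slowpath task\n");
        status &= ~0x1;
    }
    if (status)
        printf("unknown interrupt bits: 0x%x\n", status);
}

int main(void)
{
    dispatch_status(0x11, 3);  /* sb_id 3: fastpath bit is 0x10, plus slowpath bit 0x1 */
    return 0;
}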
@@ -1193,7 +1290,7 @@ static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource) | |||
1193 | { | 1290 | { |
1194 | u32 lock_status; | 1291 | u32 lock_status; |
1195 | u32 resource_bit = (1 << resource); | 1292 | u32 resource_bit = (1 << resource); |
1196 | u8 port = bp->port; | 1293 | u8 port = BP_PORT(bp); |
1197 | int cnt; | 1294 | int cnt; |
1198 | 1295 | ||
1199 | /* Validating that the resource is within range */ | 1296 | /* Validating that the resource is within range */ |
@@ -1231,7 +1328,7 @@ static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource) | |||
1231 | { | 1328 | { |
1232 | u32 lock_status; | 1329 | u32 lock_status; |
1233 | u32 resource_bit = (1 << resource); | 1330 | u32 resource_bit = (1 << resource); |
1234 | u8 port = bp->port; | 1331 | u8 port = BP_PORT(bp); |
1235 | 1332 | ||
1236 | /* Validating that the resource is within range */ | 1333 | /* Validating that the resource is within range */ |
1237 | if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { | 1334 | if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { |
@@ -1258,7 +1355,7 @@ static void bnx2x_phy_hw_lock(struct bnx2x *bp) | |||
1258 | { | 1355 | { |
1259 | u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config); | 1356 | u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config); |
1260 | 1357 | ||
1261 | mutex_lock(&bp->phy_mutex); | 1358 | mutex_lock(&bp->port.phy_mutex); |
1262 | 1359 | ||
1263 | if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) || | 1360 | if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) || |
1264 | (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)) | 1361 | (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)) |
@@ -1273,14 +1370,14 @@ static void bnx2x_phy_hw_unlock(struct bnx2x *bp) | |||
1273 | (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)) | 1370 | (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)) |
1274 | bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO); | 1371 | bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO); |
1275 | 1372 | ||
1276 | mutex_unlock(&bp->phy_mutex); | 1373 | mutex_unlock(&bp->port.phy_mutex); |
1277 | } | 1374 | } |
1278 | 1375 | ||
1279 | int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode) | 1376 | int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode) |
1280 | { | 1377 | { |
1281 | /* The GPIO should be swapped if swap register is set and active */ | 1378 | /* The GPIO should be swapped if swap register is set and active */ |
1282 | int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && | 1379 | int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && |
1283 | REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ bp->port; | 1380 | REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp); |
1284 | int gpio_shift = gpio_num + | 1381 | int gpio_shift = gpio_num + |
1285 | (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0); | 1382 | (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0); |
1286 | u32 gpio_mask = (1 << gpio_shift); | 1383 | u32 gpio_mask = (1 << gpio_shift); |
@@ -1379,18 +1476,18 @@ static void bnx2x_calc_fc_adv(struct bnx2x *bp) | |||
1379 | { | 1476 | { |
1380 | switch (bp->link_vars.ieee_fc) { | 1477 | switch (bp->link_vars.ieee_fc) { |
1381 | case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: | 1478 | case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: |
1382 | bp->advertising &= ~(ADVERTISED_Asym_Pause | | 1479 | bp->port.advertising &= ~(ADVERTISED_Asym_Pause | |
1383 | ADVERTISED_Pause); | 1480 | ADVERTISED_Pause); |
1384 | break; | 1481 | break; |
1385 | case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: | 1482 | case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: |
1386 | bp->advertising |= (ADVERTISED_Asym_Pause | | 1483 | bp->port.advertising |= (ADVERTISED_Asym_Pause | |
1387 | ADVERTISED_Pause); | 1484 | ADVERTISED_Pause); |
1388 | break; | 1485 | break; |
1389 | case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: | 1486 | case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: |
1390 | bp->advertising |= ADVERTISED_Asym_Pause; | 1487 | bp->port.advertising |= ADVERTISED_Asym_Pause; |
1391 | break; | 1488 | break; |
1392 | default: | 1489 | default: |
1393 | bp->advertising &= ~(ADVERTISED_Asym_Pause | | 1490 | bp->port.advertising &= ~(ADVERTISED_Asym_Pause | |
1394 | ADVERTISED_Pause); | 1491 | ADVERTISED_Pause); |
1395 | break; | 1492 | break; |
1396 | } | 1493 | } |
@@ -1443,6 +1540,7 @@ static u8 bnx2x_initial_phy_init(struct bnx2x *bp) | |||
1443 | bnx2x_link_report(bp); | 1540 | bnx2x_link_report(bp); |
1444 | 1541 | ||
1445 | bnx2x_calc_fc_adv(bp); | 1542 | bnx2x_calc_fc_adv(bp); |
1543 | |||
1446 | return rc; | 1544 | return rc; |
1447 | } | 1545 | } |
1448 | 1546 | ||
@@ -1473,15 +1571,261 @@ static u8 bnx2x_link_test(struct bnx2x *bp) | |||
1473 | return rc; | 1571 | return rc; |
1474 | } | 1572 | } |
1475 | 1573 | ||
1574 | /* Calculates the sum of vn_min_rates. | ||
1575 | It's needed for further normalizing of the min_rates. | ||
1576 | |||
1577 | Returns: | ||
1578 | sum of vn_min_rates | ||
1579 | or | ||
1580 | 0 - if all the min_rates are 0. | ||
1581 | In the latter case fairness algorithm should be deactivated. | ||
1582 | If not all min_rates are zero then those that are zeroes will | ||
1583 | be set to 1. | ||
1584 | */ | ||
1585 | static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp) | ||
1586 | { | ||
1587 | int i, port = BP_PORT(bp); | ||
1588 | u32 wsum = 0; | ||
1589 | int all_zero = 1; | ||
1590 | |||
1591 | for (i = 0; i < E1HVN_MAX; i++) { | ||
1592 | u32 vn_cfg = | ||
1593 | SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config); | ||
1594 | u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> | ||
1595 | FUNC_MF_CFG_MIN_BW_SHIFT) * 100; | ||
1596 | if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) { | ||
1597 | /* If min rate is zero - set it to 1 */ | ||
1598 | if (!vn_min_rate) | ||
1599 | vn_min_rate = DEF_MIN_RATE; | ||
1600 | else | ||
1601 | all_zero = 0; | ||
1602 | |||
1603 | wsum += vn_min_rate; | ||
1604 | } | ||
1605 | } | ||
1606 | |||
1607 | /* ... only if all min rates are zeros - disable FAIRNESS */ | ||
1608 | if (all_zero) | ||
1609 | return 0; | ||
1610 | |||
1611 | return wsum; | ||
1612 | } | ||
1613 | |||
1614 | static void bnx2x_init_port_minmax(struct bnx2x *bp, | ||
1615 | int en_fness, | ||
1616 | u16 port_rate, | ||
1617 | struct cmng_struct_per_port *m_cmng_port) | ||
1618 | { | ||
1619 | u32 r_param = port_rate / 8; | ||
1620 | int port = BP_PORT(bp); | ||
1621 | int i; | ||
1622 | |||
1623 | memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port)); | ||
1624 | |||
1625 | /* Enable minmax only if we are in e1hmf mode */ | ||
1626 | if (IS_E1HMF(bp)) { | ||
1627 | u32 fair_periodic_timeout_usec; | ||
1628 | u32 t_fair; | ||
1629 | |||
1630 | /* Enable rate shaping and fairness */ | ||
1631 | m_cmng_port->flags.cmng_vn_enable = 1; | ||
1632 | m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0; | ||
1633 | m_cmng_port->flags.rate_shaping_enable = 1; | ||
1634 | |||
1635 | if (!en_fness) | ||
1636 | DP(NETIF_MSG_IFUP, "All MIN values are zeroes" | ||
1637 | " fairness will be disabled\n"); | ||
1638 | |||
1639 | /* 100 usec in SDM ticks = 25 since each tick is 4 usec */ | ||
1640 | m_cmng_port->rs_vars.rs_periodic_timeout = | ||
1641 | RS_PERIODIC_TIMEOUT_USEC / 4; | ||
1642 | |||
1643 | /* this is the threshold below which no timer arming will occur | ||
1644 | 1.25 coefficient is for the threshold to be a little bigger | ||
1645 | than the real time, to compensate for timer in-accuracy */ | ||
1646 | m_cmng_port->rs_vars.rs_threshold = | ||
1647 | (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4; | ||
1648 | |||
1649 | /* resolution of fairness timer */ | ||
1650 | fair_periodic_timeout_usec = QM_ARB_BYTES / r_param; | ||
1651 | /* for 10G it is 1000usec. for 1G it is 10000usec. */ | ||
1652 | t_fair = T_FAIR_COEF / port_rate; | ||
1653 | |||
1654 | /* this is the threshold below which we won't arm | ||
1655 | the timer anymore */ | ||
1656 | m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES; | ||
1657 | |||
1658 | /* we multiply by 1e3/8 to get bytes/msec. | ||
1659 | We don't want the credits to pass a credit | ||
1660 | of the T_FAIR*FAIR_MEM (algorithm resolution) */ | ||
1661 | m_cmng_port->fair_vars.upper_bound = | ||
1662 | r_param * t_fair * FAIR_MEM; | ||
1663 | /* since each tick is 4 usec */ | ||
1664 | m_cmng_port->fair_vars.fairness_timeout = | ||
1665 | fair_periodic_timeout_usec / 4; | ||
1666 | |||
1667 | } else { | ||
1668 | /* Disable rate shaping and fairness */ | ||
1669 | m_cmng_port->flags.cmng_vn_enable = 0; | ||
1670 | m_cmng_port->flags.fairness_enable = 0; | ||
1671 | m_cmng_port->flags.rate_shaping_enable = 0; | ||
1672 | |||
1673 | DP(NETIF_MSG_IFUP, | ||
1674 | "Single function mode minmax will be disabled\n"); | ||
1675 | } | ||
1676 | |||
1677 | /* Store it to internal memory */ | ||
1678 | for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++) | ||
1679 | REG_WR(bp, BAR_XSTRORM_INTMEM + | ||
1680 | XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4, | ||
1681 | ((u32 *)(m_cmng_port))[i]); | ||
1682 | } | ||
1683 | |||
1684 | static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func, | ||
1685 | u32 wsum, u16 port_rate, | ||
1686 | struct cmng_struct_per_port *m_cmng_port) | ||
1687 | { | ||
1688 | struct rate_shaping_vars_per_vn m_rs_vn; | ||
1689 | struct fairness_vars_per_vn m_fair_vn; | ||
1690 | u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); | ||
1691 | u16 vn_min_rate, vn_max_rate; | ||
1692 | int i; | ||
1693 | |||
1694 | /* If function is hidden - set min and max to zeroes */ | ||
1695 | if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { | ||
1696 | vn_min_rate = 0; | ||
1697 | vn_max_rate = 0; | ||
1698 | |||
1699 | } else { | ||
1700 | vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> | ||
1701 | FUNC_MF_CFG_MIN_BW_SHIFT) * 100; | ||
1702 | /* If FAIRNESS is enabled (not all min rates are zeroes) and | ||
1703 | if current min rate is zero - set it to 1. | ||
1704 | This is a requirement of the algorithm. | ||
1705 | if ((vn_min_rate == 0) && wsum) | ||
1706 | vn_min_rate = DEF_MIN_RATE; | ||
1707 | vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> | ||
1708 | FUNC_MF_CFG_MAX_BW_SHIFT) * 100; | ||
1709 | } | ||
1710 | |||
1711 | DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d " | ||
1712 | "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum); | ||
1713 | |||
1714 | memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn)); | ||
1715 | memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn)); | ||
1716 | |||
1717 | /* global vn counter - maximal Mbps for this vn */ | ||
1718 | m_rs_vn.vn_counter.rate = vn_max_rate; | ||
1719 | |||
1720 | /* quota - number of bytes transmitted in this period */ | ||
1721 | m_rs_vn.vn_counter.quota = | ||
1722 | (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8; | ||
1723 | |||
1724 | #ifdef BNX2X_PER_PROT_QOS | ||
1725 | /* per protocol counter */ | ||
1726 | for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) { | ||
1727 | /* maximal Mbps for this protocol */ | ||
1728 | m_rs_vn.protocol_counters[protocol].rate = | ||
1729 | protocol_max_rate[protocol]; | ||
1730 | /* the quota in each timer period - | ||
1731 | number of bytes transmitted in this period */ | ||
1732 | m_rs_vn.protocol_counters[protocol].quota = | ||
1733 | (u32)(rs_periodic_timeout_usec * | ||
1734 | ((double)m_rs_vn. | ||
1735 | protocol_counters[protocol].rate/8)); | ||
1736 | } | ||
1737 | #endif | ||
1738 | |||
1739 | if (wsum) { | ||
1740 | /* credit for each period of the fairness algorithm: | ||
1741 | number of bytes in T_FAIR (the vn share the port rate). | ||
1742 | wsum should not be larger than 10000, thus | ||
1743 | T_FAIR_COEF / (8 * wsum) will always be greater than zero */ | ||
1744 | m_fair_vn.vn_credit_delta = | ||
1745 | max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))), | ||
1746 | (u64)(m_cmng_port->fair_vars.fair_threshold * 2)); | ||
1747 | DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n", | ||
1748 | m_fair_vn.vn_credit_delta); | ||
1749 | } | ||
1750 | |||
1751 | #ifdef BNX2X_PER_PROT_QOS | ||
1752 | do { | ||
1753 | u32 protocolWeightSum = 0; | ||
1754 | |||
1755 | for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) | ||
1756 | protocolWeightSum += | ||
1757 | drvInit.protocol_min_rate[protocol]; | ||
1758 | /* per protocol counter - | ||
1759 | NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */ | ||
1760 | if (protocolWeightSum > 0) { | ||
1761 | for (protocol = 0; | ||
1762 | protocol < NUM_OF_PROTOCOLS; protocol++) | ||
1763 | /* credit for each period of the | ||
1764 | fairness algorithm - number of bytes in | ||
1765 | T_FAIR (the protocol share the vn rate) */ | ||
1766 | m_fair_vn.protocol_credit_delta[protocol] = | ||
1767 | (u32)((vn_min_rate / 8) * t_fair * | ||
1768 | protocol_min_rate / protocolWeightSum); | ||
1769 | } | ||
1770 | } while (0); | ||
1771 | #endif | ||
1772 | |||
1773 | /* Store it to internal memory */ | ||
1774 | for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++) | ||
1775 | REG_WR(bp, BAR_XSTRORM_INTMEM + | ||
1776 | XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4, | ||
1777 | ((u32 *)(&m_rs_vn))[i]); | ||
1778 | |||
1779 | for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++) | ||
1780 | REG_WR(bp, BAR_XSTRORM_INTMEM + | ||
1781 | XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4, | ||
1782 | ((u32 *)(&m_fair_vn))[i]); | ||
1783 | } | ||
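A back-of-the-envelope check of the two formulas in bnx2x_init_vn_minmax() above, as a hedged standalone sketch (not driver code). The constants RS_PERIODIC_TIMEOUT_USEC, T_FAIR_COEF, DEF_MIN_RATE and the fairness threshold are assumed values for illustration; the real definitions live in the bnx2x headers.

/* Hedged sketch of the per-VN quota and fairness-credit math above;
 * all constants here are assumptions, not the driver's definitions. */
#include <stdio.h>
#include <stdint.h>

#define RS_PERIODIC_TIMEOUT_USEC	400		/* assumed shaping period */
#define T_FAIR_COEF			10000000	/* assumed fairness coefficient */
#define DEF_MIN_RATE			1		/* assumed minimal min rate */

int main(void)
{
	uint32_t vn_min_rate = 25 * 100;	/* min BW field * 100, Mbps-like units */
	uint32_t vn_max_rate = 50 * 100;
	uint32_t wsum = 100 * 100;		/* sum of all VN min rates on the port */
	uint32_t fair_threshold = 4000;		/* assumed port fairness threshold */

	if ((vn_min_rate == 0) && wsum)
		vn_min_rate = DEF_MIN_RATE;	/* the algorithm needs a non-zero min */

	/* quota: bytes this VN may transmit per rate-shaping period */
	uint32_t quota = (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	/* credit: this VN's share of T_FAIR in bytes, floored at twice the
	 * fairness threshold exactly as in the max() above */
	uint64_t credit = (uint64_t)vn_min_rate * (T_FAIR_COEF / (8 * wsum));
	if (credit < (uint64_t)fair_threshold * 2)
		credit = (uint64_t)fair_threshold * 2;

	printf("quota=%u bytes/period, credit=%llu bytes\n",
	       (unsigned)quota, (unsigned long long)credit);
	return 0;
}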
1784 | |||
1476 | /* This function is called upon link interrupt */ | 1785 | /* This function is called upon link interrupt */ |
1477 | static void bnx2x_link_attn(struct bnx2x *bp) | 1786 | static void bnx2x_link_attn(struct bnx2x *bp) |
1478 | { | 1787 | { |
1788 | int vn; | ||
1789 | |||
1479 | bnx2x_phy_hw_lock(bp); | 1790 | bnx2x_phy_hw_lock(bp); |
1480 | bnx2x_link_update(&bp->link_params, &bp->link_vars); | 1791 | bnx2x_link_update(&bp->link_params, &bp->link_vars); |
1481 | bnx2x_phy_hw_unlock(bp); | 1792 | bnx2x_phy_hw_unlock(bp); |
1482 | 1793 | ||
1483 | /* indicate link status */ | 1794 | /* indicate link status */ |
1484 | bnx2x_link_report(bp); | 1795 | bnx2x_link_report(bp); |
1796 | |||
1797 | if (IS_E1HMF(bp)) { | ||
1798 | int func; | ||
1799 | |||
1800 | for (vn = VN_0; vn < E1HVN_MAX; vn++) { | ||
1801 | if (vn == BP_E1HVN(bp)) | ||
1802 | continue; | ||
1803 | |||
1804 | func = ((vn << 1) | BP_PORT(bp)); | ||
1805 | |||
1806 | /* Set the attention towards other drivers | ||
1807 | on the same port */ | ||
1808 | REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 + | ||
1809 | (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1); | ||
1810 | } | ||
1811 | } | ||
1812 | |||
1813 | if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) { | ||
1814 | struct cmng_struct_per_port m_cmng_port; | ||
1815 | u32 wsum; | ||
1816 | int port = BP_PORT(bp); | ||
1817 | |||
1818 | /* Init RATE SHAPING and FAIRNESS contexts */ | ||
1819 | wsum = bnx2x_calc_vn_wsum(bp); | ||
1820 | bnx2x_init_port_minmax(bp, (int)wsum, | ||
1821 | bp->link_vars.line_speed, | ||
1822 | &m_cmng_port); | ||
1823 | if (IS_E1HMF(bp)) | ||
1824 | for (vn = VN_0; vn < E1HVN_MAX; vn++) | ||
1825 | bnx2x_init_vn_minmax(bp, 2*vn + port, | ||
1826 | wsum, bp->link_vars.line_speed, | ||
1827 | &m_cmng_port); | ||
1828 | } | ||
1485 | } | 1829 | } |
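In the E1HMF branch above, the absolute function number interleaves VN and port as func = (vn << 1) | port, and one general-attention register (4 bytes apart per bit) is pulsed per peer function. A hedged sketch of that index math, assuming E1HVN_MAX is 4 and an arbitrary value for the base attention bit:

/* Hedged sketch of the func/VN numbering used when signalling the
 * link-sync attention to the other functions on the same port. */
#include <stdio.h>

#define E1HVN_MAX			4	/* assumed: 4 VNs per port */
#define LINK_SYNC_ATTENTION_BIT_FUNC_0	12	/* assumed base attention bit */

int main(void)
{
	int port = 1;	/* this driver instance's port */
	int my_vn = 2;	/* this driver instance's VN */

	for (int vn = 0; vn < E1HVN_MAX; vn++) {
		if (vn == my_vn)
			continue;	/* never signal ourselves */

		int func = (vn << 1) | port;
		int attn_offset = (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func) * 4;

		printf("vn %d -> func %d, GENERAL_ATTN offset +0x%x\n",
		       vn, func, (unsigned)attn_offset);
	}
	return 0;
}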
1486 | 1830 | ||
1487 | static void bnx2x__link_status_update(struct bnx2x *bp) | 1831 | static void bnx2x__link_status_update(struct bnx2x *bp) |
@@ -1495,6 +1839,20 @@ static void bnx2x__link_status_update(struct bnx2x *bp) | |||
1495 | bnx2x_link_report(bp); | 1839 | bnx2x_link_report(bp); |
1496 | } | 1840 | } |
1497 | 1841 | ||
1842 | static void bnx2x_pmf_update(struct bnx2x *bp) | ||
1843 | { | ||
1844 | int port = BP_PORT(bp); | ||
1845 | u32 val; | ||
1846 | |||
1847 | bp->port.pmf = 1; | ||
1848 | DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); | ||
1849 | |||
1850 | /* enable nig attention */ | ||
1851 | val = (0xff0f | (1 << (BP_E1HVN(bp) + 4))); | ||
1852 | REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); | ||
1853 | REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); | ||
1854 | } | ||
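The mask written to the leading/trailing edge registers above is 0xff0f plus one extra bit for this function's VN. A short hedged sketch of the value per VN, on the assumption that bits 4..7 correspond to VN0..VN3:

/* Hedged sketch: edge mask used in bnx2x_pmf_update(); the VN-to-bit
 * mapping (bits 4..7) is an assumption for illustration. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	for (int vn = 0; vn < 4; vn++) {
		uint32_t val = 0xff0f | (1 << (vn + 4));
		printf("vn %d -> edge mask 0x%04x\n", vn, (unsigned)val);
	}
	return 0;
}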
1855 | |||
1498 | /* end of Link */ | 1856 | /* end of Link */ |
1499 | 1857 | ||
1500 | /* slow path */ | 1858 | /* slow path */ |
@@ -1507,10 +1865,10 @@ static void bnx2x__link_status_update(struct bnx2x *bp) | |||
1507 | static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, | 1865 | static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, |
1508 | u32 data_hi, u32 data_lo, int common) | 1866 | u32 data_hi, u32 data_lo, int common) |
1509 | { | 1867 | { |
1510 | int port = bp->port; | 1868 | int func = BP_FUNC(bp); |
1511 | 1869 | ||
1512 | DP(NETIF_MSG_TIMER, | 1870 | DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/, |
1513 | "spe (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n", | 1871 | "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n", |
1514 | (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) + | 1872 | (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) + |
1515 | (void *)bp->spq_prod_bd - (void *)bp->spq), command, | 1873 | (void *)bp->spq_prod_bd - (void *)bp->spq), command, |
1516 | HW_CID(bp, cid), data_hi, data_lo, bp->spq_left); | 1874 | HW_CID(bp, cid), data_hi, data_lo, bp->spq_left); |
@@ -1520,11 +1878,11 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, | |||
1520 | return -EIO; | 1878 | return -EIO; |
1521 | #endif | 1879 | #endif |
1522 | 1880 | ||
1523 | spin_lock(&bp->spq_lock); | 1881 | spin_lock_bh(&bp->spq_lock); |
1524 | 1882 | ||
1525 | if (!bp->spq_left) { | 1883 | if (!bp->spq_left) { |
1526 | BNX2X_ERR("BUG! SPQ ring full!\n"); | 1884 | BNX2X_ERR("BUG! SPQ ring full!\n"); |
1527 | spin_unlock(&bp->spq_lock); | 1885 | spin_unlock_bh(&bp->spq_lock); |
1528 | bnx2x_panic(); | 1886 | bnx2x_panic(); |
1529 | return -EBUSY; | 1887 | return -EBUSY; |
1530 | } | 1888 | } |
@@ -1553,18 +1911,18 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, | |||
1553 | bp->spq_prod_idx++; | 1911 | bp->spq_prod_idx++; |
1554 | } | 1912 | } |
1555 | 1913 | ||
1556 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(port), | 1914 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func), |
1557 | bp->spq_prod_idx); | 1915 | bp->spq_prod_idx); |
1558 | 1916 | ||
1559 | spin_unlock(&bp->spq_lock); | 1917 | spin_unlock_bh(&bp->spq_lock); |
1560 | return 0; | 1918 | return 0; |
1561 | } | 1919 | } |
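bnx2x_sp_post() follows a classic bounded-producer pattern under a BH-disabling spinlock: check the free-entry counter, fill the next BD, advance (and wrap) the producer, then publish the producer index to the XSTORM. A hedged, generic sketch of that pattern with illustrative names and sizes, not the driver's structures:

/* Generic sketch of the SPQ post pattern: check space, fill the entry,
 * advance the producer, publish it. Locking is noted in comments only. */
#include <stdio.h>

#define SPQ_DESC_CNT	64

struct spq_entry { unsigned cmd, cid, data_hi, data_lo; };

static struct spq_entry spq[SPQ_DESC_CNT];
static unsigned spq_prod_idx;		/* running producer index */
static unsigned spq_left = SPQ_DESC_CNT;

static int spq_post(unsigned cmd, unsigned cid, unsigned hi, unsigned lo)
{
	/* in the driver this runs under spin_lock_bh(&bp->spq_lock) */
	if (!spq_left) {
		fprintf(stderr, "SPQ ring full\n");
		return -1;
	}

	struct spq_entry *e = &spq[spq_prod_idx % SPQ_DESC_CNT];
	e->cmd = cmd;
	e->cid = cid;
	e->data_hi = hi;
	e->data_lo = lo;

	spq_left--;
	spq_prod_idx++;		/* hardware learns this via a register write */
	return 0;
}

int main(void)
{
	spq_post(1 /* e.g. a setup ramrod */, 0, 0, 0);
	printf("prod %u, left %u\n", spq_prod_idx, spq_left);
	return 0;
}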
1562 | 1920 | ||
1563 | /* acquire split MCP access lock register */ | 1921 | /* acquire split MCP access lock register */ |
1564 | static int bnx2x_lock_alr(struct bnx2x *bp) | 1922 | static int bnx2x_lock_alr(struct bnx2x *bp) |
1565 | { | 1923 | { |
1566 | int rc = 0; | ||
1567 | u32 i, j, val; | 1924 | u32 i, j, val; |
1925 | int rc = 0; | ||
1568 | 1926 | ||
1569 | might_sleep(); | 1927 | might_sleep(); |
1570 | i = 100; | 1928 | i = 100; |
@@ -1577,10 +1935,8 @@ static int bnx2x_lock_alr(struct bnx2x *bp) | |||
1577 | 1935 | ||
1578 | msleep(5); | 1936 | msleep(5); |
1579 | } | 1937 | } |
1580 | |||
1581 | if (!(val & (1L << 31))) { | 1938 | if (!(val & (1L << 31))) { |
1582 | BNX2X_ERR("Cannot acquire nvram interface\n"); | 1939 | BNX2X_ERR("Cannot acquire nvram interface\n"); |
1583 | |||
1584 | rc = -EBUSY; | 1940 | rc = -EBUSY; |
1585 | } | 1941 | } |
1586 | 1942 | ||
@@ -1631,8 +1987,9 @@ static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp) | |||
1631 | 1987 | ||
1632 | static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) | 1988 | static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) |
1633 | { | 1989 | { |
1634 | int port = bp->port; | 1990 | int port = BP_PORT(bp); |
1635 | u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_PORT_BASE * port) * 8; | 1991 | int func = BP_FUNC(bp); |
1992 | u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_FUNC_BASE * func) * 8; | ||
1636 | u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : | 1993 | u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : |
1637 | MISC_REG_AEU_MASK_ATTN_FUNC_0; | 1994 | MISC_REG_AEU_MASK_ATTN_FUNC_0; |
1638 | u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : | 1995 | u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : |
@@ -1716,14 +2073,14 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) | |||
1716 | 2073 | ||
1717 | static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) | 2074 | static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) |
1718 | { | 2075 | { |
1719 | int port = bp->port; | 2076 | int port = BP_PORT(bp); |
1720 | int reg_offset; | 2077 | int reg_offset; |
1721 | u32 val; | 2078 | u32 val; |
1722 | 2079 | ||
1723 | if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { | 2080 | reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : |
2081 | MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); | ||
1724 | 2082 | ||
1725 | reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : | 2083 | if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { |
1726 | MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); | ||
1727 | 2084 | ||
1728 | val = REG_RD(bp, reg_offset); | 2085 | val = REG_RD(bp, reg_offset); |
1729 | val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; | 2086 | val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; |
@@ -1731,7 +2088,7 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) | |||
1731 | 2088 | ||
1732 | BNX2X_ERR("SPIO5 hw attention\n"); | 2089 | BNX2X_ERR("SPIO5 hw attention\n"); |
1733 | 2090 | ||
1734 | switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) { | 2091 | switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) { |
1735 | case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G: | 2092 | case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G: |
1736 | /* Fan failure attention */ | 2093 | /* Fan failure attention */ |
1737 | 2094 | ||
@@ -1762,6 +2119,17 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) | |||
1762 | break; | 2119 | break; |
1763 | } | 2120 | } |
1764 | } | 2121 | } |
2122 | |||
2123 | if (attn & HW_INTERRUT_ASSERT_SET_0) { | ||
2124 | |||
2125 | val = REG_RD(bp, reg_offset); | ||
2126 | val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); | ||
2127 | REG_WR(bp, reg_offset, val); | ||
2128 | |||
2129 | BNX2X_ERR("FATAL HW block attention set0 0x%x\n", | ||
2130 | (attn & HW_INTERRUT_ASSERT_SET_0)); | ||
2131 | bnx2x_panic(); | ||
2132 | } | ||
1765 | } | 2133 | } |
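The HW_INTERRUT_ASSERT_SET_* handlers added above (set0 here, set1 and set2 below) all use the same read-modify-write: read the per-port AEU enable register, clear the bits that fired so the attention cannot re-assert, write it back, and treat the event as fatal. A hedged sketch of that masking step with a simulated register:

/* Hedged sketch of the AEU enable read-modify-write for a fatal HW
 * block attention; register access is simulated with a static value. */
#include <stdio.h>
#include <stdint.h>

static uint32_t aeu_enable = 0xffffffff;	/* stand-in for the AEU register */

static uint32_t reg_rd(void) { return aeu_enable; }
static void reg_wr(uint32_t v) { aeu_enable = v; }

static void handle_hw_block_attn(uint32_t attn, uint32_t assert_set_mask)
{
	uint32_t fatal = attn & assert_set_mask;

	if (!fatal)
		return;

	uint32_t val = reg_rd();
	val &= ~fatal;			/* mask the offending inputs */
	reg_wr(val);

	fprintf(stderr, "FATAL HW block attention 0x%x\n", (unsigned)fatal);
	/* the driver calls bnx2x_panic() at this point */
}

int main(void)
{
	handle_hw_block_attn(0x00000480, 0x00000400 /* assumed set mask */);
	printf("AEU enable now 0x%08x\n", (unsigned)aeu_enable);
	return 0;
}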
1766 | 2134 | ||
1767 | static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) | 2135 | static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) |
@@ -1776,6 +2144,23 @@ static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) | |||
1776 | if (val & 0x2) | 2144 | if (val & 0x2) |
1777 | BNX2X_ERR("FATAL error from DORQ\n"); | 2145 | BNX2X_ERR("FATAL error from DORQ\n"); |
1778 | } | 2146 | } |
2147 | |||
2148 | if (attn & HW_INTERRUT_ASSERT_SET_1) { | ||
2149 | |||
2150 | int port = BP_PORT(bp); | ||
2151 | int reg_offset; | ||
2152 | |||
2153 | reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 : | ||
2154 | MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); | ||
2155 | |||
2156 | val = REG_RD(bp, reg_offset); | ||
2157 | val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); | ||
2158 | REG_WR(bp, reg_offset, val); | ||
2159 | |||
2160 | BNX2X_ERR("FATAL HW block attention set1 0x%x\n", | ||
2161 | (attn & HW_INTERRUT_ASSERT_SET_1)); | ||
2162 | bnx2x_panic(); | ||
2163 | } | ||
1779 | } | 2164 | } |
1780 | 2165 | ||
1781 | static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) | 2166 | static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) |
@@ -1799,13 +2184,41 @@ static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) | |||
1799 | if (val & 0x18000) | 2184 | if (val & 0x18000) |
1800 | BNX2X_ERR("FATAL error from PXP\n"); | 2185 | BNX2X_ERR("FATAL error from PXP\n"); |
1801 | } | 2186 | } |
2187 | |||
2188 | if (attn & HW_INTERRUT_ASSERT_SET_2) { | ||
2189 | |||
2190 | int port = BP_PORT(bp); | ||
2191 | int reg_offset; | ||
2192 | |||
2193 | reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 : | ||
2194 | MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); | ||
2195 | |||
2196 | val = REG_RD(bp, reg_offset); | ||
2197 | val &= ~(attn & HW_INTERRUT_ASSERT_SET_2); | ||
2198 | REG_WR(bp, reg_offset, val); | ||
2199 | |||
2200 | BNX2X_ERR("FATAL HW block attention set2 0x%x\n", | ||
2201 | (attn & HW_INTERRUT_ASSERT_SET_2)); | ||
2202 | bnx2x_panic(); | ||
2203 | } | ||
1802 | } | 2204 | } |
1803 | 2205 | ||
1804 | static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) | 2206 | static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) |
1805 | { | 2207 | { |
2208 | u32 val; | ||
2209 | |||
1806 | if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { | 2210 | if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { |
1807 | 2211 | ||
1808 | if (attn & BNX2X_MC_ASSERT_BITS) { | 2212 | if (attn & BNX2X_PMF_LINK_ASSERT) { |
2213 | int func = BP_FUNC(bp); | ||
2214 | |||
2215 | REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); | ||
2216 | bnx2x__link_status_update(bp); | ||
2217 | if (SHMEM_RD(bp, func_mb[func].drv_status) & | ||
2218 | DRV_STATUS_PMF) | ||
2219 | bnx2x_pmf_update(bp); | ||
2220 | |||
2221 | } else if (attn & BNX2X_MC_ASSERT_BITS) { | ||
1809 | 2222 | ||
1810 | BNX2X_ERR("MC assert!\n"); | 2223 | BNX2X_ERR("MC assert!\n"); |
1811 | REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0); | 2224 | REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0); |
@@ -1818,16 +2231,25 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) | |||
1818 | 2231 | ||
1819 | BNX2X_ERR("MCP assert!\n"); | 2232 | BNX2X_ERR("MCP assert!\n"); |
1820 | REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0); | 2233 | REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0); |
1821 | bnx2x_mc_assert(bp); | 2234 | bnx2x_fw_dump(bp); |
1822 | 2235 | ||
1823 | } else | 2236 | } else |
1824 | BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn); | 2237 | BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn); |
1825 | } | 2238 | } |
1826 | 2239 | ||
1827 | if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { | 2240 | if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { |
1828 | 2241 | BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn); | |
2242 | if (attn & BNX2X_GRC_TIMEOUT) { | ||
2243 | val = CHIP_IS_E1H(bp) ? | ||
2244 | REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0; | ||
2245 | BNX2X_ERR("GRC time-out 0x%08x\n", val); | ||
2246 | } | ||
2247 | if (attn & BNX2X_GRC_RSV) { | ||
2248 | val = CHIP_IS_E1H(bp) ? | ||
2249 | REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0; | ||
2250 | BNX2X_ERR("GRC reserved 0x%08x\n", val); | ||
2251 | } | ||
1829 | REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); | 2252 | REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); |
1830 | BNX2X_ERR("LATCHED attention 0x%x (masked)\n", attn); | ||
1831 | } | 2253 | } |
1832 | } | 2254 | } |
1833 | 2255 | ||
@@ -1835,7 +2257,7 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) | |||
1835 | { | 2257 | { |
1836 | struct attn_route attn; | 2258 | struct attn_route attn; |
1837 | struct attn_route group_mask; | 2259 | struct attn_route group_mask; |
1838 | int port = bp->port; | 2260 | int port = BP_PORT(bp); |
1839 | int index; | 2261 | int index; |
1840 | u32 reg_addr; | 2262 | u32 reg_addr; |
1841 | u32 val; | 2263 | u32 val; |
@@ -1848,14 +2270,16 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) | |||
1848 | attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); | 2270 | attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); |
1849 | attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); | 2271 | attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); |
1850 | attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); | 2272 | attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); |
1851 | DP(NETIF_MSG_HW, "attn %llx\n", (unsigned long long)attn.sig[0]); | 2273 | DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n", |
2274 | attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]); | ||
1852 | 2275 | ||
1853 | for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { | 2276 | for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { |
1854 | if (deasserted & (1 << index)) { | 2277 | if (deasserted & (1 << index)) { |
1855 | group_mask = bp->attn_group[index]; | 2278 | group_mask = bp->attn_group[index]; |
1856 | 2279 | ||
1857 | DP(NETIF_MSG_HW, "group[%d]: %llx\n", index, | 2280 | DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n", |
1858 | (unsigned long long)group_mask.sig[0]); | 2281 | index, group_mask.sig[0], group_mask.sig[1], |
2282 | group_mask.sig[2], group_mask.sig[3]); | ||
1859 | 2283 | ||
1860 | bnx2x_attn_int_deasserted3(bp, | 2284 | bnx2x_attn_int_deasserted3(bp, |
1861 | attn.sig[3] & group_mask.sig[3]); | 2285 | attn.sig[3] & group_mask.sig[3]); |
@@ -1867,22 +2291,6 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) | |||
1867 | attn.sig[0] & group_mask.sig[0]); | 2291 | attn.sig[0] & group_mask.sig[0]); |
1868 | 2292 | ||
1869 | if ((attn.sig[0] & group_mask.sig[0] & | 2293 | if ((attn.sig[0] & group_mask.sig[0] & |
1870 | HW_INTERRUT_ASSERT_SET_0) || | ||
1871 | (attn.sig[1] & group_mask.sig[1] & | ||
1872 | HW_INTERRUT_ASSERT_SET_1) || | ||
1873 | (attn.sig[2] & group_mask.sig[2] & | ||
1874 | HW_INTERRUT_ASSERT_SET_2)) | ||
1875 | BNX2X_ERR("FATAL HW block attention" | ||
1876 | " set0 0x%x set1 0x%x" | ||
1877 | " set2 0x%x\n", | ||
1878 | (attn.sig[0] & group_mask.sig[0] & | ||
1879 | HW_INTERRUT_ASSERT_SET_0), | ||
1880 | (attn.sig[1] & group_mask.sig[1] & | ||
1881 | HW_INTERRUT_ASSERT_SET_1), | ||
1882 | (attn.sig[2] & group_mask.sig[2] & | ||
1883 | HW_INTERRUT_ASSERT_SET_2)); | ||
1884 | |||
1885 | if ((attn.sig[0] & group_mask.sig[0] & | ||
1886 | HW_PRTY_ASSERT_SET_0) || | 2294 | HW_PRTY_ASSERT_SET_0) || |
1887 | (attn.sig[1] & group_mask.sig[1] & | 2295 | (attn.sig[1] & group_mask.sig[1] & |
1888 | HW_PRTY_ASSERT_SET_1) || | 2296 | HW_PRTY_ASSERT_SET_1) || |
@@ -1894,17 +2302,17 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) | |||
1894 | 2302 | ||
1895 | bnx2x_unlock_alr(bp); | 2303 | bnx2x_unlock_alr(bp); |
1896 | 2304 | ||
1897 | reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_PORT_BASE * port) * 8; | 2305 | reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_FUNC_BASE * BP_FUNC(bp)) * 8; |
1898 | 2306 | ||
1899 | val = ~deasserted; | 2307 | val = ~deasserted; |
1900 | /* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n", | 2308 | /* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n", |
1901 | val, BAR_IGU_INTMEM + reg_addr); */ | 2309 | val, BAR_IGU_INTMEM + reg_addr); */ |
1902 | REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val); | 2310 | REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val); |
1903 | 2311 | ||
1904 | if (bp->aeu_mask & (deasserted & 0xff)) | 2312 | if (bp->aeu_mask & (deasserted & 0xff)) |
1905 | BNX2X_ERR("IGU BUG\n"); | 2313 | BNX2X_ERR("IGU BUG!\n"); |
1906 | if (~bp->attn_state & deasserted) | 2314 | if (~bp->attn_state & deasserted) |
1907 | BNX2X_ERR("IGU BUG\n"); | 2315 | BNX2X_ERR("IGU BUG!\n"); |
1908 | 2316 | ||
1909 | reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : | 2317 | reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : |
1910 | MISC_REG_AEU_MASK_ATTN_FUNC_0; | 2318 | MISC_REG_AEU_MASK_ATTN_FUNC_0; |
@@ -1936,7 +2344,7 @@ static void bnx2x_attn_int(struct bnx2x *bp) | |||
1936 | attn_bits, attn_ack, asserted, deasserted); | 2344 | attn_bits, attn_ack, asserted, deasserted); |
1937 | 2345 | ||
1938 | if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) | 2346 | if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) |
1939 | BNX2X_ERR("bad attention state\n"); | 2347 | BNX2X_ERR("BAD attention state\n"); |
1940 | 2348 | ||
1941 | /* handle bits that were raised */ | 2349 | /* handle bits that were raised */ |
1942 | if (asserted) | 2350 | if (asserted) |
@@ -1951,6 +2359,7 @@ static void bnx2x_sp_task(struct work_struct *work) | |||
1951 | struct bnx2x *bp = container_of(work, struct bnx2x, sp_task); | 2359 | struct bnx2x *bp = container_of(work, struct bnx2x, sp_task); |
1952 | u16 status; | 2360 | u16 status; |
1953 | 2361 | ||
2362 | |||
1954 | /* Return here if interrupt is disabled */ | 2363 | /* Return here if interrupt is disabled */ |
1955 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) { | 2364 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) { |
1956 | DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n"); | 2365 | DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n"); |
@@ -1958,19 +2367,15 @@ static void bnx2x_sp_task(struct work_struct *work) | |||
1958 | } | 2367 | } |
1959 | 2368 | ||
1960 | status = bnx2x_update_dsb_idx(bp); | 2369 | status = bnx2x_update_dsb_idx(bp); |
1961 | if (status == 0) | 2370 | /* if (status == 0) */ |
1962 | BNX2X_ERR("spurious slowpath interrupt!\n"); | 2371 | /* BNX2X_ERR("spurious slowpath interrupt!\n"); */ |
1963 | 2372 | ||
1964 | DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status); | 2373 | DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status); |
1965 | 2374 | ||
1966 | /* HW attentions */ | 2375 | /* HW attentions */ |
1967 | if (status & 0x1) | 2376 | if (status & 0x1) |
1968 | bnx2x_attn_int(bp); | 2377 | bnx2x_attn_int(bp); |
1969 | 2378 | ||
1970 | /* CStorm events: query_stats, port delete ramrod */ | ||
1971 | if (status & 0x2) | ||
1972 | bp->stat_pending = 0; | ||
1973 | |||
1974 | bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx, | 2379 | bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx, |
1975 | IGU_INT_NOP, 1); | 2380 | IGU_INT_NOP, 1); |
1976 | bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx), | 2381 | bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx), |
@@ -2109,13 +2514,13 @@ static inline long bnx2x_hilo(u32 *hiref) | |||
2109 | static void bnx2x_init_mac_stats(struct bnx2x *bp) | 2514 | static void bnx2x_init_mac_stats(struct bnx2x *bp) |
2110 | { | 2515 | { |
2111 | struct dmae_command *dmae; | 2516 | struct dmae_command *dmae; |
2112 | int port = bp->port; | 2517 | int port = BP_PORT(bp); |
2113 | int loader_idx = port * 8; | 2518 | int loader_idx = port * 8; |
2114 | u32 opcode; | 2519 | u32 opcode; |
2115 | u32 mac_addr; | 2520 | u32 mac_addr; |
2116 | 2521 | ||
2117 | bp->executer_idx = 0; | 2522 | bp->executer_idx = 0; |
2118 | if (bp->fw_mb) { | 2523 | if (bp->func_stx) { |
2119 | /* MCP */ | 2524 | /* MCP */ |
2120 | opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | | 2525 | opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | |
2121 | DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | | 2526 | DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | |
@@ -2135,7 +2540,7 @@ static void bnx2x_init_mac_stats(struct bnx2x *bp) | |||
2135 | sizeof(u32)); | 2540 | sizeof(u32)); |
2136 | dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, eth_stats) + | 2541 | dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, eth_stats) + |
2137 | sizeof(u32)); | 2542 | sizeof(u32)); |
2138 | dmae->dst_addr_lo = bp->fw_mb >> 2; | 2543 | dmae->dst_addr_lo = bp->func_stx >> 2; |
2139 | dmae->dst_addr_hi = 0; | 2544 | dmae->dst_addr_hi = 0; |
2140 | dmae->len = (offsetof(struct bnx2x_eth_stats, mac_stx_end) - | 2545 | dmae->len = (offsetof(struct bnx2x_eth_stats, mac_stx_end) - |
2141 | sizeof(u32)) >> 2; | 2546 | sizeof(u32)) >> 2; |
@@ -2280,7 +2685,7 @@ static void bnx2x_init_mac_stats(struct bnx2x *bp) | |||
2280 | 2685 | ||
2281 | static void bnx2x_init_stats(struct bnx2x *bp) | 2686 | static void bnx2x_init_stats(struct bnx2x *bp) |
2282 | { | 2687 | { |
2283 | int port = bp->port; | 2688 | int port = BP_PORT(bp); |
2284 | 2689 | ||
2285 | bp->stats_state = STATS_STATE_DISABLE; | 2690 | bp->stats_state = STATS_STATE_DISABLE; |
2286 | bp->executer_idx = 0; | 2691 | bp->executer_idx = 0; |
@@ -2641,8 +3046,6 @@ static void bnx2x_update_net_stats(struct bnx2x *bp) | |||
2641 | 3046 | ||
2642 | static void bnx2x_update_stats(struct bnx2x *bp) | 3047 | static void bnx2x_update_stats(struct bnx2x *bp) |
2643 | { | 3048 | { |
2644 | int i; | ||
2645 | |||
2646 | if (!bnx2x_update_storm_stats(bp)) { | 3049 | if (!bnx2x_update_storm_stats(bp)) { |
2647 | 3050 | ||
2648 | if (bp->link_vars.mac_type == MAC_TYPE_BMAC) { | 3051 | if (bp->link_vars.mac_type == MAC_TYPE_BMAC) { |
@@ -2662,6 +3065,7 @@ static void bnx2x_update_stats(struct bnx2x *bp) | |||
2662 | if (bp->msglevel & NETIF_MSG_TIMER) { | 3065 | if (bp->msglevel & NETIF_MSG_TIMER) { |
2663 | struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats); | 3066 | struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats); |
2664 | struct net_device_stats *nstats = &bp->dev->stats; | 3067 | struct net_device_stats *nstats = &bp->dev->stats; |
3068 | int i; | ||
2665 | 3069 | ||
2666 | printk(KERN_DEBUG "%s:\n", bp->dev->name); | 3070 | printk(KERN_DEBUG "%s:\n", bp->dev->name); |
2667 | printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)" | 3071 | printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)" |
@@ -2707,7 +3111,7 @@ static void bnx2x_update_stats(struct bnx2x *bp) | |||
2707 | /* loader */ | 3111 | /* loader */ |
2708 | if (bp->executer_idx) { | 3112 | if (bp->executer_idx) { |
2709 | struct dmae_command *dmae = &bp->dmae; | 3113 | struct dmae_command *dmae = &bp->dmae; |
2710 | int port = bp->port; | 3114 | int port = BP_PORT(bp); |
2711 | int loader_idx = port * 8; | 3115 | int loader_idx = port * 8; |
2712 | 3116 | ||
2713 | memset(dmae, 0, sizeof(struct dmae_command)); | 3117 | memset(dmae, 0, sizeof(struct dmae_command)); |
@@ -2766,8 +3170,8 @@ static void bnx2x_timer(unsigned long data) | |||
2766 | rc = bnx2x_rx_int(fp, 1000); | 3170 | rc = bnx2x_rx_int(fp, 1000); |
2767 | } | 3171 | } |
2768 | 3172 | ||
2769 | if (!nomcp) { | 3173 | if (!BP_NOMCP(bp)) { |
2770 | int port = bp->port; | 3174 | int func = BP_FUNC(bp); |
2771 | u32 drv_pulse; | 3175 | u32 drv_pulse; |
2772 | u32 mcp_pulse; | 3176 | u32 mcp_pulse; |
2773 | 3177 | ||
@@ -2775,9 +3179,9 @@ static void bnx2x_timer(unsigned long data) | |||
2775 | bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; | 3179 | bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; |
2776 | /* TBD - add SYSTEM_TIME */ | 3180 | /* TBD - add SYSTEM_TIME */ |
2777 | drv_pulse = bp->fw_drv_pulse_wr_seq; | 3181 | drv_pulse = bp->fw_drv_pulse_wr_seq; |
2778 | SHMEM_WR(bp, func_mb[port].drv_pulse_mb, drv_pulse); | 3182 | SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse); |
2779 | 3183 | ||
2780 | mcp_pulse = (SHMEM_RD(bp, func_mb[port].mcp_pulse_mb) & | 3184 | mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) & |
2781 | MCP_PULSE_SEQ_MASK); | 3185 | MCP_PULSE_SEQ_MASK); |
2782 | /* The delta between driver pulse and mcp response | 3186 | /* The delta between driver pulse and mcp response |
2783 | * should be 1 (before mcp response) or 0 (after mcp response) | 3187 | * should be 1 (before mcp response) or 0 (after mcp response) |
@@ -2807,58 +3211,89 @@ timer_restart: | |||
2807 | * nic init service functions | 3211 | * nic init service functions |
2808 | */ | 3212 | */ |
2809 | 3213 | ||
2810 | static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb, | 3214 | static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id) |
2811 | dma_addr_t mapping, int id) | ||
2812 | { | 3215 | { |
2813 | int port = bp->port; | 3216 | int port = BP_PORT(bp); |
2814 | u64 section; | 3217 | |
3218 | bnx2x_init_fill(bp, BAR_USTRORM_INTMEM + | ||
3219 | USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0, | ||
3220 | sizeof(struct ustorm_def_status_block)/4); | ||
3221 | bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM + | ||
3222 | CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0, | ||
3223 | sizeof(struct cstorm_def_status_block)/4); | ||
3224 | } | ||
3225 | |||
3226 | static void bnx2x_init_sb(struct bnx2x *bp, int sb_id, | ||
3227 | struct host_status_block *sb, dma_addr_t mapping) | ||
3228 | { | ||
3229 | int port = BP_PORT(bp); | ||
2815 | int index; | 3230 | int index; |
3231 | u64 section; | ||
2816 | 3232 | ||
2817 | /* USTORM */ | 3233 | /* USTORM */ |
2818 | section = ((u64)mapping) + offsetof(struct host_status_block, | 3234 | section = ((u64)mapping) + offsetof(struct host_status_block, |
2819 | u_status_block); | 3235 | u_status_block); |
2820 | sb->u_status_block.status_block_id = id; | 3236 | sb->u_status_block.status_block_id = sb_id; |
2821 | 3237 | ||
2822 | REG_WR(bp, BAR_USTRORM_INTMEM + | 3238 | REG_WR(bp, BAR_USTRORM_INTMEM + |
2823 | USTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section)); | 3239 | USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section)); |
2824 | REG_WR(bp, BAR_USTRORM_INTMEM + | 3240 | REG_WR(bp, BAR_USTRORM_INTMEM + |
2825 | ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4), | 3241 | ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4), |
2826 | U64_HI(section)); | 3242 | U64_HI(section)); |
2827 | 3243 | ||
2828 | for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++) | 3244 | for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++) |
2829 | REG_WR16(bp, BAR_USTRORM_INTMEM + | 3245 | REG_WR16(bp, BAR_USTRORM_INTMEM + |
2830 | USTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1); | 3246 | USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1); |
2831 | 3247 | ||
2832 | /* CSTORM */ | 3248 | /* CSTORM */ |
2833 | section = ((u64)mapping) + offsetof(struct host_status_block, | 3249 | section = ((u64)mapping) + offsetof(struct host_status_block, |
2834 | c_status_block); | 3250 | c_status_block); |
2835 | sb->c_status_block.status_block_id = id; | 3251 | sb->c_status_block.status_block_id = sb_id; |
2836 | 3252 | ||
2837 | REG_WR(bp, BAR_CSTRORM_INTMEM + | 3253 | REG_WR(bp, BAR_CSTRORM_INTMEM + |
2838 | CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section)); | 3254 | CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section)); |
2839 | REG_WR(bp, BAR_CSTRORM_INTMEM + | 3255 | REG_WR(bp, BAR_CSTRORM_INTMEM + |
2840 | ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4), | 3256 | ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4), |
2841 | U64_HI(section)); | 3257 | U64_HI(section)); |
2842 | 3258 | ||
2843 | for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++) | 3259 | for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++) |
2844 | REG_WR16(bp, BAR_CSTRORM_INTMEM + | 3260 | REG_WR16(bp, BAR_CSTRORM_INTMEM + |
2845 | CSTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1); | 3261 | CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1); |
2846 | 3262 | ||
2847 | bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); | 3263 | bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); |
3264 | } | ||
3265 | |||
3266 | static void bnx2x_zero_def_sb(struct bnx2x *bp) | ||
3267 | { | ||
3268 | int func = BP_FUNC(bp); | ||
3269 | |||
3270 | bnx2x_init_fill(bp, BAR_USTRORM_INTMEM + | ||
3271 | USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0, | ||
3272 | sizeof(struct ustorm_def_status_block)/4); | ||
3273 | bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM + | ||
3274 | CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0, | ||
3275 | sizeof(struct cstorm_def_status_block)/4); | ||
3276 | bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM + | ||
3277 | XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0, | ||
3278 | sizeof(struct xstorm_def_status_block)/4); | ||
3279 | bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM + | ||
3280 | TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0, | ||
3281 | sizeof(struct tstorm_def_status_block)/4); | ||
2848 | } | 3282 | } |
2849 | 3283 | ||
2850 | static void bnx2x_init_def_sb(struct bnx2x *bp, | 3284 | static void bnx2x_init_def_sb(struct bnx2x *bp, |
2851 | struct host_def_status_block *def_sb, | 3285 | struct host_def_status_block *def_sb, |
2852 | dma_addr_t mapping, int id) | 3286 | dma_addr_t mapping, int sb_id) |
2853 | { | 3287 | { |
2854 | int port = bp->port; | 3288 | int port = BP_PORT(bp); |
3289 | int func = BP_FUNC(bp); | ||
2855 | int index, val, reg_offset; | 3290 | int index, val, reg_offset; |
2856 | u64 section; | 3291 | u64 section; |
2857 | 3292 | ||
2858 | /* ATTN */ | 3293 | /* ATTN */ |
2859 | section = ((u64)mapping) + offsetof(struct host_def_status_block, | 3294 | section = ((u64)mapping) + offsetof(struct host_def_status_block, |
2860 | atten_status_block); | 3295 | atten_status_block); |
2861 | def_sb->atten_status_block.status_block_id = id; | 3296 | def_sb->atten_status_block.status_block_id = sb_id; |
2862 | 3297 | ||
2863 | bp->def_att_idx = 0; | 3298 | bp->def_att_idx = 0; |
2864 | bp->attn_state = 0; | 3299 | bp->attn_state = 0; |
@@ -2866,7 +3301,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp, | |||
2866 | reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : | 3301 | reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : |
2867 | MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); | 3302 | MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); |
2868 | 3303 | ||
2869 | for (index = 0; index < 3; index++) { | 3304 | for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { |
2870 | bp->attn_group[index].sig[0] = REG_RD(bp, | 3305 | bp->attn_group[index].sig[0] = REG_RD(bp, |
2871 | reg_offset + 0x10*index); | 3306 | reg_offset + 0x10*index); |
2872 | bp->attn_group[index].sig[1] = REG_RD(bp, | 3307 | bp->attn_group[index].sig[1] = REG_RD(bp, |
@@ -2889,116 +3324,123 @@ static void bnx2x_init_def_sb(struct bnx2x *bp, | |||
2889 | reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0); | 3324 | reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0); |
2890 | 3325 | ||
2891 | val = REG_RD(bp, reg_offset); | 3326 | val = REG_RD(bp, reg_offset); |
2892 | val |= id; | 3327 | val |= sb_id; |
2893 | REG_WR(bp, reg_offset, val); | 3328 | REG_WR(bp, reg_offset, val); |
2894 | 3329 | ||
2895 | /* USTORM */ | 3330 | /* USTORM */ |
2896 | section = ((u64)mapping) + offsetof(struct host_def_status_block, | 3331 | section = ((u64)mapping) + offsetof(struct host_def_status_block, |
2897 | u_def_status_block); | 3332 | u_def_status_block); |
2898 | def_sb->u_def_status_block.status_block_id = id; | 3333 | def_sb->u_def_status_block.status_block_id = sb_id; |
2899 | 3334 | ||
2900 | bp->def_u_idx = 0; | 3335 | bp->def_u_idx = 0; |
2901 | 3336 | ||
2902 | REG_WR(bp, BAR_USTRORM_INTMEM + | 3337 | REG_WR(bp, BAR_USTRORM_INTMEM + |
2903 | USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section)); | 3338 | USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); |
2904 | REG_WR(bp, BAR_USTRORM_INTMEM + | 3339 | REG_WR(bp, BAR_USTRORM_INTMEM + |
2905 | ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4), | 3340 | ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), |
2906 | U64_HI(section)); | 3341 | U64_HI(section)); |
2907 | REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), | 3342 | REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF + |
3343 | USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); | ||
3344 | REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(func), | ||
2908 | BNX2X_BTR); | 3345 | BNX2X_BTR); |
2909 | 3346 | ||
2910 | for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++) | 3347 | for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++) |
2911 | REG_WR16(bp, BAR_USTRORM_INTMEM + | 3348 | REG_WR16(bp, BAR_USTRORM_INTMEM + |
2912 | USTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1); | 3349 | USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1); |
2913 | 3350 | ||
2914 | /* CSTORM */ | 3351 | /* CSTORM */ |
2915 | section = ((u64)mapping) + offsetof(struct host_def_status_block, | 3352 | section = ((u64)mapping) + offsetof(struct host_def_status_block, |
2916 | c_def_status_block); | 3353 | c_def_status_block); |
2917 | def_sb->c_def_status_block.status_block_id = id; | 3354 | def_sb->c_def_status_block.status_block_id = sb_id; |
2918 | 3355 | ||
2919 | bp->def_c_idx = 0; | 3356 | bp->def_c_idx = 0; |
2920 | 3357 | ||
2921 | REG_WR(bp, BAR_CSTRORM_INTMEM + | 3358 | REG_WR(bp, BAR_CSTRORM_INTMEM + |
2922 | CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section)); | 3359 | CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); |
2923 | REG_WR(bp, BAR_CSTRORM_INTMEM + | 3360 | REG_WR(bp, BAR_CSTRORM_INTMEM + |
2924 | ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4), | 3361 | ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), |
2925 | U64_HI(section)); | 3362 | U64_HI(section)); |
2926 | REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), | 3363 | REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF + |
3364 | CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); | ||
3365 | REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(func), | ||
2927 | BNX2X_BTR); | 3366 | BNX2X_BTR); |
2928 | 3367 | ||
2929 | for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++) | 3368 | for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++) |
2930 | REG_WR16(bp, BAR_CSTRORM_INTMEM + | 3369 | REG_WR16(bp, BAR_CSTRORM_INTMEM + |
2931 | CSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1); | 3370 | CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1); |
2932 | 3371 | ||
2933 | /* TSTORM */ | 3372 | /* TSTORM */ |
2934 | section = ((u64)mapping) + offsetof(struct host_def_status_block, | 3373 | section = ((u64)mapping) + offsetof(struct host_def_status_block, |
2935 | t_def_status_block); | 3374 | t_def_status_block); |
2936 | def_sb->t_def_status_block.status_block_id = id; | 3375 | def_sb->t_def_status_block.status_block_id = sb_id; |
2937 | 3376 | ||
2938 | bp->def_t_idx = 0; | 3377 | bp->def_t_idx = 0; |
2939 | 3378 | ||
2940 | REG_WR(bp, BAR_TSTRORM_INTMEM + | 3379 | REG_WR(bp, BAR_TSTRORM_INTMEM + |
2941 | TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section)); | 3380 | TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); |
2942 | REG_WR(bp, BAR_TSTRORM_INTMEM + | 3381 | REG_WR(bp, BAR_TSTRORM_INTMEM + |
2943 | ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4), | 3382 | ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), |
2944 | U64_HI(section)); | 3383 | U64_HI(section)); |
2945 | REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), | 3384 | REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF + |
3385 | TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); | ||
3386 | REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(func), | ||
2946 | BNX2X_BTR); | 3387 | BNX2X_BTR); |
2947 | 3388 | ||
2948 | for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++) | 3389 | for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++) |
2949 | REG_WR16(bp, BAR_TSTRORM_INTMEM + | 3390 | REG_WR16(bp, BAR_TSTRORM_INTMEM + |
2950 | TSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1); | 3391 | TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1); |
2951 | 3392 | ||
2952 | /* XSTORM */ | 3393 | /* XSTORM */ |
2953 | section = ((u64)mapping) + offsetof(struct host_def_status_block, | 3394 | section = ((u64)mapping) + offsetof(struct host_def_status_block, |
2954 | x_def_status_block); | 3395 | x_def_status_block); |
2955 | def_sb->x_def_status_block.status_block_id = id; | 3396 | def_sb->x_def_status_block.status_block_id = sb_id; |
2956 | 3397 | ||
2957 | bp->def_x_idx = 0; | 3398 | bp->def_x_idx = 0; |
2958 | 3399 | ||
2959 | REG_WR(bp, BAR_XSTRORM_INTMEM + | 3400 | REG_WR(bp, BAR_XSTRORM_INTMEM + |
2960 | XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section)); | 3401 | XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); |
2961 | REG_WR(bp, BAR_XSTRORM_INTMEM + | 3402 | REG_WR(bp, BAR_XSTRORM_INTMEM + |
2962 | ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4), | 3403 | ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), |
2963 | U64_HI(section)); | 3404 | U64_HI(section)); |
2964 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), | 3405 | REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF + |
3406 | XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); | ||
3407 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(func), | ||
2965 | BNX2X_BTR); | 3408 | BNX2X_BTR); |
2966 | 3409 | ||
2967 | for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++) | 3410 | for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++) |
2968 | REG_WR16(bp, BAR_XSTRORM_INTMEM + | 3411 | REG_WR16(bp, BAR_XSTRORM_INTMEM + |
2969 | XSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1); | 3412 | XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1); |
2970 | 3413 | ||
2971 | bp->stat_pending = 0; | 3414 | bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); |
2972 | |||
2973 | bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); | ||
2974 | } | 3415 | } |
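Each storm in bnx2x_init_def_sb() receives the host address of its slice of the default status block: the DMA base plus offsetof() into host_def_status_block, split into 32-bit halves for the paired register writes. A hedged sketch of that address split with an illustrative struct layout:

/* Hedged sketch of splitting a 64-bit DMA address for the paired
 * SB_ADDR register writes; the struct layout is illustrative only. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct def_sb_sketch {
	uint32_t atten[4];
	uint32_t ustorm[8];
	uint32_t cstorm[8];
};

#define U64_LO(x)	((uint32_t)((x) & 0xffffffff))
#define U64_HI(x)	((uint32_t)((x) >> 32))

int main(void)
{
	uint64_t mapping = 0x0000000123456000ULL;	/* assumed DMA address */
	uint64_t section = mapping + offsetof(struct def_sb_sketch, cstorm);

	/* these two values go to SB_ADDR_OFFSET and SB_ADDR_OFFSET + 4 */
	printf("lo=0x%08x hi=0x%08x\n",
	       (unsigned)U64_LO(section), (unsigned)U64_HI(section));
	return 0;
}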
2975 | 3416 | ||
2976 | static void bnx2x_update_coalesce(struct bnx2x *bp) | 3417 | static void bnx2x_update_coalesce(struct bnx2x *bp) |
2977 | { | 3418 | { |
2978 | int port = bp->port; | 3419 | int port = BP_PORT(bp); |
2979 | int i; | 3420 | int i; |
2980 | 3421 | ||
2981 | for_each_queue(bp, i) { | 3422 | for_each_queue(bp, i) { |
3423 | int sb_id = bp->fp[i].sb_id; | ||
2982 | 3424 | ||
2983 | /* HC_INDEX_U_ETH_RX_CQ_CONS */ | 3425 | /* HC_INDEX_U_ETH_RX_CQ_CONS */ |
2984 | REG_WR8(bp, BAR_USTRORM_INTMEM + | 3426 | REG_WR8(bp, BAR_USTRORM_INTMEM + |
2985 | USTORM_SB_HC_TIMEOUT_OFFSET(port, i, | 3427 | USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id, |
2986 | HC_INDEX_U_ETH_RX_CQ_CONS), | 3428 | HC_INDEX_U_ETH_RX_CQ_CONS), |
2987 | bp->rx_ticks_int/12); | 3429 | bp->rx_ticks/12); |
2988 | REG_WR16(bp, BAR_USTRORM_INTMEM + | 3430 | REG_WR16(bp, BAR_USTRORM_INTMEM + |
2989 | USTORM_SB_HC_DISABLE_OFFSET(port, i, | 3431 | USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, |
2990 | HC_INDEX_U_ETH_RX_CQ_CONS), | 3432 | HC_INDEX_U_ETH_RX_CQ_CONS), |
2991 | bp->rx_ticks_int ? 0 : 1); | 3433 | bp->rx_ticks ? 0 : 1); |
2992 | 3434 | ||
2993 | /* HC_INDEX_C_ETH_TX_CQ_CONS */ | 3435 | /* HC_INDEX_C_ETH_TX_CQ_CONS */ |
2994 | REG_WR8(bp, BAR_CSTRORM_INTMEM + | 3436 | REG_WR8(bp, BAR_CSTRORM_INTMEM + |
2995 | CSTORM_SB_HC_TIMEOUT_OFFSET(port, i, | 3437 | CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id, |
2996 | HC_INDEX_C_ETH_TX_CQ_CONS), | 3438 | HC_INDEX_C_ETH_TX_CQ_CONS), |
2997 | bp->tx_ticks_int/12); | 3439 | bp->tx_ticks/12); |
2998 | REG_WR16(bp, BAR_CSTRORM_INTMEM + | 3440 | REG_WR16(bp, BAR_CSTRORM_INTMEM + |
2999 | CSTORM_SB_HC_DISABLE_OFFSET(port, i, | 3441 | CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, |
3000 | HC_INDEX_C_ETH_TX_CQ_CONS), | 3442 | HC_INDEX_C_ETH_TX_CQ_CONS), |
3001 | bp->tx_ticks_int ? 0 : 1); | 3443 | bp->tx_ticks ? 0 : 1); |
3002 | } | 3444 | } |
3003 | } | 3445 | } |
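bnx2x_update_coalesce() converts the microsecond coalescing settings into status-block timeout units by dividing by 12 (which suggests a 12 us HC tick; that granularity is an inference here, not documented in the patch) and disables the index entirely when the value is 0. A hedged sketch of the conversion:

/* Hedged sketch of the coalescing conversion above; the 12 us tick
 * granularity is inferred from the /12, not stated by the driver. */
#include <stdio.h>
#include <stdint.h>

struct hc_cfg {
	uint8_t timeout;	/* written with REG_WR8 */
	uint16_t disable;	/* written with REG_WR16 */
};

static struct hc_cfg coalesce_to_hc(uint32_t ticks_usec)
{
	struct hc_cfg cfg;

	cfg.timeout = ticks_usec / 12;		/* HC timeout units */
	cfg.disable = ticks_usec ? 0 : 1;	/* 0 usec means "no coalescing" */
	return cfg;
}

int main(void)
{
	struct hc_cfg rx = coalesce_to_hc(25);	/* e.g. rx_ticks = 25 usec */
	struct hc_cfg tx = coalesce_to_hc(0);	/* coalescing switched off */

	printf("rx: timeout=%u disable=%u\n",
	       (unsigned)rx.timeout, (unsigned)rx.disable);
	printf("tx: timeout=%u disable=%u\n",
	       (unsigned)tx.timeout, (unsigned)tx.disable);
	return 0;
}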
3004 | 3446 | ||
@@ -3006,7 +3448,6 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
3006 | { | 3448 | { |
3007 | u16 ring_prod; | 3449 | u16 ring_prod; |
3008 | int i, j; | 3450 | int i, j; |
3009 | int port = bp->port; | ||
3010 | 3451 | ||
3011 | bp->rx_buf_use_size = bp->dev->mtu; | 3452 | bp->rx_buf_use_size = bp->dev->mtu; |
3012 | 3453 | ||
@@ -3025,13 +3466,13 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
3025 | rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2]; | 3466 | rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2]; |
3026 | rx_bd->addr_hi = | 3467 | rx_bd->addr_hi = |
3027 | cpu_to_le32(U64_HI(fp->rx_desc_mapping + | 3468 | cpu_to_le32(U64_HI(fp->rx_desc_mapping + |
3028 | BCM_PAGE_SIZE*(i % NUM_RX_RINGS))); | 3469 | BCM_PAGE_SIZE*(i % NUM_RX_RINGS))); |
3029 | rx_bd->addr_lo = | 3470 | rx_bd->addr_lo = |
3030 | cpu_to_le32(U64_LO(fp->rx_desc_mapping + | 3471 | cpu_to_le32(U64_LO(fp->rx_desc_mapping + |
3031 | BCM_PAGE_SIZE*(i % NUM_RX_RINGS))); | 3472 | BCM_PAGE_SIZE*(i % NUM_RX_RINGS))); |
3032 | |||
3033 | } | 3473 | } |
3034 | 3474 | ||
3475 | /* CQ ring */ | ||
3035 | for (i = 1; i <= NUM_RCQ_RINGS; i++) { | 3476 | for (i = 1; i <= NUM_RCQ_RINGS; i++) { |
3036 | struct eth_rx_cqe_next_page *nextpg; | 3477 | struct eth_rx_cqe_next_page *nextpg; |
3037 | 3478 | ||
@@ -3039,10 +3480,10 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
3039 | &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1]; | 3480 | &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1]; |
3040 | nextpg->addr_hi = | 3481 | nextpg->addr_hi = |
3041 | cpu_to_le32(U64_HI(fp->rx_comp_mapping + | 3482 | cpu_to_le32(U64_HI(fp->rx_comp_mapping + |
3042 | BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); | 3483 | BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); |
3043 | nextpg->addr_lo = | 3484 | nextpg->addr_lo = |
3044 | cpu_to_le32(U64_LO(fp->rx_comp_mapping + | 3485 | cpu_to_le32(U64_LO(fp->rx_comp_mapping + |
3045 | BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); | 3486 | BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); |
3046 | } | 3487 | } |
3047 | 3488 | ||
3048 | /* rx completion queue */ | 3489 | /* rx completion queue */ |
@@ -3064,15 +3505,16 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
3064 | /* Warning! this will generate an interrupt (to the TSTORM) */ | 3505 | /* Warning! this will generate an interrupt (to the TSTORM) */ |
3065 | /* must only be done when chip is initialized */ | 3506 | /* must only be done when chip is initialized */ |
3066 | REG_WR(bp, BAR_TSTRORM_INTMEM + | 3507 | REG_WR(bp, BAR_TSTRORM_INTMEM + |
3067 | TSTORM_RCQ_PROD_OFFSET(port, j), ring_prod); | 3508 | TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)), |
3509 | ring_prod); | ||
3068 | if (j != 0) | 3510 | if (j != 0) |
3069 | continue; | 3511 | continue; |
3070 | 3512 | ||
3071 | REG_WR(bp, BAR_USTRORM_INTMEM + | 3513 | REG_WR(bp, BAR_USTRORM_INTMEM + |
3072 | USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port), | 3514 | USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(BP_PORT(bp)), |
3073 | U64_LO(fp->rx_comp_mapping)); | 3515 | U64_LO(fp->rx_comp_mapping)); |
3074 | REG_WR(bp, BAR_USTRORM_INTMEM + | 3516 | REG_WR(bp, BAR_USTRORM_INTMEM + |
3075 | USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port) + 4, | 3517 | USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(BP_PORT(bp)) + 4, |
3076 | U64_HI(fp->rx_comp_mapping)); | 3518 | U64_HI(fp->rx_comp_mapping)); |
3077 | } | 3519 | } |
3078 | } | 3520 | } |
@@ -3090,10 +3532,10 @@ static void bnx2x_init_tx_ring(struct bnx2x *bp) | |||
3090 | 3532 | ||
3091 | tx_bd->addr_hi = | 3533 | tx_bd->addr_hi = |
3092 | cpu_to_le32(U64_HI(fp->tx_desc_mapping + | 3534 | cpu_to_le32(U64_HI(fp->tx_desc_mapping + |
3093 | BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); | 3535 | BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); |
3094 | tx_bd->addr_lo = | 3536 | tx_bd->addr_lo = |
3095 | cpu_to_le32(U64_LO(fp->tx_desc_mapping + | 3537 | cpu_to_le32(U64_LO(fp->tx_desc_mapping + |
3096 | BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); | 3538 | BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); |
3097 | } | 3539 | } |
3098 | 3540 | ||
3099 | fp->tx_pkt_prod = 0; | 3541 | fp->tx_pkt_prod = 0; |
@@ -3107,7 +3549,7 @@ static void bnx2x_init_tx_ring(struct bnx2x *bp) | |||
3107 | 3549 | ||
3108 | static void bnx2x_init_sp_ring(struct bnx2x *bp) | 3550 | static void bnx2x_init_sp_ring(struct bnx2x *bp) |
3109 | { | 3551 | { |
3110 | int port = bp->port; | 3552 | int func = BP_FUNC(bp); |
3111 | 3553 | ||
3112 | spin_lock_init(&bp->spq_lock); | 3554 | spin_lock_init(&bp->spq_lock); |
3113 | 3555 | ||
@@ -3117,12 +3559,13 @@ static void bnx2x_init_sp_ring(struct bnx2x *bp) | |||
3117 | bp->spq_prod_bd = bp->spq; | 3559 | bp->spq_prod_bd = bp->spq; |
3118 | bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT; | 3560 | bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT; |
3119 | 3561 | ||
3120 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port), | 3562 | REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func), |
3121 | U64_LO(bp->spq_mapping)); | 3563 | U64_LO(bp->spq_mapping)); |
3122 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port) + 4, | 3564 | REG_WR(bp, |
3565 | XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4, | ||
3123 | U64_HI(bp->spq_mapping)); | 3566 | U64_HI(bp->spq_mapping)); |
3124 | 3567 | ||
3125 | REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(port), | 3568 | REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func), |
3126 | bp->spq_prod_idx); | 3569 | bp->spq_prod_idx); |
3127 | } | 3570 | } |
3128 | 3571 | ||
@@ -3133,6 +3576,7 @@ static void bnx2x_init_context(struct bnx2x *bp) | |||
3133 | for_each_queue(bp, i) { | 3576 | for_each_queue(bp, i) { |
3134 | struct eth_context *context = bnx2x_sp(bp, context[i].eth); | 3577 | struct eth_context *context = bnx2x_sp(bp, context[i].eth); |
3135 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 3578 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
3579 | u8 sb_id = FP_SB_ID(fp); | ||
3136 | 3580 | ||
3137 | context->xstorm_st_context.tx_bd_page_base_hi = | 3581 | context->xstorm_st_context.tx_bd_page_base_hi = |
3138 | U64_HI(fp->tx_desc_mapping); | 3582 | U64_HI(fp->tx_desc_mapping); |
@@ -3142,26 +3586,25 @@ static void bnx2x_init_context(struct bnx2x *bp) | |||
3142 | U64_HI(fp->tx_prods_mapping); | 3586 | U64_HI(fp->tx_prods_mapping); |
3143 | context->xstorm_st_context.db_data_addr_lo = | 3587 | context->xstorm_st_context.db_data_addr_lo = |
3144 | U64_LO(fp->tx_prods_mapping); | 3588 | U64_LO(fp->tx_prods_mapping); |
3145 | 3589 | context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) | | |
3146 | context->ustorm_st_context.rx_bd_page_base_hi = | 3590 | XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE); |
3591 | |||
3592 | context->ustorm_st_context.common.sb_index_numbers = | ||
3593 | BNX2X_RX_SB_INDEX_NUM; | ||
3594 | context->ustorm_st_context.common.clientId = FP_CL_ID(fp); | ||
3595 | context->ustorm_st_context.common.status_block_id = sb_id; | ||
3596 | context->ustorm_st_context.common.flags = | ||
3597 | USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT; | ||
3598 | context->ustorm_st_context.common.mc_alignment_size = 64; | ||
3599 | context->ustorm_st_context.common.bd_buff_size = | ||
3600 | bp->rx_buf_use_size; | ||
3601 | context->ustorm_st_context.common.bd_page_base_hi = | ||
3147 | U64_HI(fp->rx_desc_mapping); | 3602 | U64_HI(fp->rx_desc_mapping); |
3148 | context->ustorm_st_context.rx_bd_page_base_lo = | 3603 | context->ustorm_st_context.common.bd_page_base_lo = |
3149 | U64_LO(fp->rx_desc_mapping); | 3604 | U64_LO(fp->rx_desc_mapping); |
3150 | context->ustorm_st_context.status_block_id = i; | ||
3151 | context->ustorm_st_context.sb_index_number = | ||
3152 | HC_INDEX_U_ETH_RX_CQ_CONS; | ||
3153 | context->ustorm_st_context.rcq_base_address_hi = | ||
3154 | U64_HI(fp->rx_comp_mapping); | ||
3155 | context->ustorm_st_context.rcq_base_address_lo = | ||
3156 | U64_LO(fp->rx_comp_mapping); | ||
3157 | context->ustorm_st_context.flags = | ||
3158 | USTORM_ETH_ST_CONTEXT_ENABLE_MC_ALIGNMENT; | ||
3159 | context->ustorm_st_context.mc_alignment_size = 64; | ||
3160 | context->ustorm_st_context.num_rss = bp->num_queues; | ||
3161 | |||
3162 | context->cstorm_st_context.sb_index_number = | 3605 | context->cstorm_st_context.sb_index_number = |
3163 | HC_INDEX_C_ETH_TX_CQ_CONS; | 3606 | HC_INDEX_C_ETH_TX_CQ_CONS; |
3164 | context->cstorm_st_context.status_block_id = i; | 3607 | context->cstorm_st_context.status_block_id = sb_id; |
3165 | 3608 | ||
3166 | context->xstorm_ag_context.cdu_reserved = | 3609 | context->xstorm_ag_context.cdu_reserved = |
3167 | CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i), | 3610 | CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i), |
@@ -3176,14 +3619,16 @@ static void bnx2x_init_context(struct bnx2x *bp) | |||
3176 | 3619 | ||
3177 | static void bnx2x_init_ind_table(struct bnx2x *bp) | 3620 | static void bnx2x_init_ind_table(struct bnx2x *bp) |
3178 | { | 3621 | { |
3179 | int port = bp->port; | 3622 | int port = BP_PORT(bp); |
3180 | int i; | 3623 | int i; |
3181 | 3624 | ||
3182 | if (!is_multi(bp)) | 3625 | if (!is_multi(bp)) |
3183 | return; | 3626 | return; |
3184 | 3627 | ||
3628 | DP(NETIF_MSG_IFUP, "Initializing indirection table\n"); | ||
3185 | for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++) | 3629 | for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++) |
3186 | REG_WR8(bp, TSTORM_INDIRECTION_TABLE_OFFSET(port) + i, | 3630 | REG_WR8(bp, BAR_TSTRORM_INTMEM + |
3631 | TSTORM_INDIRECTION_TABLE_OFFSET(port) + i, | ||
3187 | i % bp->num_queues); | 3632 | i % bp->num_queues); |
3188 | 3633 | ||
3189 | REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); | 3634 | REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); |
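The indirection-table fill above simply stripes the entries across the active queues with i % num_queues. A hedged sketch with an assumed table size:

/* Hedged sketch of the round-robin indirection-table fill; the table
 * size and queue count are illustrative values. */
#include <stdio.h>

#define IND_TABLE_SIZE	128	/* assumed TSTORM_INDIRECTION_TABLE_SIZE */

int main(void)
{
	int num_queues = 4;
	unsigned char table[IND_TABLE_SIZE];

	for (int i = 0; i < IND_TABLE_SIZE; i++)
		table[i] = i % num_queues;	/* queue serving this hash bucket */

	printf("entry 0 -> q%u, entry 5 -> q%u, entry 127 -> q%u\n",
	       (unsigned)table[0], (unsigned)table[5], (unsigned)table[127]);
	return 0;
}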
@@ -3191,77 +3636,74 @@ static void bnx2x_init_ind_table(struct bnx2x *bp) | |||
3191 | 3636 | ||
3192 | static void bnx2x_set_client_config(struct bnx2x *bp) | 3637 | static void bnx2x_set_client_config(struct bnx2x *bp) |
3193 | { | 3638 | { |
3194 | #ifdef BCM_VLAN | ||
3195 | int mode = bp->rx_mode; | ||
3196 | #endif | ||
3197 | int i, port = bp->port; | ||
3198 | struct tstorm_eth_client_config tstorm_client = {0}; | 3639 | struct tstorm_eth_client_config tstorm_client = {0}; |
3640 | int port = BP_PORT(bp); | ||
3641 | int i; | ||
3199 | 3642 | ||
3200 | tstorm_client.mtu = bp->dev->mtu; | 3643 | tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD; |
3201 | tstorm_client.statistics_counter_id = 0; | 3644 | tstorm_client.statistics_counter_id = 0; |
3202 | tstorm_client.config_flags = | 3645 | tstorm_client.config_flags = |
3203 | TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE; | 3646 | TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE; |
3204 | #ifdef BCM_VLAN | 3647 | #ifdef BCM_VLAN |
3205 | if (mode && bp->vlgrp) { | 3648 | if (bp->rx_mode && bp->vlgrp) { |
3206 | tstorm_client.config_flags |= | 3649 | tstorm_client.config_flags |= |
3207 | TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE; | 3650 | TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE; |
3208 | DP(NETIF_MSG_IFUP, "vlan removal enabled\n"); | 3651 | DP(NETIF_MSG_IFUP, "vlan removal enabled\n"); |
3209 | } | 3652 | } |
3210 | #endif | 3653 | #endif |
3211 | if (mode != BNX2X_RX_MODE_PROMISC) | ||
3212 | tstorm_client.drop_flags = | ||
3213 | TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR; | ||
3214 | 3654 | ||
3215 | for_each_queue(bp, i) { | 3655 | for_each_queue(bp, i) { |
3216 | REG_WR(bp, BAR_TSTRORM_INTMEM + | 3656 | REG_WR(bp, BAR_TSTRORM_INTMEM + |
3217 | TSTORM_CLIENT_CONFIG_OFFSET(port, i), | 3657 | TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id), |
3218 | ((u32 *)&tstorm_client)[0]); | 3658 | ((u32 *)&tstorm_client)[0]); |
3219 | REG_WR(bp, BAR_TSTRORM_INTMEM + | 3659 | REG_WR(bp, BAR_TSTRORM_INTMEM + |
3220 | TSTORM_CLIENT_CONFIG_OFFSET(port, i) + 4, | 3660 | TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4, |
3221 | ((u32 *)&tstorm_client)[1]); | 3661 | ((u32 *)&tstorm_client)[1]); |
3222 | } | 3662 | } |
3223 | 3663 | ||
3224 | /* DP(NETIF_MSG_IFUP, "tstorm_client: 0x%08x 0x%08x\n", | 3664 | DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n", |
3225 | ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); */ | 3665 | ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); |
3226 | } | 3666 | } |
3227 | 3667 | ||
3228 | static void bnx2x_set_storm_rx_mode(struct bnx2x *bp) | 3668 | static void bnx2x_set_storm_rx_mode(struct bnx2x *bp) |
3229 | { | 3669 | { |
3230 | int mode = bp->rx_mode; | ||
3231 | int port = bp->port; | ||
3232 | struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0}; | 3670 | struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0}; |
3671 | int mode = bp->rx_mode; | ||
3672 | int mask = (1 << BP_L_ID(bp)); | ||
3673 | int func = BP_FUNC(bp); | ||
3233 | int i; | 3674 | int i; |
3234 | 3675 | ||
3235 | DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode); | 3676 | DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode); |
3236 | 3677 | ||
3237 | switch (mode) { | 3678 | switch (mode) { |
3238 | case BNX2X_RX_MODE_NONE: /* no Rx */ | 3679 | case BNX2X_RX_MODE_NONE: /* no Rx */ |
3239 | tstorm_mac_filter.ucast_drop_all = 1; | 3680 | tstorm_mac_filter.ucast_drop_all = mask; |
3240 | tstorm_mac_filter.mcast_drop_all = 1; | 3681 | tstorm_mac_filter.mcast_drop_all = mask; |
3241 | tstorm_mac_filter.bcast_drop_all = 1; | 3682 | tstorm_mac_filter.bcast_drop_all = mask; |
3242 | break; | 3683 | break; |
3243 | case BNX2X_RX_MODE_NORMAL: | 3684 | case BNX2X_RX_MODE_NORMAL: |
3244 | tstorm_mac_filter.bcast_accept_all = 1; | 3685 | tstorm_mac_filter.bcast_accept_all = mask; |
3245 | break; | 3686 | break; |
3246 | case BNX2X_RX_MODE_ALLMULTI: | 3687 | case BNX2X_RX_MODE_ALLMULTI: |
3247 | tstorm_mac_filter.mcast_accept_all = 1; | 3688 | tstorm_mac_filter.mcast_accept_all = mask; |
3248 | tstorm_mac_filter.bcast_accept_all = 1; | 3689 | tstorm_mac_filter.bcast_accept_all = mask; |
3249 | break; | 3690 | break; |
3250 | case BNX2X_RX_MODE_PROMISC: | 3691 | case BNX2X_RX_MODE_PROMISC: |
3251 | tstorm_mac_filter.ucast_accept_all = 1; | 3692 | tstorm_mac_filter.ucast_accept_all = mask; |
3252 | tstorm_mac_filter.mcast_accept_all = 1; | 3693 | tstorm_mac_filter.mcast_accept_all = mask; |
3253 | tstorm_mac_filter.bcast_accept_all = 1; | 3694 | tstorm_mac_filter.bcast_accept_all = mask; |
3254 | break; | 3695 | break; |
3255 | default: | 3696 | default: |
3256 | BNX2X_ERR("bad rx mode (%d)\n", mode); | 3697 | BNX2X_ERR("BAD rx mode (%d)\n", mode); |
3698 | break; | ||
3257 | } | 3699 | } |
3258 | 3700 | ||
3259 | for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) { | 3701 | for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) { |
3260 | REG_WR(bp, BAR_TSTRORM_INTMEM + | 3702 | REG_WR(bp, BAR_TSTRORM_INTMEM + |
3261 | TSTORM_MAC_FILTER_CONFIG_OFFSET(port) + i * 4, | 3703 | TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4, |
3262 | ((u32 *)&tstorm_mac_filter)[i]); | 3704 | ((u32 *)&tstorm_mac_filter)[i]); |
3263 | 3705 | ||
3264 | /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i, | 3706 | /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i, |
3265 | ((u32 *)&tstorm_mac_filter)[i]); */ | 3707 | ((u32 *)&tstorm_mac_filter)[i]); */ |
3266 | } | 3708 | } |
3267 | 3709 | ||
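The E1H multi-function change above turns the ucast/mcast/bcast filter fields from booleans into per-client bitmasks, so each function only sets its own bit (1 << BP_L_ID). A hedged sketch of how such masks accumulate across clients and rx modes; the mode names and client IDs are illustrative, mirroring the mask = 1 << client idea rather than the driver's exact write path:

/* Hedged sketch of per-client rx-mode bitmasks. */
#include <stdio.h>
#include <stdint.h>

enum rx_mode { RX_NONE, RX_NORMAL, RX_ALLMULTI, RX_PROMISC };

struct mac_filter {
	uint8_t ucast_drop_all, ucast_accept_all;
	uint8_t mcast_drop_all, mcast_accept_all;
	uint8_t bcast_drop_all, bcast_accept_all;
};

static void apply_rx_mode(struct mac_filter *f, int client_id, enum rx_mode mode)
{
	uint8_t mask = 1 << client_id;	/* this function's bit only */

	switch (mode) {
	case RX_NONE:
		f->ucast_drop_all |= mask;
		f->mcast_drop_all |= mask;
		f->bcast_drop_all |= mask;
		break;
	case RX_NORMAL:
		f->bcast_accept_all |= mask;
		break;
	case RX_ALLMULTI:
		f->mcast_accept_all |= mask;
		f->bcast_accept_all |= mask;
		break;
	case RX_PROMISC:
		f->ucast_accept_all |= mask;
		f->mcast_accept_all |= mask;
		f->bcast_accept_all |= mask;
		break;
	}
}

int main(void)
{
	struct mac_filter f = {0};

	apply_rx_mode(&f, 0, RX_PROMISC);	/* client 0 goes promiscuous */
	apply_rx_mode(&f, 2, RX_NONE);		/* client 2 drops everything */

	printf("ucast: accept 0x%02x drop 0x%02x\n",
	       (unsigned)f.ucast_accept_all, (unsigned)f.ucast_drop_all);
	return 0;
}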
@@ -3271,26 +3713,30 @@ static void bnx2x_set_storm_rx_mode(struct bnx2x *bp) | |||
3271 | 3713 | ||
3272 | static void bnx2x_init_internal(struct bnx2x *bp) | 3714 | static void bnx2x_init_internal(struct bnx2x *bp) |
3273 | { | 3715 | { |
3274 | int port = bp->port; | ||
3275 | struct tstorm_eth_function_common_config tstorm_config = {0}; | 3716 | struct tstorm_eth_function_common_config tstorm_config = {0}; |
3276 | struct stats_indication_flags stats_flags = {0}; | 3717 | struct stats_indication_flags stats_flags = {0}; |
3718 | int port = BP_PORT(bp); | ||
3719 | int func = BP_FUNC(bp); | ||
3720 | int i; | ||
3277 | 3721 | ||
3278 | if (is_multi(bp)) { | 3722 | if (is_multi(bp)) { |
3279 | tstorm_config.config_flags = MULTI_FLAGS; | 3723 | tstorm_config.config_flags = MULTI_FLAGS; |
3280 | tstorm_config.rss_result_mask = MULTI_MASK; | 3724 | tstorm_config.rss_result_mask = MULTI_MASK; |
3281 | } | 3725 | } |
3282 | 3726 | ||
3727 | tstorm_config.leading_client_id = BP_L_ID(bp); | ||
3728 | |||
3283 | REG_WR(bp, BAR_TSTRORM_INTMEM + | 3729 | REG_WR(bp, BAR_TSTRORM_INTMEM + |
3284 | TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(port), | 3730 | TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func), |
3285 | (*(u32 *)&tstorm_config)); | 3731 | (*(u32 *)&tstorm_config)); |
3286 | 3732 | ||
3287 | /* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n", | 3733 | /* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n", |
3288 | (*(u32 *)&tstorm_config)); */ | 3734 | (*(u32 *)&tstorm_config)); */ |
3289 | 3735 | ||
3290 | bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */ | 3736 | bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */ |
3291 | bnx2x_set_storm_rx_mode(bp); | 3737 | bnx2x_set_storm_rx_mode(bp); |
3292 | 3738 | ||
3293 | stats_flags.collect_eth = cpu_to_le32(1); | 3739 | stats_flags.collect_eth = 1; |
3294 | 3740 | ||
3295 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), | 3741 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), |
3296 | ((u32 *)&stats_flags)[0]); | 3742 | ((u32 *)&stats_flags)[0]); |
@@ -3307,8 +3753,28 @@ static void bnx2x_init_internal(struct bnx2x *bp) | |||
3307 | REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4, | 3753 | REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4, |
3308 | ((u32 *)&stats_flags)[1]); | 3754 | ((u32 *)&stats_flags)[1]); |
3309 | 3755 | ||
3310 | /* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n", | 3756 | /* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n", |
3311 | ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */ | 3757 | ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */ |
3758 | |||
3759 | if (CHIP_IS_E1H(bp)) { | ||
3760 | REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET, | ||
3761 | IS_E1HMF(bp)); | ||
3762 | REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET, | ||
3763 | IS_E1HMF(bp)); | ||
3764 | REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET, | ||
3765 | IS_E1HMF(bp)); | ||
3766 | REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET, | ||
3767 | IS_E1HMF(bp)); | ||
3768 | |||
3769 | REG_WR16(bp, BAR_XSTRORM_INTMEM + | ||
3770 | XSTORM_E1HOV_OFFSET(func), bp->e1hov); | ||
3771 | } | ||
3772 | |||
3773 | /* Zero this manually as its initialization is | ||
3774 | currently missing in the initTool */ | ||
3775 | for (i = 0; i < USTORM_AGG_DATA_SIZE >> 2; i++) | ||
3776 | REG_WR(bp, BAR_USTRORM_INTMEM + | ||
3777 | USTORM_AGG_DATA_OFFSET + 4*i, 0); | ||
3312 | } | 3778 | } |
3313 | 3779 | ||
3314 | static void bnx2x_nic_init(struct bnx2x *bp) | 3780 | static void bnx2x_nic_init(struct bnx2x *bp) |
@@ -3318,15 +3784,20 @@ static void bnx2x_nic_init(struct bnx2x *bp) | |||
3318 | for_each_queue(bp, i) { | 3784 | for_each_queue(bp, i) { |
3319 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 3785 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
3320 | 3786 | ||
3787 | fp->bp = bp; | ||
3321 | fp->state = BNX2X_FP_STATE_CLOSED; | 3788 | fp->state = BNX2X_FP_STATE_CLOSED; |
3322 | DP(NETIF_MSG_IFUP, "bnx2x_init_sb(%p,%p,%d);\n", | ||
3323 | bp, fp->status_blk, i); | ||
3324 | fp->index = i; | 3789 | fp->index = i; |
3325 | bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping, i); | 3790 | fp->cl_id = BP_L_ID(bp) + i; |
3791 | fp->sb_id = fp->cl_id; | ||
3792 | DP(NETIF_MSG_IFUP, | ||
3793 | "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n", | ||
3794 | bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp)); | ||
3795 | bnx2x_init_sb(bp, FP_SB_ID(fp), fp->status_blk, | ||
3796 | fp->status_blk_mapping); | ||
3326 | } | 3797 | } |
3327 | 3798 | ||
3328 | bnx2x_init_def_sb(bp, bp->def_status_blk, | 3799 | bnx2x_init_def_sb(bp, bp->def_status_blk, |
3329 | bp->def_status_blk_mapping, 0x10); | 3800 | bp->def_status_blk_mapping, DEF_SB_ID); |
3330 | bnx2x_update_coalesce(bp); | 3801 | bnx2x_update_coalesce(bp); |
3331 | bnx2x_init_rx_rings(bp); | 3802 | bnx2x_init_rx_rings(bp); |
3332 | bnx2x_init_tx_ring(bp); | 3803 | bnx2x_init_tx_ring(bp); |
@@ -3336,7 +3807,6 @@ static void bnx2x_nic_init(struct bnx2x *bp) | |||
3336 | bnx2x_init_stats(bp); | 3807 | bnx2x_init_stats(bp); |
3337 | bnx2x_init_ind_table(bp); | 3808 | bnx2x_init_ind_table(bp); |
3338 | bnx2x_int_enable(bp); | 3809 | bnx2x_int_enable(bp); |
3339 | |||
3340 | } | 3810 | } |
3341 | 3811 | ||
3342 | /* end of nic init */ | 3812 | /* end of nic init */ |
@@ -3374,7 +3844,7 @@ gunzip_nomem2: | |||
3374 | 3844 | ||
3375 | gunzip_nomem1: | 3845 | gunzip_nomem1: |
3376 | printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for" | 3846 | printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for" |
3377 | " uncompression\n", bp->dev->name); | 3847 | " un-compression\n", bp->dev->name); |
3378 | return -ENOMEM; | 3848 | return -ENOMEM; |
3379 | } | 3849 | } |
3380 | 3850 | ||
@@ -3402,7 +3872,7 @@ static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len) | |||
3402 | 3872 | ||
3403 | n = 10; | 3873 | n = 10; |
3404 | 3874 | ||
3405 | #define FNAME 0x8 | 3875 | #define FNAME 0x8 |
3406 | 3876 | ||
3407 | if (zbuf[3] & FNAME) | 3877 | if (zbuf[3] & FNAME) |
3408 | while ((zbuf[n++] != 0) && (n < len)); | 3878 | while ((zbuf[n++] != 0) && (n < len)); |
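The loop above skips the optional zero-terminated file name that may follow the fixed 10-byte gzip header (FNAME is bit 3 of the FLG byte, RFC 1952) before the deflate stream is handed to zlib. A standalone sketch of that header walk, handling only the FNAME field just as the driver does; the function and macro names here are invented for illustration:

    #include <stddef.h>

    #define GZIP_HDR_LEN    10      /* magic(2) + CM + FLG + MTIME(4) + XFL + OS */
    #define GZIP_FLG_FNAME  0x08    /* bit 3 of the FLG byte (RFC 1952) */

    /* return the offset of the deflate payload inside a gzip image,
     * or -1 if the buffer is malformed or too short */
    int gzip_payload_offset(const unsigned char *buf, size_t len)
    {
            size_t n = GZIP_HDR_LEN;

            if (len < GZIP_HDR_LEN || buf[0] != 0x1f || buf[1] != 0x8b)
                    return -1;

            if (buf[3] & GZIP_FLG_FNAME)            /* skip NUL-terminated name */
                    while (n < len && buf[n++] != 0)
                            ;

            return (n < len) ? (int)n : -1;
    }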
@@ -3439,41 +3909,25 @@ static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len) | |||
3439 | /* nic load/unload */ | 3909 | /* nic load/unload */ |
3440 | 3910 | ||
3441 | /* | 3911 | /* |
3442 | * general service functions | 3912 | * General service functions |
3443 | */ | 3913 | */ |
3444 | 3914 | ||
3445 | /* send a NIG loopback debug packet */ | 3915 | /* send a NIG loopback debug packet */ |
3446 | static void bnx2x_lb_pckt(struct bnx2x *bp) | 3916 | static void bnx2x_lb_pckt(struct bnx2x *bp) |
3447 | { | 3917 | { |
3448 | #ifdef USE_DMAE | ||
3449 | u32 wb_write[3]; | 3918 | u32 wb_write[3]; |
3450 | #endif | ||
3451 | 3919 | ||
3452 | /* Ethernet source and destination addresses */ | 3920 | /* Ethernet source and destination addresses */ |
3453 | #ifdef USE_DMAE | ||
3454 | wb_write[0] = 0x55555555; | 3921 | wb_write[0] = 0x55555555; |
3455 | wb_write[1] = 0x55555555; | 3922 | wb_write[1] = 0x55555555; |
3456 | wb_write[2] = 0x20; /* SOP */ | 3923 | wb_write[2] = 0x20; /* SOP */ |
3457 | REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); | 3924 | REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); |
3458 | #else | ||
3459 | REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x55555555); | ||
3460 | REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555); | ||
3461 | /* SOP */ | ||
3462 | REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x20); | ||
3463 | #endif | ||
3464 | 3925 | ||
3465 | /* NON-IP protocol */ | 3926 | /* NON-IP protocol */ |
3466 | #ifdef USE_DMAE | ||
3467 | wb_write[0] = 0x09000000; | 3927 | wb_write[0] = 0x09000000; |
3468 | wb_write[1] = 0x55555555; | 3928 | wb_write[1] = 0x55555555; |
3469 | wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */ | 3929 | wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */ |
3470 | REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); | 3930 | REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); |
3471 | #else | ||
3472 | REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x09000000); | ||
3473 | REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555); | ||
3474 | /* EOP, eop_bvalid = 0 */ | ||
3475 | REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x10); | ||
3476 | #endif | ||
3477 | } | 3931 | } |
3478 | 3932 | ||
3479 | /* some of the internal memories | 3933 | /* some of the internal memories |
@@ -3511,13 +3965,9 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) | |||
3511 | /* Wait until NIG register shows 1 packet of size 0x10 */ | 3965 | /* Wait until NIG register shows 1 packet of size 0x10 */ |
3512 | count = 1000 * factor; | 3966 | count = 1000 * factor; |
3513 | while (count) { | 3967 | while (count) { |
3514 | #ifdef BNX2X_DMAE_RD | 3968 | |
3515 | bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); | 3969 | bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); |
3516 | val = *bnx2x_sp(bp, wb_data[0]); | 3970 | val = *bnx2x_sp(bp, wb_data[0]); |
3517 | #else | ||
3518 | val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET); | ||
3519 | REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4); | ||
3520 | #endif | ||
3521 | if (val == 0x10) | 3971 | if (val == 0x10) |
3522 | break; | 3972 | break; |
3523 | 3973 | ||
@@ -3533,7 +3983,6 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) | |||
3533 | count = 1000 * factor; | 3983 | count = 1000 * factor; |
3534 | while (count) { | 3984 | while (count) { |
3535 | val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); | 3985 | val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); |
3536 | |||
3537 | if (val == 1) | 3986 | if (val == 1) |
3538 | break; | 3987 | break; |
3539 | 3988 | ||
@@ -3546,9 +3995,9 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) | |||
3546 | } | 3995 | } |
3547 | 3996 | ||
3548 | /* Reset and init BRB, PRS */ | 3997 | /* Reset and init BRB, PRS */ |
3549 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x3); | 3998 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); |
3550 | msleep(50); | 3999 | msleep(50); |
3551 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x3); | 4000 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); |
3552 | msleep(50); | 4001 | msleep(50); |
3553 | bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END); | 4002 | bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END); |
3554 | bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END); | 4003 | bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END); |
@@ -3572,13 +4021,9 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) | |||
3572 | packets of size 11*0x10 = 0xb0 */ | 4021 | packets of size 11*0x10 = 0xb0 */ |
3573 | count = 1000 * factor; | 4022 | count = 1000 * factor; |
3574 | while (count) { | 4023 | while (count) { |
3575 | #ifdef BNX2X_DMAE_RD | 4024 | |
3576 | bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); | 4025 | bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); |
3577 | val = *bnx2x_sp(bp, wb_data[0]); | 4026 | val = *bnx2x_sp(bp, wb_data[0]); |
3578 | #else | ||
3579 | val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET); | ||
3580 | REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4); | ||
3581 | #endif | ||
3582 | if (val == 0xb0) | 4027 | if (val == 0xb0) |
3583 | break; | 4028 | break; |
3584 | 4029 | ||
@@ -3648,85 +4093,75 @@ static void enable_blocks_attention(struct bnx2x *bp) | |||
3648 | REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0); | 4093 | REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0); |
3649 | REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0); | 4094 | REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0); |
3650 | REG_WR(bp, XCM_REG_XCM_INT_MASK, 0); | 4095 | REG_WR(bp, XCM_REG_XCM_INT_MASK, 0); |
3651 | /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */ | 4096 | /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */ |
3652 | /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */ | 4097 | /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */ |
3653 | REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0); | 4098 | REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0); |
3654 | REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0); | 4099 | REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0); |
3655 | REG_WR(bp, UCM_REG_UCM_INT_MASK, 0); | 4100 | REG_WR(bp, UCM_REG_UCM_INT_MASK, 0); |
3656 | /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */ | 4101 | /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */ |
3657 | /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */ | 4102 | /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */ |
3658 | REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); | 4103 | REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); |
3659 | REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0); | 4104 | REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0); |
3660 | REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0); | 4105 | REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0); |
3661 | REG_WR(bp, CCM_REG_CCM_INT_MASK, 0); | 4106 | REG_WR(bp, CCM_REG_CCM_INT_MASK, 0); |
3662 | /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */ | 4107 | /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */ |
3663 | /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */ | 4108 | /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */ |
3664 | REG_WR(bp, PXP2_REG_PXP2_INT_MASK, 0x480000); | 4109 | if (CHIP_REV_IS_FPGA(bp)) |
4110 | REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000); | ||
4111 | else | ||
4112 | REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000); | ||
3665 | REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0); | 4113 | REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0); |
3666 | REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0); | 4114 | REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0); |
3667 | REG_WR(bp, TCM_REG_TCM_INT_MASK, 0); | 4115 | REG_WR(bp, TCM_REG_TCM_INT_MASK, 0); |
3668 | /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */ | 4116 | /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */ |
3669 | /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */ | 4117 | /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */ |
3670 | REG_WR(bp, CDU_REG_CDU_INT_MASK, 0); | 4118 | REG_WR(bp, CDU_REG_CDU_INT_MASK, 0); |
3671 | REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0); | 4119 | REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0); |
3672 | /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */ | 4120 | /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */ |
3673 | REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */ | 4121 | REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */ |
3674 | } | 4122 | } |
3675 | 4123 | ||
3676 | static int bnx2x_function_init(struct bnx2x *bp, int mode) | 4124 | |
4125 | static int bnx2x_init_common(struct bnx2x *bp) | ||
3677 | { | 4126 | { |
3678 | int func = bp->port; | ||
3679 | int port = func ? PORT1 : PORT0; | ||
3680 | u32 val, i; | 4127 | u32 val, i; |
3681 | #ifdef USE_DMAE | ||
3682 | u32 wb_write[2]; | ||
3683 | #endif | ||
3684 | |||
3685 | DP(BNX2X_MSG_MCP, "function is %d mode is %x\n", func, mode); | ||
3686 | if ((func != 0) && (func != 1)) { | ||
3687 | BNX2X_ERR("BAD function number (%d)\n", func); | ||
3688 | return -ENODEV; | ||
3689 | } | ||
3690 | 4128 | ||
3691 | bnx2x_gunzip_init(bp); | 4129 | DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp)); |
3692 | 4130 | ||
3693 | if (mode & 0x1) { /* init common */ | 4131 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); |
3694 | DP(BNX2X_MSG_MCP, "starting common init func %d mode %x\n", | 4132 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc); |
3695 | func, mode); | ||
3696 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, | ||
3697 | 0xffffffff); | ||
3698 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, | ||
3699 | 0xfffc); | ||
3700 | bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END); | ||
3701 | 4133 | ||
3702 | REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100); | 4134 | bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END); |
3703 | msleep(30); | 4135 | if (CHIP_IS_E1H(bp)) |
3704 | REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0); | 4136 | REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp)); |
3705 | 4137 | ||
3706 | bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END); | 4138 | REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100); |
3707 | bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END); | 4139 | msleep(30); |
4140 | REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0); | ||
3708 | 4141 | ||
3709 | bnx2x_init_pxp(bp); | 4142 | bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END); |
4143 | if (CHIP_IS_E1(bp)) { | ||
4144 | /* enable HW interrupt from PXP on USDM overflow | ||
4145 | bit 16 on INT_MASK_0 */ | ||
4146 | REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); | ||
4147 | } | ||
3710 | 4148 | ||
3711 | if (CHIP_REV(bp) == CHIP_REV_Ax) { | 4149 | bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END); |
3712 | /* enable HW interrupt from PXP on USDM | 4150 | bnx2x_init_pxp(bp); |
3713 | overflow bit 16 on INT_MASK_0 */ | ||
3714 | REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); | ||
3715 | } | ||
3716 | 4151 | ||
3717 | #ifdef __BIG_ENDIAN | 4152 | #ifdef __BIG_ENDIAN |
3718 | REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1); | 4153 | REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1); |
3719 | REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1); | 4154 | REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1); |
3720 | REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1); | 4155 | REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1); |
3721 | REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1); | 4156 | REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1); |
3722 | REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1); | 4157 | REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1); |
3723 | REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1); | 4158 | REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1); |
3724 | 4159 | ||
3725 | /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */ | 4160 | /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */ |
3726 | REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1); | 4161 | REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1); |
3727 | REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1); | 4162 | REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1); |
3728 | REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1); | 4163 | REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1); |
3729 | REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1); | 4164 | REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1); |
3730 | #endif | 4165 | #endif |
3731 | 4166 | ||
3732 | #ifndef BCM_ISCSI | 4167 | #ifndef BCM_ISCSI |
@@ -3734,92 +4169,105 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode) | |||
3734 | REG_WR(bp, PRS_REG_NIC_MODE, 1); | 4169 | REG_WR(bp, PRS_REG_NIC_MODE, 1); |
3735 | #endif | 4170 | #endif |
3736 | 4171 | ||
3737 | REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 5); | 4172 | REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2); |
3738 | #ifdef BCM_ISCSI | 4173 | #ifdef BCM_ISCSI |
3739 | REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5); | 4174 | REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5); |
3740 | REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5); | 4175 | REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5); |
3741 | REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5); | 4176 | REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5); |
3742 | #endif | 4177 | #endif |
3743 | 4178 | ||
3744 | bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END); | 4179 | if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp)) |
4180 | REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1); | ||
3745 | 4181 | ||
3746 | /* let the HW do its magic ... */ | 4182 | /* let the HW do its magic ... */ |
3747 | msleep(100); | 4183 | msleep(100); |
3748 | /* finish PXP init | 4184 | /* finish PXP init */ |
3749 | (can be moved up if we want to use the DMAE) */ | 4185 | val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE); |
3750 | val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE); | 4186 | if (val != 1) { |
3751 | if (val != 1) { | 4187 | BNX2X_ERR("PXP2 CFG failed\n"); |
3752 | BNX2X_ERR("PXP2 CFG failed\n"); | 4188 | return -EBUSY; |
3753 | return -EBUSY; | 4189 | } |
3754 | } | 4190 | val = REG_RD(bp, PXP2_REG_RD_INIT_DONE); |
4191 | if (val != 1) { | ||
4192 | BNX2X_ERR("PXP2 RD_INIT failed\n"); | ||
4193 | return -EBUSY; | ||
4194 | } | ||
3755 | 4195 | ||
3756 | val = REG_RD(bp, PXP2_REG_RD_INIT_DONE); | 4196 | REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0); |
3757 | if (val != 1) { | 4197 | REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0); |
3758 | BNX2X_ERR("PXP2 RD_INIT failed\n"); | ||
3759 | return -EBUSY; | ||
3760 | } | ||
3761 | 4198 | ||
3762 | REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0); | 4199 | bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END); |
3763 | REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0); | ||
3764 | 4200 | ||
3765 | bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8); | 4201 | /* clean the DMAE memory */ |
4202 | bp->dmae_ready = 1; | ||
4203 | bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8); | ||
3766 | 4204 | ||
3767 | bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END); | 4205 | bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END); |
3768 | bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END); | 4206 | bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END); |
3769 | bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END); | 4207 | bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END); |
3770 | bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END); | 4208 | bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END); |
3771 | 4209 | ||
3772 | #ifdef BNX2X_DMAE_RD | 4210 | bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3); |
3773 | bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3); | 4211 | bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3); |
3774 | bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3); | 4212 | bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3); |
3775 | bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3); | 4213 | bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3); |
3776 | bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3); | 4214 | |
3777 | #else | 4215 | bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END); |
3778 | REG_RD(bp, XSEM_REG_PASSIVE_BUFFER); | 4216 | /* soft reset pulse */ |
3779 | REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 4); | 4217 | REG_WR(bp, QM_REG_SOFT_RESET, 1); |
3780 | REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 8); | 4218 | REG_WR(bp, QM_REG_SOFT_RESET, 0); |
3781 | REG_RD(bp, CSEM_REG_PASSIVE_BUFFER); | ||
3782 | REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 4); | ||
3783 | REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 8); | ||
3784 | REG_RD(bp, TSEM_REG_PASSIVE_BUFFER); | ||
3785 | REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 4); | ||
3786 | REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 8); | ||
3787 | REG_RD(bp, USEM_REG_PASSIVE_BUFFER); | ||
3788 | REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 4); | ||
3789 | REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 8); | ||
3790 | #endif | ||
3791 | bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END); | ||
3792 | /* soft reset pulse */ | ||
3793 | REG_WR(bp, QM_REG_SOFT_RESET, 1); | ||
3794 | REG_WR(bp, QM_REG_SOFT_RESET, 0); | ||
3795 | 4219 | ||
3796 | #ifdef BCM_ISCSI | 4220 | #ifdef BCM_ISCSI |
3797 | bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END); | 4221 | bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END); |
3798 | #endif | 4222 | #endif |
3799 | bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END); | ||
3800 | REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_BITS); | ||
3801 | if (CHIP_REV(bp) == CHIP_REV_Ax) { | ||
3802 | /* enable hw interrupt from doorbell Q */ | ||
3803 | REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); | ||
3804 | } | ||
3805 | 4223 | ||
3806 | bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END); | 4224 | bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END); |
4225 | REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT); | ||
4226 | if (!CHIP_REV_IS_SLOW(bp)) { | ||
4227 | /* enable hw interrupt from doorbell Q */ | ||
4228 | REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); | ||
4229 | } | ||
3807 | 4230 | ||
3808 | if (CHIP_REV_IS_SLOW(bp)) { | 4231 | bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END); |
3809 | /* fix for emulation and FPGA for no pause */ | 4232 | if (CHIP_REV_IS_SLOW(bp)) { |
3810 | REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513); | 4233 | /* fix for emulation and FPGA for no pause */ |
3811 | REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513); | 4234 | REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513); |
3812 | REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0); | 4235 | REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513); |
3813 | REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0); | 4236 | REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0); |
3814 | } | 4237 | REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0); |
4238 | } | ||
3815 | 4239 | ||
3816 | bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END); | 4240 | bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END); |
4241 | if (CHIP_IS_E1H(bp)) | ||
4242 | REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp)); | ||
3817 | 4243 | ||
3818 | bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END); | 4244 | bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END); |
3819 | bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END); | 4245 | bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END); |
3820 | bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END); | 4246 | bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END); |
3821 | bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END); | 4247 | bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END); |
3822 | 4248 | ||
4249 | if (CHIP_IS_E1H(bp)) { | ||
4250 | bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, | ||
4251 | STORM_INTMEM_SIZE_E1H/2); | ||
4252 | bnx2x_init_fill(bp, | ||
4253 | TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2, | ||
4254 | 0, STORM_INTMEM_SIZE_E1H/2); | ||
4255 | bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, | ||
4256 | STORM_INTMEM_SIZE_E1H/2); | ||
4257 | bnx2x_init_fill(bp, | ||
4258 | CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2, | ||
4259 | 0, STORM_INTMEM_SIZE_E1H/2); | ||
4260 | bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, | ||
4261 | STORM_INTMEM_SIZE_E1H/2); | ||
4262 | bnx2x_init_fill(bp, | ||
4263 | XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2, | ||
4264 | 0, STORM_INTMEM_SIZE_E1H/2); | ||
4265 | bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, | ||
4266 | STORM_INTMEM_SIZE_E1H/2); | ||
4267 | bnx2x_init_fill(bp, | ||
4268 | USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2, | ||
4269 | 0, STORM_INTMEM_SIZE_E1H/2); | ||
4270 | } else { /* E1 */ | ||
3823 | bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, | 4271 | bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, |
3824 | STORM_INTMEM_SIZE_E1); | 4272 | STORM_INTMEM_SIZE_E1); |
3825 | bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, | 4273 | bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, |
@@ -3828,157 +4276,141 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode) | |||
3828 | STORM_INTMEM_SIZE_E1); | 4276 | STORM_INTMEM_SIZE_E1); |
3829 | bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, | 4277 | bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, |
3830 | STORM_INTMEM_SIZE_E1); | 4278 | STORM_INTMEM_SIZE_E1); |
4279 | } | ||
3831 | 4280 | ||
3832 | bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END); | 4281 | bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END); |
3833 | bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END); | 4282 | bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END); |
3834 | bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END); | 4283 | bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END); |
3835 | bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END); | 4284 | bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END); |
3836 | |||
3837 | /* sync semi rtc */ | ||
3838 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, | ||
3839 | 0x80000000); | ||
3840 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, | ||
3841 | 0x80000000); | ||
3842 | |||
3843 | bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END); | ||
3844 | bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END); | ||
3845 | bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END); | ||
3846 | |||
3847 | REG_WR(bp, SRC_REG_SOFT_RST, 1); | ||
3848 | for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) { | ||
3849 | REG_WR(bp, i, 0xc0cac01a); | ||
3850 | /* TODO: replace with something meaningful */ | ||
3851 | } | ||
3852 | /* SRCH COMMON comes here */ | ||
3853 | REG_WR(bp, SRC_REG_SOFT_RST, 0); | ||
3854 | |||
3855 | if (sizeof(union cdu_context) != 1024) { | ||
3856 | /* we currently assume that a context is 1024 bytes */ | ||
3857 | printk(KERN_ALERT PFX "please adjust the size of" | ||
3858 | " cdu_context(%ld)\n", | ||
3859 | (long)sizeof(union cdu_context)); | ||
3860 | } | ||
3861 | val = (4 << 24) + (0 << 12) + 1024; | ||
3862 | REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val); | ||
3863 | bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END); | ||
3864 | |||
3865 | bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END); | ||
3866 | REG_WR(bp, CFC_REG_INIT_REG, 0x7FF); | ||
3867 | |||
3868 | bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END); | ||
3869 | bnx2x_init_block(bp, MISC_AEU_COMMON_START, | ||
3870 | MISC_AEU_COMMON_END); | ||
3871 | /* RXPCS COMMON comes here */ | ||
3872 | /* EMAC0 COMMON comes here */ | ||
3873 | /* EMAC1 COMMON comes here */ | ||
3874 | /* DBU COMMON comes here */ | ||
3875 | /* DBG COMMON comes here */ | ||
3876 | bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END); | ||
3877 | 4285 | ||
3878 | if (CHIP_REV_IS_SLOW(bp)) | 4286 | /* sync semi rtc */ |
3879 | msleep(200); | 4287 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, |
4288 | 0x80000000); | ||
4289 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, | ||
4290 | 0x80000000); | ||
3880 | 4291 | ||
3881 | /* finish CFC init */ | 4292 | bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END); |
3882 | val = REG_RD(bp, CFC_REG_LL_INIT_DONE); | 4293 | bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END); |
3883 | if (val != 1) { | 4294 | bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END); |
3884 | BNX2X_ERR("CFC LL_INIT failed\n"); | ||
3885 | return -EBUSY; | ||
3886 | } | ||
3887 | 4295 | ||
3888 | val = REG_RD(bp, CFC_REG_AC_INIT_DONE); | 4296 | REG_WR(bp, SRC_REG_SOFT_RST, 1); |
3889 | if (val != 1) { | 4297 | for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) { |
3890 | BNX2X_ERR("CFC AC_INIT failed\n"); | 4298 | REG_WR(bp, i, 0xc0cac01a); |
3891 | return -EBUSY; | 4299 | /* TODO: replace with something meaningful */ |
3892 | } | 4300 | } |
4301 | if (CHIP_IS_E1H(bp)) | ||
4302 | bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END); | ||
4303 | REG_WR(bp, SRC_REG_SOFT_RST, 0); | ||
3893 | 4304 | ||
3894 | val = REG_RD(bp, CFC_REG_CAM_INIT_DONE); | 4305 | if (sizeof(union cdu_context) != 1024) |
3895 | if (val != 1) { | 4306 | /* we currently assume that a context is 1024 bytes */ |
3896 | BNX2X_ERR("CFC CAM_INIT failed\n"); | 4307 | printk(KERN_ALERT PFX "please adjust the size of" |
3897 | return -EBUSY; | 4308 | " cdu_context(%ld)\n", (long)sizeof(union cdu_context)); |
3898 | } | ||
3899 | 4309 | ||
3900 | REG_WR(bp, CFC_REG_DEBUG0, 0); | 4310 | bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END); |
4311 | val = (4 << 24) + (0 << 12) + 1024; | ||
4312 | REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val); | ||
4313 | if (CHIP_IS_E1(bp)) { | ||
4314 | /* !!! fix pxp client credit until excel update */ | ||
4315 | REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264); | ||
4316 | REG_WR(bp, CDU_REG_CDU_DEBUG, 0); | ||
4317 | } | ||
3901 | 4318 | ||
3902 | /* read NIG statistic | 4319 | bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END); |
3903 | to see if this is our first up since powerup */ | 4320 | REG_WR(bp, CFC_REG_INIT_REG, 0x7FF); |
3904 | #ifdef BNX2X_DMAE_RD | ||
3905 | bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); | ||
3906 | val = *bnx2x_sp(bp, wb_data[0]); | ||
3907 | #else | ||
3908 | val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET); | ||
3909 | REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4); | ||
3910 | #endif | ||
3911 | /* do internal memory self test */ | ||
3912 | if ((val == 0) && bnx2x_int_mem_test(bp)) { | ||
3913 | BNX2X_ERR("internal mem selftest failed\n"); | ||
3914 | return -EBUSY; | ||
3915 | } | ||
3916 | 4321 | ||
3917 | /* clear PXP2 attentions */ | 4322 | bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END); |
3918 | REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR); | 4323 | bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END); |
3919 | 4324 | ||
3920 | enable_blocks_attention(bp); | 4325 | /* PXPCS COMMON comes here */ |
3921 | /* enable_blocks_parity(bp); */ | 4326 | /* Reset PCIE errors for debug */ |
4327 | REG_WR(bp, 0x2814, 0xffffffff); | ||
4328 | REG_WR(bp, 0x3820, 0xffffffff); | ||
3922 | 4329 | ||
3923 | switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) { | 4330 | /* EMAC0 COMMON comes here */ |
3924 | case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G: | 4331 | /* EMAC1 COMMON comes here */ |
3925 | /* Fan failure is indicated by SPIO 5 */ | 4332 | /* DBU COMMON comes here */ |
3926 | bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5, | 4333 | /* DBG COMMON comes here */ |
3927 | MISC_REGISTERS_SPIO_INPUT_HI_Z); | ||
3928 | 4334 | ||
3929 | /* set to active low mode */ | 4335 | bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END); |
3930 | val = REG_RD(bp, MISC_REG_SPIO_INT); | 4336 | if (CHIP_IS_E1H(bp)) { |
3931 | val |= ((1 << MISC_REGISTERS_SPIO_5) << | 4337 | REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp)); |
4338 | REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp)); | ||
4339 | } | ||
4340 | |||
4341 | if (CHIP_REV_IS_SLOW(bp)) | ||
4342 | msleep(200); | ||
4343 | |||
4344 | /* finish CFC init */ | ||
4345 | val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10); | ||
4346 | if (val != 1) { | ||
4347 | BNX2X_ERR("CFC LL_INIT failed\n"); | ||
4348 | return -EBUSY; | ||
4349 | } | ||
4350 | val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10); | ||
4351 | if (val != 1) { | ||
4352 | BNX2X_ERR("CFC AC_INIT failed\n"); | ||
4353 | return -EBUSY; | ||
4354 | } | ||
4355 | val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10); | ||
4356 | if (val != 1) { | ||
4357 | BNX2X_ERR("CFC CAM_INIT failed\n"); | ||
4358 | return -EBUSY; | ||
4359 | } | ||
4360 | REG_WR(bp, CFC_REG_DEBUG0, 0); | ||
4361 | |||
4362 | /* read NIG statistic | ||
4363 | to see if this is our first up since powerup */ | ||
4364 | bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); | ||
4365 | val = *bnx2x_sp(bp, wb_data[0]); | ||
4366 | |||
4367 | /* do internal memory self test */ | ||
4368 | if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) { | ||
4369 | BNX2X_ERR("internal mem self test failed\n"); | ||
4370 | return -EBUSY; | ||
4371 | } | ||
4372 | |||
4373 | switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) { | ||
4374 | case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G: | ||
4375 | /* Fan failure is indicated by SPIO 5 */ | ||
4376 | bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5, | ||
4377 | MISC_REGISTERS_SPIO_INPUT_HI_Z); | ||
4378 | |||
4379 | /* set to active low mode */ | ||
4380 | val = REG_RD(bp, MISC_REG_SPIO_INT); | ||
4381 | val |= ((1 << MISC_REGISTERS_SPIO_5) << | ||
3932 | MISC_REGISTERS_SPIO_INT_OLD_SET_POS); | 4382 | MISC_REGISTERS_SPIO_INT_OLD_SET_POS); |
3933 | REG_WR(bp, MISC_REG_SPIO_INT, val); | 4383 | REG_WR(bp, MISC_REG_SPIO_INT, val); |
3934 | 4384 | ||
3935 | /* enable interrupt to signal the IGU */ | 4385 | /* enable interrupt to signal the IGU */ |
3936 | val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); | 4386 | val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); |
3937 | val |= (1 << MISC_REGISTERS_SPIO_5); | 4387 | val |= (1 << MISC_REGISTERS_SPIO_5); |
3938 | REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val); | 4388 | REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val); |
3939 | break; | 4389 | break; |
3940 | 4390 | ||
3941 | default: | 4391 | default: |
3942 | break; | 4392 | break; |
3943 | } | 4393 | } |
3944 | 4394 | ||
3945 | } /* end of common init */ | 4395 | /* clear PXP2 attentions */ |
4396 | REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0); | ||
3946 | 4397 | ||
3947 | /* per port init */ | 4398 | enable_blocks_attention(bp); |
3948 | 4399 | ||
3949 | /* the phys address is shifted right 12 bits and has an added | 4400 | return 0; |
3950 | 1=valid bit added to the 53rd bit | 4401 | } |
3951 | then since this is a wide register(TM) | ||
3952 | we split it into two 32 bit writes | ||
3953 | */ | ||
3954 | #define RQ_ONCHIP_AT_PORT_SIZE 384 | ||
3955 | #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF)) | ||
3956 | #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44))) | ||
3957 | #define PXP_ONE_ILT(x) ((x << 10) | x) | ||
3958 | 4402 | ||
3959 | DP(BNX2X_MSG_MCP, "starting per-function init port is %x\n", func); | 4403 | static int bnx2x_init_port(struct bnx2x *bp) |
4404 | { | ||
4405 | int port = BP_PORT(bp); | ||
4406 | u32 val; | ||
3960 | 4407 | ||
3961 | REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + func*4, 0); | 4408 | DP(BNX2X_MSG_MCP, "starting port init port %x\n", port); |
4409 | |||
4410 | REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); | ||
3962 | 4411 | ||
3963 | /* Port PXP comes here */ | 4412 | /* Port PXP comes here */ |
3964 | /* Port PXP2 comes here */ | 4413 | /* Port PXP2 comes here */ |
3965 | |||
3966 | /* Offset is | ||
3967 | * Port0 0 | ||
3968 | * Port1 384 */ | ||
3969 | i = func * RQ_ONCHIP_AT_PORT_SIZE; | ||
3970 | #ifdef USE_DMAE | ||
3971 | wb_write[0] = ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context)); | ||
3972 | wb_write[1] = ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context)); | ||
3973 | REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2); | ||
3974 | #else | ||
3975 | REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, | ||
3976 | ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context))); | ||
3977 | REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8 + 4, | ||
3978 | ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context))); | ||
3979 | #endif | ||
3980 | REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4, PXP_ONE_ILT(i)); | ||
3981 | |||
3982 | #ifdef BCM_ISCSI | 4414 | #ifdef BCM_ISCSI |
3983 | /* Port0 1 | 4415 | /* Port0 1 |
3984 | * Port1 385 */ | 4416 | * Port1 385 */ |
@@ -4004,30 +4436,9 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode) | |||
4004 | REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2); | 4436 | REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2); |
4005 | REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i)); | 4437 | REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i)); |
4006 | #endif | 4438 | #endif |
4007 | 4439 | /* Port CMs come here */ | |
4008 | /* Port TCM comes here */ | ||
4009 | /* Port UCM comes here */ | ||
4010 | /* Port CCM comes here */ | ||
4011 | bnx2x_init_block(bp, func ? XCM_PORT1_START : XCM_PORT0_START, | ||
4012 | func ? XCM_PORT1_END : XCM_PORT0_END); | ||
4013 | |||
4014 | #ifdef USE_DMAE | ||
4015 | wb_write[0] = 0; | ||
4016 | wb_write[1] = 0; | ||
4017 | #endif | ||
4018 | for (i = 0; i < 32; i++) { | ||
4019 | REG_WR(bp, QM_REG_BASEADDR + (func*32 + i)*4, 1024 * 4 * i); | ||
4020 | #ifdef USE_DMAE | ||
4021 | REG_WR_DMAE(bp, QM_REG_PTRTBL + (func*32 + i)*8, wb_write, 2); | ||
4022 | #else | ||
4023 | REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8, 0); | ||
4024 | REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8 + 4, 0); | ||
4025 | #endif | ||
4026 | } | ||
4027 | REG_WR(bp, QM_REG_CONNNUM_0 + func*4, 1024/16 - 1); | ||
4028 | 4440 | ||
4029 | /* Port QM comes here */ | 4441 | /* Port QM comes here */ |
4030 | |||
4031 | #ifdef BCM_ISCSI | 4442 | #ifdef BCM_ISCSI |
4032 | REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20); | 4443 | REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20); |
4033 | REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31); | 4444 | REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31); |
@@ -4042,31 +4453,32 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode) | |||
4042 | /* Port CSDM comes here */ | 4453 | /* Port CSDM comes here */ |
4043 | /* Port USDM comes here */ | 4454 | /* Port USDM comes here */ |
4044 | /* Port XSDM comes here */ | 4455 | /* Port XSDM comes here */ |
4045 | bnx2x_init_block(bp, func ? TSEM_PORT1_START : TSEM_PORT0_START, | 4456 | bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START, |
4046 | func ? TSEM_PORT1_END : TSEM_PORT0_END); | 4457 | port ? TSEM_PORT1_END : TSEM_PORT0_END); |
4047 | bnx2x_init_block(bp, func ? USEM_PORT1_START : USEM_PORT0_START, | 4458 | bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START, |
4048 | func ? USEM_PORT1_END : USEM_PORT0_END); | 4459 | port ? USEM_PORT1_END : USEM_PORT0_END); |
4049 | bnx2x_init_block(bp, func ? CSEM_PORT1_START : CSEM_PORT0_START, | 4460 | bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START, |
4050 | func ? CSEM_PORT1_END : CSEM_PORT0_END); | 4461 | port ? CSEM_PORT1_END : CSEM_PORT0_END); |
4051 | bnx2x_init_block(bp, func ? XSEM_PORT1_START : XSEM_PORT0_START, | 4462 | bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START, |
4052 | func ? XSEM_PORT1_END : XSEM_PORT0_END); | 4463 | port ? XSEM_PORT1_END : XSEM_PORT0_END); |
4053 | /* Port UPB comes here */ | 4464 | /* Port UPB comes here */ |
4054 | /* Port XSDM comes here */ | 4465 | /* Port XPB comes here */ |
4055 | bnx2x_init_block(bp, func ? PBF_PORT1_START : PBF_PORT0_START, | 4466 | |
4056 | func ? PBF_PORT1_END : PBF_PORT0_END); | 4467 | bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START, |
4468 | port ? PBF_PORT1_END : PBF_PORT0_END); | ||
4057 | 4469 | ||
4058 | /* configure PBF to work without PAUSE mtu 9000 */ | 4470 | /* configure PBF to work without PAUSE mtu 9000 */ |
4059 | REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + func*4, 0); | 4471 | REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); |
4060 | 4472 | ||
4061 | /* update threshold */ | 4473 | /* update threshold */ |
4062 | REG_WR(bp, PBF_REG_P0_ARB_THRSH + func*4, (9040/16)); | 4474 | REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16)); |
4063 | /* update init credit */ | 4475 | /* update init credit */ |
4064 | REG_WR(bp, PBF_REG_P0_INIT_CRD + func*4, (9040/16) + 553 - 22); | 4476 | REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); |
4065 | 4477 | ||
4066 | /* probe changes */ | 4478 | /* probe changes */ |
4067 | REG_WR(bp, PBF_REG_INIT_P0 + func*4, 1); | 4479 | REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1); |
4068 | msleep(5); | 4480 | msleep(5); |
4069 | REG_WR(bp, PBF_REG_INIT_P0 + func*4, 0); | 4481 | REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0); |
4070 | 4482 | ||
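For reference, the PBF threshold and init-credit values written just above are in 16-byte units; the arithmetic is spelled out below. Reading 9040 as the 9000-byte MTU plus framing overhead is an assumption, and the + 553 - 22 adjustment is not explained in the source:

    #include <stdio.h>

    int main(void)
    {
            /* values copied from the hunk above, expressed in 16-byte units */
            printf("PBF_REG_P0_ARB_THRSH = %d\n", 9040 / 16);            /* 565 */
            printf("PBF_REG_P0_INIT_CRD  = %d\n", 9040 / 16 + 553 - 22); /* 1096 */
            return 0;
    }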
4071 | #ifdef BCM_ISCSI | 4483 | #ifdef BCM_ISCSI |
4072 | /* tell the searcher where the T2 table is */ | 4484 | /* tell the searcher where the T2 table is */ |
@@ -4084,23 +4496,57 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode) | |||
4084 | #endif | 4496 | #endif |
4085 | /* Port CDU comes here */ | 4497 | /* Port CDU comes here */ |
4086 | /* Port CFC comes here */ | 4498 | /* Port CFC comes here */ |
4087 | bnx2x_init_block(bp, func ? HC_PORT1_START : HC_PORT0_START, | 4499 | |
4088 | func ? HC_PORT1_END : HC_PORT0_END); | 4500 | if (CHIP_IS_E1(bp)) { |
4089 | bnx2x_init_block(bp, func ? MISC_AEU_PORT1_START : | 4501 | REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); |
4502 | REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); | ||
4503 | } | ||
4504 | bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START, | ||
4505 | port ? HC_PORT1_END : HC_PORT0_END); | ||
4506 | |||
4507 | bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START : | ||
4090 | MISC_AEU_PORT0_START, | 4508 | MISC_AEU_PORT0_START, |
4091 | func ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END); | 4509 | port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END); |
4510 | /* init aeu_mask_attn_func_0/1: | ||
4511 | * - SF mode: bits 3-7 are masked. only bits 0-2 are in use | ||
4512 | * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF | ||
4513 | * bits 4-7 are used for "per vn group attention" */ | ||
4514 | REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, | ||
4515 | (IS_E1HMF(bp) ? 0xF7 : 0x7)); | ||
4516 | |||
4092 | /* Port PXPCS comes here */ | 4517 | /* Port PXPCS comes here */ |
4093 | /* Port EMAC0 comes here */ | 4518 | /* Port EMAC0 comes here */ |
4094 | /* Port EMAC1 comes here */ | 4519 | /* Port EMAC1 comes here */ |
4095 | /* Port DBU comes here */ | 4520 | /* Port DBU comes here */ |
4096 | /* Port DBG comes here */ | 4521 | /* Port DBG comes here */ |
4097 | bnx2x_init_block(bp, func ? NIG_PORT1_START : NIG_PORT0_START, | 4522 | bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START, |
4098 | func ? NIG_PORT1_END : NIG_PORT0_END); | 4523 | port ? NIG_PORT1_END : NIG_PORT0_END); |
4099 | REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + func*4, 1); | 4524 | |
4525 | REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); | ||
4526 | |||
4527 | if (CHIP_IS_E1H(bp)) { | ||
4528 | u32 wsum; | ||
4529 | struct cmng_struct_per_port m_cmng_port; | ||
4530 | int vn; | ||
4531 | |||
4532 | /* 0x2 disable e1hov, 0x1 enable */ | ||
4533 | REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, | ||
4534 | (IS_E1HMF(bp) ? 0x1 : 0x2)); | ||
4535 | |||
4536 | /* Init RATE SHAPING and FAIRNESS contexts. | ||
4537 | Initialize as if there is 10G link. */ | ||
4538 | wsum = bnx2x_calc_vn_wsum(bp); | ||
4539 | bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port); | ||
4540 | if (IS_E1HMF(bp)) | ||
4541 | for (vn = VN_0; vn < E1HVN_MAX; vn++) | ||
4542 | bnx2x_init_vn_minmax(bp, 2*vn + port, | ||
4543 | wsum, 10000, &m_cmng_port); | ||
4544 | } | ||
4545 | |||
4100 | /* Port MCP comes here */ | 4546 | /* Port MCP comes here */ |
4101 | /* Port DMAE comes here */ | 4547 | /* Port DMAE comes here */ |
4102 | 4548 | ||
4103 | switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) { | 4549 | switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) { |
4104 | case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G: | 4550 | case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G: |
4105 | /* add SPIO 5 to group 0 */ | 4551 | /* add SPIO 5 to group 0 */ |
4106 | val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); | 4552 | val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); |
@@ -4114,48 +4560,150 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode) | |||
4114 | 4560 | ||
4115 | bnx2x__link_reset(bp); | 4561 | bnx2x__link_reset(bp); |
4116 | 4562 | ||
4563 | return 0; | ||
4564 | } | ||
4565 | |||
4566 | #define ILT_PER_FUNC (768/2) | ||
4567 | #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC) | ||
4568 | /* the phys address is shifted right 12 bits and has an added | ||
4569 | 1=valid bit added to the 53rd bit | ||
4570 | then since this is a wide register(TM) | ||
4571 | we split it into two 32 bit writes | ||
4572 | */ | ||
4573 | #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF)) | ||
4574 | #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44))) | ||
4575 | #define PXP_ONE_ILT(x) (((x) << 10) | x) | ||
4576 | #define PXP_ILT_RANGE(f, l) (((l) << 10) | f) | ||
4577 | |||
4578 | #define CNIC_ILT_LINES 0 | ||
4579 | |||
4580 | static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) | ||
4581 | { | ||
4582 | int reg; | ||
4583 | |||
4584 | if (CHIP_IS_E1H(bp)) | ||
4585 | reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8; | ||
4586 | else /* E1 */ | ||
4587 | reg = PXP2_REG_RQ_ONCHIP_AT + index*8; | ||
4588 | |||
4589 | bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr)); | ||
4590 | } | ||
4591 | |||
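ONCHIP_ADDR1/ONCHIP_ADDR2 split a DMA address across the two 32-bit halves of a wide ILT entry: the low word carries address bits 12-43, the high word carries bits 44 and up plus a valid flag at bit 20 (bit 52 of the whole entry, matching the "53rd bit" comment above). A small standalone sketch with an arbitrary sample address shows what lands in each word:

    #include <stdint.h>
    #include <stdio.h>

    #define ONCHIP_ADDR1(x) ((uint32_t)(((uint64_t)(x) >> 12) & 0xFFFFFFFF))
    #define ONCHIP_ADDR2(x) ((uint32_t)((1 << 20) | ((uint64_t)(x) >> 44)))

    int main(void)
    {
            uint64_t addr = 0x0000123456789000ULL;  /* arbitrary page-aligned address */

            /* low word: address bits 12..43; high word: valid bit 20 + bits 44.. */
            printf("ILT low  = 0x%08x\n", (unsigned)ONCHIP_ADDR1(addr)); /* 0x23456789 */
            printf("ILT high = 0x%08x\n", (unsigned)ONCHIP_ADDR2(addr)); /* 0x00100001 */
            return 0;
    }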
4592 | static int bnx2x_init_func(struct bnx2x *bp) | ||
4593 | { | ||
4594 | int port = BP_PORT(bp); | ||
4595 | int func = BP_FUNC(bp); | ||
4596 | int i; | ||
4597 | |||
4598 | DP(BNX2X_MSG_MCP, "starting func init func %x\n", func); | ||
4599 | |||
4600 | i = FUNC_ILT_BASE(func); | ||
4601 | |||
4602 | bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context)); | ||
4603 | if (CHIP_IS_E1H(bp)) { | ||
4604 | REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i); | ||
4605 | REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES); | ||
4606 | } else /* E1 */ | ||
4607 | REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4, | ||
4608 | PXP_ILT_RANGE(i, i + CNIC_ILT_LINES)); | ||
4609 | |||
4610 | |||
4611 | if (CHIP_IS_E1H(bp)) { | ||
4612 | for (i = 0; i < 9; i++) | ||
4613 | bnx2x_init_block(bp, | ||
4614 | cm_start[func][i], cm_end[func][i]); | ||
4615 | |||
4616 | REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); | ||
4617 | REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov); | ||
4618 | } | ||
4619 | |||
4620 | /* HC init per function */ | ||
4621 | if (CHIP_IS_E1H(bp)) { | ||
4622 | REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); | ||
4623 | |||
4624 | REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); | ||
4625 | REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); | ||
4626 | } | ||
4627 | bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]); | ||
4628 | |||
4629 | if (CHIP_IS_E1H(bp)) | ||
4630 | REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func); | ||
4631 | |||
4117 | /* Reset PCIE errors for debug */ | 4632 | /* Reset PCIE errors for debug */ |
4118 | REG_WR(bp, 0x2114, 0xffffffff); | 4633 | REG_WR(bp, 0x2114, 0xffffffff); |
4119 | REG_WR(bp, 0x2120, 0xffffffff); | 4634 | REG_WR(bp, 0x2120, 0xffffffff); |
4120 | REG_WR(bp, 0x2814, 0xffffffff); | ||
4121 | 4635 | ||
4122 | /* !!! move to init_values.h */ | 4636 | return 0; |
4123 | REG_WR(bp, XSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1); | 4637 | } |
4124 | REG_WR(bp, USDM_REG_INIT_CREDIT_PXP_CTRL, 0x1); | 4638 | |
4125 | REG_WR(bp, CSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1); | 4639 | static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) |
4126 | REG_WR(bp, TSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1); | 4640 | { |
4641 | int i, rc = 0; | ||
4127 | 4642 | ||
4128 | REG_WR(bp, DBG_REG_PCI_REQ_CREDIT, 0x1); | 4643 | DP(BNX2X_MSG_MCP, "function %d load_code %x\n", |
4129 | REG_WR(bp, TM_REG_PCIARB_CRDCNT_VAL, 0x1); | 4644 | BP_FUNC(bp), load_code); |
4130 | REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264); | ||
4131 | REG_WR(bp, CDU_REG_CDU_DEBUG, 0x0); | ||
4132 | 4645 | ||
4133 | bnx2x_gunzip_end(bp); | 4646 | bp->dmae_ready = 0; |
4647 | mutex_init(&bp->dmae_mutex); | ||
4648 | bnx2x_gunzip_init(bp); | ||
4134 | 4649 | ||
4135 | if (!nomcp) { | 4650 | switch (load_code) { |
4136 | port = bp->port; | 4651 | case FW_MSG_CODE_DRV_LOAD_COMMON: |
4652 | rc = bnx2x_init_common(bp); | ||
4653 | if (rc) | ||
4654 | goto init_hw_err; | ||
4655 | /* no break */ | ||
4656 | |||
4657 | case FW_MSG_CODE_DRV_LOAD_PORT: | ||
4658 | bp->dmae_ready = 1; | ||
4659 | rc = bnx2x_init_port(bp); | ||
4660 | if (rc) | ||
4661 | goto init_hw_err; | ||
4662 | /* no break */ | ||
4663 | |||
4664 | case FW_MSG_CODE_DRV_LOAD_FUNCTION: | ||
4665 | bp->dmae_ready = 1; | ||
4666 | rc = bnx2x_init_func(bp); | ||
4667 | if (rc) | ||
4668 | goto init_hw_err; | ||
4669 | break; | ||
4670 | |||
4671 | default: | ||
4672 | BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code); | ||
4673 | break; | ||
4674 | } | ||
4675 | |||
4676 | if (!BP_NOMCP(bp)) { | ||
4677 | int func = BP_FUNC(bp); | ||
4137 | 4678 | ||
4138 | bp->fw_drv_pulse_wr_seq = | 4679 | bp->fw_drv_pulse_wr_seq = |
4139 | (SHMEM_RD(bp, func_mb[port].drv_pulse_mb) & | 4680 | (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) & |
4140 | DRV_PULSE_SEQ_MASK); | 4681 | DRV_PULSE_SEQ_MASK); |
4141 | bp->fw_mb = SHMEM_RD(bp, func_mb[port].fw_mb_param); | 4682 | bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param); |
4142 | DP(BNX2X_MSG_MCP, "drv_pulse 0x%x fw_mb 0x%x\n", | 4683 | DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n", |
4143 | bp->fw_drv_pulse_wr_seq, bp->fw_mb); | 4684 | bp->fw_drv_pulse_wr_seq, bp->func_stx); |
4144 | } else { | 4685 | } else |
4145 | bp->fw_mb = 0; | 4686 | bp->func_stx = 0; |
4146 | } | ||
4147 | 4687 | ||
4148 | return 0; | 4688 | /* this needs to be done before gunzip end */ |
4689 | bnx2x_zero_def_sb(bp); | ||
4690 | for_each_queue(bp, i) | ||
4691 | bnx2x_zero_sb(bp, BP_L_ID(bp) + i); | ||
4692 | |||
4693 | init_hw_err: | ||
4694 | bnx2x_gunzip_end(bp); | ||
4695 | |||
4696 | return rc; | ||
4149 | } | 4697 | } |
4150 | 4698 | ||
4151 | /* send the MCP a request, block until there is a reply */ | 4699 | /* send the MCP a request, block until there is a reply */ |
4152 | static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command) | 4700 | static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command) |
4153 | { | 4701 | { |
4154 | int port = bp->port; | 4702 | int func = BP_FUNC(bp); |
4155 | u32 seq = ++bp->fw_seq; | 4703 | u32 seq = ++bp->fw_seq; |
4156 | u32 rc = 0; | 4704 | u32 rc = 0; |
4157 | 4705 | ||
4158 | SHMEM_WR(bp, func_mb[port].drv_mb_header, (command | seq)); | 4706 | SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq)); |
4159 | DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq)); | 4707 | DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq)); |
4160 | 4708 | ||
4161 | /* let the FW do its magic ... */ | 4709 | /* let the FW do its magic ... */ |
@@ -4164,7 +4712,7 @@ static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command) | |||
4164 | if (CHIP_REV_IS_SLOW(bp)) | 4712 | if (CHIP_REV_IS_SLOW(bp)) |
4165 | msleep(900); | 4713 | msleep(900); |
4166 | 4714 | ||
4167 | rc = SHMEM_RD(bp, func_mb[port].fw_mb_header); | 4715 | rc = SHMEM_RD(bp, func_mb[func].fw_mb_header); |
4168 | DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq); | 4716 | DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq); |
4169 | 4717 | ||
4170 | /* is this a reply to our command? */ | 4718 | /* is this a reply to our command? */ |
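bnx2x_fw_command stamps each request with an incrementing sequence number in the low bits of the mailbox word, then reads the firmware's reply header and only trusts it if the sequence is echoed back. A rough standalone sketch of that matching step; the bit split and names below are assumptions made for illustration, not the driver's actual constants:

    #include <stdint.h>

    #define MB_SEQ_MASK     0x0000ffffu     /* assumed: low bits echo the sequence */
    #define MB_CODE_MASK    0xffff0000u     /* assumed: high bits carry the reply code */

    /* accept the firmware reply only if it echoes the sequence we sent;
     * a stale or foreign reply is reported as 0 so the caller can fail cleanly */
    uint32_t check_fw_reply(uint32_t reply, uint32_t seq)
    {
            if ((reply & MB_SEQ_MASK) == (seq & MB_SEQ_MASK))
                    return reply & MB_CODE_MASK;
            return 0;
    }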
@@ -4229,15 +4777,13 @@ static void bnx2x_free_mem(struct bnx2x *bp) | |||
4229 | NUM_RCQ_BD); | 4777 | NUM_RCQ_BD); |
4230 | } | 4778 | } |
4231 | 4779 | ||
4232 | BNX2X_FREE(bp->fp); | ||
4233 | |||
4234 | /* end of fastpath */ | 4780 | /* end of fastpath */ |
4235 | 4781 | ||
4236 | BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, | 4782 | BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, |
4237 | (sizeof(struct host_def_status_block))); | 4783 | sizeof(struct host_def_status_block)); |
4238 | 4784 | ||
4239 | BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, | 4785 | BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, |
4240 | (sizeof(struct bnx2x_slowpath))); | 4786 | sizeof(struct bnx2x_slowpath)); |
4241 | 4787 | ||
4242 | #ifdef BCM_ISCSI | 4788 | #ifdef BCM_ISCSI |
4243 | BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024); | 4789 | BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024); |
@@ -4273,8 +4819,6 @@ static int bnx2x_alloc_mem(struct bnx2x *bp) | |||
4273 | int i; | 4819 | int i; |
4274 | 4820 | ||
4275 | /* fastpath */ | 4821 | /* fastpath */ |
4276 | BNX2X_ALLOC(bp->fp, sizeof(struct bnx2x_fastpath) * bp->num_queues); | ||
4277 | |||
4278 | for_each_queue(bp, i) { | 4822 | for_each_queue(bp, i) { |
4279 | bnx2x_fp(bp, i, bp) = bp; | 4823 | bnx2x_fp(bp, i, bp) = bp; |
4280 | 4824 | ||
@@ -4370,8 +4914,6 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp) | |||
4370 | u16 sw_prod = fp->tx_pkt_prod; | 4914 | u16 sw_prod = fp->tx_pkt_prod; |
4371 | u16 sw_cons = fp->tx_pkt_cons; | 4915 | u16 sw_cons = fp->tx_pkt_cons; |
4372 | 4916 | ||
4373 | BUG_TRAP(fp->tx_buf_ring != NULL); | ||
4374 | |||
4375 | while (sw_cons != sw_prod) { | 4917 | while (sw_cons != sw_prod) { |
4376 | bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons)); | 4918 | bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons)); |
4377 | sw_cons++; | 4919 | sw_cons++; |
@@ -4386,8 +4928,6 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp) | |||
4386 | for_each_queue(bp, j) { | 4928 | for_each_queue(bp, j) { |
4387 | struct bnx2x_fastpath *fp = &bp->fp[j]; | 4929 | struct bnx2x_fastpath *fp = &bp->fp[j]; |
4388 | 4930 | ||
4389 | BUG_TRAP(fp->rx_buf_ring != NULL); | ||
4390 | |||
4391 | for (i = 0; i < NUM_RX_BD; i++) { | 4931 | for (i = 0; i < NUM_RX_BD; i++) { |
4392 | struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i]; | 4932 | struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i]; |
4393 | struct sk_buff *skb = rx_buf->skb; | 4933 | struct sk_buff *skb = rx_buf->skb; |
@@ -4414,7 +4954,7 @@ static void bnx2x_free_skbs(struct bnx2x *bp) | |||
4414 | 4954 | ||
4415 | static void bnx2x_free_msix_irqs(struct bnx2x *bp) | 4955 | static void bnx2x_free_msix_irqs(struct bnx2x *bp) |
4416 | { | 4956 | { |
4417 | int i; | 4957 | int i, offset = 1; |
4418 | 4958 | ||
4419 | free_irq(bp->msix_table[0].vector, bp->dev); | 4959 | free_irq(bp->msix_table[0].vector, bp->dev); |
4420 | DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n", | 4960 | DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n", |
@@ -4422,26 +4962,22 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp) | |||
4422 | 4962 | ||
4423 | for_each_queue(bp, i) { | 4963 | for_each_queue(bp, i) { |
4424 | DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq " | 4964 | DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq " |
4425 | "state(%x)\n", i, bp->msix_table[i + 1].vector, | 4965 | "state %x\n", i, bp->msix_table[i + offset].vector, |
4426 | bnx2x_fp(bp, i, state)); | 4966 | bnx2x_fp(bp, i, state)); |
4427 | 4967 | ||
4428 | if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED) | 4968 | if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED) |
4429 | BNX2X_ERR("IRQ of fp #%d being freed while " | 4969 | BNX2X_ERR("IRQ of fp #%d being freed while " |
4430 | "state != closed\n", i); | 4970 | "state != closed\n", i); |
4431 | 4971 | ||
4432 | free_irq(bp->msix_table[i + 1].vector, &bp->fp[i]); | 4972 | free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]); |
4433 | } | 4973 | } |
4434 | |||
4435 | } | 4974 | } |
4436 | 4975 | ||
4437 | static void bnx2x_free_irq(struct bnx2x *bp) | 4976 | static void bnx2x_free_irq(struct bnx2x *bp) |
4438 | { | 4977 | { |
4439 | |||
4440 | if (bp->flags & USING_MSIX_FLAG) { | 4978 | if (bp->flags & USING_MSIX_FLAG) { |
4441 | |||
4442 | bnx2x_free_msix_irqs(bp); | 4979 | bnx2x_free_msix_irqs(bp); |
4443 | pci_disable_msix(bp->pdev); | 4980 | pci_disable_msix(bp->pdev); |
4444 | |||
4445 | bp->flags &= ~USING_MSIX_FLAG; | 4981 | bp->flags &= ~USING_MSIX_FLAG; |
4446 | 4982 | ||
4447 | } else | 4983 | } else |
@@ -4450,87 +4986,87 @@ static void bnx2x_free_irq(struct bnx2x *bp) | |||
4450 | 4986 | ||
4451 | static int bnx2x_enable_msix(struct bnx2x *bp) | 4987 | static int bnx2x_enable_msix(struct bnx2x *bp) |
4452 | { | 4988 | { |
4453 | 4989 | int i, rc, offset; | |
4454 | int i; | ||
4455 | 4990 | ||
4456 | bp->msix_table[0].entry = 0; | 4991 | bp->msix_table[0].entry = 0; |
4457 | for_each_queue(bp, i) | 4992 | offset = 1; |
4458 | bp->msix_table[i + 1].entry = i + 1; | 4993 | DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n"); |
4459 | 4994 | ||
4460 | if (pci_enable_msix(bp->pdev, &bp->msix_table[0], | 4995 | for_each_queue(bp, i) { |
4461 | bp->num_queues + 1)){ | 4996 | int igu_vec = offset + i + BP_L_ID(bp); |
4462 | BNX2X_LOG("failed to enable MSI-X\n"); | ||
4463 | return -1; | ||
4464 | 4997 | ||
4998 | bp->msix_table[i + offset].entry = igu_vec; | ||
4999 | DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d " | ||
5000 | "(fastpath #%u)\n", i + offset, igu_vec, i); | ||
4465 | } | 5001 | } |
4466 | 5002 | ||
5003 | rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], | ||
5004 | bp->num_queues + offset); | ||
5005 | if (rc) { | ||
5006 | DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n"); | ||
5007 | return -1; | ||
5008 | } | ||
4467 | bp->flags |= USING_MSIX_FLAG; | 5009 | bp->flags |= USING_MSIX_FLAG; |
4468 | 5010 | ||
4469 | return 0; | 5011 | return 0; |
4470 | |||
4471 | } | 5012 | } |
4472 | 5013 | ||
4473 | |||
4474 | static int bnx2x_req_msix_irqs(struct bnx2x *bp) | 5014 | static int bnx2x_req_msix_irqs(struct bnx2x *bp) |
4475 | { | 5015 | { |
4476 | 5016 | int i, rc, offset = 1; | |
4477 | int i, rc; | ||
4478 | 5017 | ||
4479 | rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0, | 5018 | rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0, |
4480 | bp->dev->name, bp->dev); | 5019 | bp->dev->name, bp->dev); |
4481 | |||
4482 | if (rc) { | 5020 | if (rc) { |
4483 | BNX2X_ERR("request sp irq failed\n"); | 5021 | BNX2X_ERR("request sp irq failed\n"); |
4484 | return -EBUSY; | 5022 | return -EBUSY; |
4485 | } | 5023 | } |
4486 | 5024 | ||
4487 | for_each_queue(bp, i) { | 5025 | for_each_queue(bp, i) { |
4488 | rc = request_irq(bp->msix_table[i + 1].vector, | 5026 | rc = request_irq(bp->msix_table[i + offset].vector, |
4489 | bnx2x_msix_fp_int, 0, | 5027 | bnx2x_msix_fp_int, 0, |
4490 | bp->dev->name, &bp->fp[i]); | 5028 | bp->dev->name, &bp->fp[i]); |
4491 | |||
4492 | if (rc) { | 5029 | if (rc) { |
4493 | BNX2X_ERR("request fp #%d irq failed " | 5030 | BNX2X_ERR("request fp #%d irq failed rc %d\n", |
4494 | "rc %d\n", i, rc); | 5031 | i + offset, rc); |
4495 | bnx2x_free_msix_irqs(bp); | 5032 | bnx2x_free_msix_irqs(bp); |
4496 | return -EBUSY; | 5033 | return -EBUSY; |
4497 | } | 5034 | } |
4498 | 5035 | ||
4499 | bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ; | 5036 | bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ; |
4500 | |||
4501 | } | 5037 | } |
4502 | 5038 | ||
4503 | return 0; | 5039 | return 0; |
4504 | |||
4505 | } | 5040 | } |
4506 | 5041 | ||
4507 | static int bnx2x_req_irq(struct bnx2x *bp) | 5042 | static int bnx2x_req_irq(struct bnx2x *bp) |
4508 | { | 5043 | { |
5044 | int rc; | ||
4509 | 5045 | ||
4510 | int rc = request_irq(bp->pdev->irq, bnx2x_interrupt, | 5046 | rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED, |
4511 | IRQF_SHARED, bp->dev->name, bp->dev); | 5047 | bp->dev->name, bp->dev); |
4512 | if (!rc) | 5048 | if (!rc) |
4513 | bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ; | 5049 | bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ; |
4514 | 5050 | ||
4515 | return rc; | 5051 | return rc; |
4516 | |||
4517 | } | 5052 | } |
4518 | 5053 | ||
4519 | /* | 5054 | /* |
4520 | * Init service functions | 5055 | * Init service functions |
4521 | */ | 5056 | */ |
4522 | 5057 | ||
4523 | static void bnx2x_set_mac_addr(struct bnx2x *bp) | 5058 | static void bnx2x_set_mac_addr_e1(struct bnx2x *bp) |
4524 | { | 5059 | { |
4525 | struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config); | 5060 | struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config); |
5061 | int port = BP_PORT(bp); | ||
4526 | 5062 | ||
4527 | /* CAM allocation | 5063 | /* CAM allocation |
4528 | * unicasts 0-31:port0 32-63:port1 | 5064 | * unicasts 0-31:port0 32-63:port1 |
4529 | * multicast 64-127:port0 128-191:port1 | 5065 | * multicast 64-127:port0 128-191:port1 |
4530 | */ | 5066 | */ |
4531 | config->hdr.length_6b = 2; | 5067 | config->hdr.length_6b = 2; |
4532 | config->hdr.offset = bp->port ? 31 : 0; | 5068 | config->hdr.offset = port ? 31 : 0; |
4533 | config->hdr.reserved0 = 0; | 5069 | config->hdr.client_id = BP_CL_ID(bp); |
4534 | config->hdr.reserved1 = 0; | 5070 | config->hdr.reserved1 = 0; |
4535 | 5071 | ||
4536 | /* primary MAC */ | 5072 | /* primary MAC */ |
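The hunk above reworks bnx2x_enable_msix() so that entry 0 of the MSI-X table always carries the slow-path interrupt and each fast-path queue i takes entry i + offset, programmed with its IGU vector (offset + i + BP_L_ID(bp)). A minimal user-space sketch of that table layout; fill_msix_table() and base_l_id are illustrative stand-ins, not driver API:

#include <stdio.h>

#define MAX_QUEUES 16

struct msix_entry_sketch {
	int entry;	/* IGU vector requested from the PCI core */
	int vector;	/* IRQ number, filled in by pci_enable_msix() */
};

static void fill_msix_table(struct msix_entry_sketch *table,
			    int num_queues, int base_l_id)
{
	int offset = 1;	/* entry 0 is reserved for the slow path */
	int i;

	table[0].entry = 0;
	for (i = 0; i < num_queues; i++)
		table[i + offset].entry = offset + i + base_l_id;
}

int main(void)
{
	struct msix_entry_sketch table[MAX_QUEUES + 1];
	int i;

	fill_msix_table(table, 4, 0);
	for (i = 0; i < 5; i++)
		printf("msix_table[%d].entry = %d\n", i, table[i].entry);
	return 0;
}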
@@ -4540,7 +5076,7 @@ static void bnx2x_set_mac_addr(struct bnx2x *bp) | |||
4540 | swab16(*(u16 *)&bp->dev->dev_addr[2]); | 5076 | swab16(*(u16 *)&bp->dev->dev_addr[2]); |
4541 | config->config_table[0].cam_entry.lsb_mac_addr = | 5077 | config->config_table[0].cam_entry.lsb_mac_addr = |
4542 | swab16(*(u16 *)&bp->dev->dev_addr[4]); | 5078 | swab16(*(u16 *)&bp->dev->dev_addr[4]); |
4543 | config->config_table[0].cam_entry.flags = cpu_to_le16(bp->port); | 5079 | config->config_table[0].cam_entry.flags = cpu_to_le16(port); |
4544 | config->config_table[0].target_table_entry.flags = 0; | 5080 | config->config_table[0].target_table_entry.flags = 0; |
4545 | config->config_table[0].target_table_entry.client_id = 0; | 5081 | config->config_table[0].target_table_entry.client_id = 0; |
4546 | config->config_table[0].target_table_entry.vlan_id = 0; | 5082 | config->config_table[0].target_table_entry.vlan_id = 0; |
@@ -4554,7 +5090,7 @@ static void bnx2x_set_mac_addr(struct bnx2x *bp) | |||
4554 | config->config_table[1].cam_entry.msb_mac_addr = 0xffff; | 5090 | config->config_table[1].cam_entry.msb_mac_addr = 0xffff; |
4555 | config->config_table[1].cam_entry.middle_mac_addr = 0xffff; | 5091 | config->config_table[1].cam_entry.middle_mac_addr = 0xffff; |
4556 | config->config_table[1].cam_entry.lsb_mac_addr = 0xffff; | 5092 | config->config_table[1].cam_entry.lsb_mac_addr = 0xffff; |
4557 | config->config_table[1].cam_entry.flags = cpu_to_le16(bp->port); | 5093 | config->config_table[1].cam_entry.flags = cpu_to_le16(port); |
4558 | config->config_table[1].target_table_entry.flags = | 5094 | config->config_table[1].target_table_entry.flags = |
4559 | TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST; | 5095 | TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST; |
4560 | config->config_table[1].target_table_entry.client_id = 0; | 5096 | config->config_table[1].target_table_entry.client_id = 0; |
@@ -4565,64 +5101,105 @@ static void bnx2x_set_mac_addr(struct bnx2x *bp) | |||
4565 | U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); | 5101 | U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); |
4566 | } | 5102 | } |
4567 | 5103 | ||
5104 | static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp) | ||
5105 | { | ||
5106 | struct mac_configuration_cmd_e1h *config = | ||
5107 | (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config); | ||
5108 | |||
5109 | if (bp->state != BNX2X_STATE_OPEN) { | ||
5110 | DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); | ||
5111 | return; | ||
5112 | } | ||
5113 | |||
5114 | /* CAM allocation for E1H | ||
5115 | * unicasts: by func number | ||
5116 | * multicast: 20+FUNC*20, 20 each | ||
5117 | */ | ||
5118 | config->hdr.length_6b = 1; | ||
5119 | config->hdr.offset = BP_FUNC(bp); | ||
5120 | config->hdr.client_id = BP_CL_ID(bp); | ||
5121 | config->hdr.reserved1 = 0; | ||
5122 | |||
5123 | /* primary MAC */ | ||
5124 | config->config_table[0].msb_mac_addr = | ||
5125 | swab16(*(u16 *)&bp->dev->dev_addr[0]); | ||
5126 | config->config_table[0].middle_mac_addr = | ||
5127 | swab16(*(u16 *)&bp->dev->dev_addr[2]); | ||
5128 | config->config_table[0].lsb_mac_addr = | ||
5129 | swab16(*(u16 *)&bp->dev->dev_addr[4]); | ||
5130 | config->config_table[0].client_id = BP_L_ID(bp); | ||
5131 | config->config_table[0].vlan_id = 0; | ||
5132 | config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov); | ||
5133 | config->config_table[0].flags = BP_PORT(bp); | ||
5134 | |||
5135 | DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n", | ||
5136 | config->config_table[0].msb_mac_addr, | ||
5137 | config->config_table[0].middle_mac_addr, | ||
5138 | config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp)); | ||
5139 | |||
5140 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, | ||
5141 | U64_HI(bnx2x_sp_mapping(bp, mac_config)), | ||
5142 | U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); | ||
5143 | } | ||
5144 | |||
4568 | static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx, | 5145 | static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx, |
4569 | int *state_p, int poll) | 5146 | int *state_p, int poll) |
4570 | { | 5147 | { |
4571 | /* can take a while if any port is running */ | 5148 | /* can take a while if any port is running */ |
4572 | int timeout = 500; | 5149 | int cnt = 500; |
4573 | 5150 | ||
4574 | DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n", | 5151 | DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n", |
4575 | poll ? "polling" : "waiting", state, idx); | 5152 | poll ? "polling" : "waiting", state, idx); |
4576 | 5153 | ||
4577 | might_sleep(); | 5154 | might_sleep(); |
4578 | 5155 | while (cnt--) { | |
4579 | while (timeout) { | ||
4580 | |||
4581 | if (poll) { | 5156 | if (poll) { |
4582 | bnx2x_rx_int(bp->fp, 10); | 5157 | bnx2x_rx_int(bp->fp, 10); |
4583 | /* If index is different from 0 | 5158 | /* if index is different from 0 |
4584 | * The reply for some commands will | 5159 | * the reply for some commands will |
4585 | * be on the none default queue | 5160 | * be on the none default queue |
4586 | */ | 5161 | */ |
4587 | if (idx) | 5162 | if (idx) |
4588 | bnx2x_rx_int(&bp->fp[idx], 10); | 5163 | bnx2x_rx_int(&bp->fp[idx], 10); |
4589 | } | 5164 | } |
4590 | 5165 | mb(); /* state is changed by bnx2x_sp_event() */ | |
4591 | mb(); /* state is changed by bnx2x_sp_event()*/ | ||
4592 | 5166 | ||
4593 | if (*state_p == state) | 5167 | if (*state_p == state) |
4594 | return 0; | 5168 | return 0; |
4595 | 5169 | ||
4596 | timeout--; | ||
4597 | msleep(1); | 5170 | msleep(1); |
4598 | |||
4599 | } | 5171 | } |
4600 | 5172 | ||
4601 | /* timeout! */ | 5173 | /* timeout! */ |
4602 | BNX2X_ERR("timeout %s for state %x on IDX [%d]\n", | 5174 | BNX2X_ERR("timeout %s for state %x on IDX [%d]\n", |
4603 | poll ? "polling" : "waiting", state, idx); | 5175 | poll ? "polling" : "waiting", state, idx); |
5176 | #ifdef BNX2X_STOP_ON_ERROR | ||
5177 | bnx2x_panic(); | ||
5178 | #endif | ||
4604 | 5179 | ||
4605 | return -EBUSY; | 5180 | return -EBUSY; |
4606 | } | 5181 | } |
4607 | 5182 | ||
4608 | static int bnx2x_setup_leading(struct bnx2x *bp) | 5183 | static int bnx2x_setup_leading(struct bnx2x *bp) |
4609 | { | 5184 | { |
5185 | int rc; | ||
4610 | 5186 | ||
4611 | /* reset IGU state */ | 5187 | /* reset IGU state */ |
4612 | bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, 0, IGU_INT_ENABLE, 0); | 5188 | bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); |
4613 | 5189 | ||
4614 | /* SETUP ramrod */ | 5190 | /* SETUP ramrod */ |
4615 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0); | 5191 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0); |
4616 | 5192 | ||
4617 | return bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0); | 5193 | /* Wait for completion */ |
5194 | rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0); | ||
4618 | 5195 | ||
5196 | return rc; | ||
4619 | } | 5197 | } |
4620 | 5198 | ||
4621 | static int bnx2x_setup_multi(struct bnx2x *bp, int index) | 5199 | static int bnx2x_setup_multi(struct bnx2x *bp, int index) |
4622 | { | 5200 | { |
4623 | |||
4624 | /* reset IGU state */ | 5201 | /* reset IGU state */ |
4625 | bnx2x_ack_sb(bp, index, CSTORM_ID, 0, IGU_INT_ENABLE, 0); | 5202 | bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); |
4626 | 5203 | ||
4627 | /* SETUP ramrod */ | 5204 | /* SETUP ramrod */ |
4628 | bp->fp[index].state = BNX2X_FP_STATE_OPENING; | 5205 | bp->fp[index].state = BNX2X_FP_STATE_OPENING; |
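bnx2x_wait_ramrod() above polls a state word that is updated from the slow-path event handler, re-reading it behind a memory barrier and sleeping 1 ms per iteration until the count runs out. A minimal sketch of that polling pattern, assuming a user-space context (wait_for_state() is an illustrative name; __sync_synchronize() and usleep() stand in for mb() and msleep()):

#include <unistd.h>
#include <stdio.h>

static int wait_for_state(volatile int *state_p, int wanted, int max_ms)
{
	int cnt = max_ms;

	while (cnt--) {
		__sync_synchronize();	/* stand-in for the kernel's mb() */
		if (*state_p == wanted)
			return 0;	/* completion arrived */
		usleep(1000);		/* msleep(1) in the driver */
	}
	return -1;			/* timed out; the driver returns -EBUSY */
}

int main(void)
{
	volatile int state = 2;		/* pretend the ramrod already completed */

	printf("wait result: %d\n", wait_for_state(&state, 2, 500));
	return 0;
}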
@@ -4631,82 +5208,107 @@ static int bnx2x_setup_multi(struct bnx2x *bp, int index) | |||
4631 | /* Wait for completion */ | 5208 | /* Wait for completion */ |
4632 | return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index, | 5209 | return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index, |
4633 | &(bp->fp[index].state), 0); | 5210 | &(bp->fp[index].state), 0); |
4634 | |||
4635 | } | 5211 | } |
4636 | 5212 | ||
4637 | |||
4638 | static int bnx2x_poll(struct napi_struct *napi, int budget); | 5213 | static int bnx2x_poll(struct napi_struct *napi, int budget); |
4639 | static void bnx2x_set_rx_mode(struct net_device *dev); | 5214 | static void bnx2x_set_rx_mode(struct net_device *dev); |
4640 | 5215 | ||
4641 | static int bnx2x_nic_load(struct bnx2x *bp, int req_irq) | 5216 | /* must be called with rtnl_lock */ |
5217 | static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | ||
4642 | { | 5218 | { |
4643 | u32 load_code; | 5219 | u32 load_code; |
4644 | int i; | 5220 | int i, rc; |
5221 | |||
5222 | #ifdef BNX2X_STOP_ON_ERROR | ||
5223 | if (unlikely(bp->panic)) | ||
5224 | return -EPERM; | ||
5225 | #endif | ||
4645 | 5226 | ||
4646 | bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; | 5227 | bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; |
4647 | 5228 | ||
4648 | /* Send LOAD_REQUEST command to MCP. | 5229 | /* Send LOAD_REQUEST command to MCP |
4649 | Returns the type of LOAD command: if it is the | 5230 | Returns the type of LOAD command: |
4650 | first port to be initialized common blocks should be | 5231 | if it is the first port to be initialized |
4651 | initialized, otherwise - not. | 5232 | common blocks should be initialized, otherwise - not |
4652 | */ | 5233 | */ |
4653 | if (!nomcp) { | 5234 | if (!BP_NOMCP(bp)) { |
4654 | load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ); | 5235 | load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ); |
4655 | if (!load_code) { | 5236 | if (!load_code) { |
4656 | BNX2X_ERR("MCP response failure, unloading\n"); | 5237 | BNX2X_ERR("MCP response failure, unloading\n"); |
4657 | return -EBUSY; | 5238 | return -EBUSY; |
4658 | } | 5239 | } |
4659 | if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) { | 5240 | if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) |
4660 | BNX2X_ERR("MCP refused load request, unloading\n"); | ||
4661 | return -EBUSY; /* other port in diagnostic mode */ | 5241 | return -EBUSY; /* other port in diagnostic mode */ |
4662 | } | 5242 | |
4663 | } else { | 5243 | } else { |
4664 | load_code = FW_MSG_CODE_DRV_LOAD_COMMON; | 5244 | DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n", |
5245 | load_count[0], load_count[1], load_count[2]); | ||
5246 | load_count[0]++; | ||
5247 | load_count[1 + BP_PORT(bp)]++; | ||
5248 | DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n", | ||
5249 | load_count[0], load_count[1], load_count[2]); | ||
5250 | if (load_count[0] == 1) | ||
5251 | load_code = FW_MSG_CODE_DRV_LOAD_COMMON; | ||
5252 | else if (load_count[1 + BP_PORT(bp)] == 1) | ||
5253 | load_code = FW_MSG_CODE_DRV_LOAD_PORT; | ||
5254 | else | ||
5255 | load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION; | ||
4665 | } | 5256 | } |
4666 | 5257 | ||
4667 | /* if we can't use msix we only need one fp, | 5258 | if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || |
4668 | * so try to enable msix with the requested number of fp's | 5259 | (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) |
5260 | bp->port.pmf = 1; | ||
5261 | else | ||
5262 | bp->port.pmf = 0; | ||
5263 | DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); | ||
5264 | |||
5265 | /* if we can't use MSI-X we only need one fp, | ||
5266 | * so try to enable MSI-X with the requested number of fp's | ||
4669 | * and fallback to inta with one fp | 5267 | * and fallback to inta with one fp |
4670 | */ | 5268 | */ |
4671 | if (req_irq) { | 5269 | if (use_inta) { |
4672 | if (use_inta) { | 5270 | bp->num_queues = 1; |
5271 | |||
5272 | } else { | ||
5273 | if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp))) | ||
5274 | /* user requested number */ | ||
5275 | bp->num_queues = use_multi; | ||
5276 | |||
5277 | else if (use_multi) | ||
5278 | bp->num_queues = min_t(u32, num_online_cpus(), | ||
5279 | BP_MAX_QUEUES(bp)); | ||
5280 | else | ||
4673 | bp->num_queues = 1; | 5281 | bp->num_queues = 1; |
4674 | } else { | 5282 | |
4675 | if ((use_multi > 1) && (use_multi <= 16)) | 5283 | if (bnx2x_enable_msix(bp)) { |
4676 | /* user requested number */ | 5284 | /* failed to enable MSI-X */ |
4677 | bp->num_queues = use_multi; | 5285 | bp->num_queues = 1; |
4678 | else if (use_multi == 1) | 5286 | if (use_multi) |
4679 | bp->num_queues = num_online_cpus(); | 5287 | BNX2X_ERR("Multi requested but failed" |
4680 | else | 5288 | " to enable MSI-X\n"); |
4681 | bp->num_queues = 1; | ||
4682 | |||
4683 | if (bnx2x_enable_msix(bp)) { | ||
4684 | /* failed to enable msix */ | ||
4685 | bp->num_queues = 1; | ||
4686 | if (use_multi) | ||
4687 | BNX2X_ERR("Multi requested but failed" | ||
4688 | " to enable MSI-X\n"); | ||
4689 | } | ||
4690 | } | 5289 | } |
4691 | } | 5290 | } |
4692 | 5291 | DP(NETIF_MSG_IFUP, | |
4693 | DP(NETIF_MSG_IFUP, "set number of queues to %d\n", bp->num_queues); | 5292 | "set number of queues to %d\n", bp->num_queues); |
4694 | 5293 | ||
4695 | if (bnx2x_alloc_mem(bp)) | 5294 | if (bnx2x_alloc_mem(bp)) |
4696 | return -ENOMEM; | 5295 | return -ENOMEM; |
4697 | 5296 | ||
4698 | if (req_irq) { | 5297 | /* Disable interrupt handling until HW is initialized */ |
4699 | if (bp->flags & USING_MSIX_FLAG) { | 5298 | atomic_set(&bp->intr_sem, 1); |
4700 | if (bnx2x_req_msix_irqs(bp)) { | ||
4701 | pci_disable_msix(bp->pdev); | ||
4702 | goto load_error; | ||
4703 | } | ||
4704 | 5299 | ||
4705 | } else { | 5300 | if (bp->flags & USING_MSIX_FLAG) { |
4706 | if (bnx2x_req_irq(bp)) { | 5301 | rc = bnx2x_req_msix_irqs(bp); |
4707 | BNX2X_ERR("IRQ request failed, aborting\n"); | 5302 | if (rc) { |
4708 | goto load_error; | 5303 | pci_disable_msix(bp->pdev); |
4709 | } | 5304 | goto load_error; |
5305 | } | ||
5306 | } else { | ||
5307 | bnx2x_ack_int(bp); | ||
5308 | rc = bnx2x_req_irq(bp); | ||
5309 | if (rc) { | ||
5310 | BNX2X_ERR("IRQ request failed, aborting\n"); | ||
5311 | goto load_error; | ||
4710 | } | 5312 | } |
4711 | } | 5313 | } |
4712 | 5314 | ||
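When the MCP is absent, the load path above keeps its own load_count[] bookkeeping: index 0 counts functions on the whole device and index 1 + port counts functions per port, so the first loader initializes the common blocks, the first on a port initializes the port, and everyone else does function-only init. A self-contained sketch of that accounting (register_load() and the LOAD_* names are illustrative):

#include <stdio.h>

enum load_kind { LOAD_COMMON, LOAD_PORT, LOAD_FUNCTION };

static int load_count[3];		/* [0] device, [1] port 0, [2] port 1 */

static enum load_kind register_load(int port)
{
	load_count[0]++;		/* one more function on the chip */
	load_count[1 + port]++;		/* one more function on this port */

	if (load_count[0] == 1)
		return LOAD_COMMON;	/* first on the chip: init common blocks */
	if (load_count[1 + port] == 1)
		return LOAD_PORT;	/* first on this port: init the port */
	return LOAD_FUNCTION;		/* only per-function init needed */
}

int main(void)
{
	printf("%d %d %d\n", register_load(0), register_load(1), register_load(0));
	return 0;
}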
@@ -4714,26 +5316,25 @@ static int bnx2x_nic_load(struct bnx2x *bp, int req_irq) | |||
4714 | netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), | 5316 | netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), |
4715 | bnx2x_poll, 128); | 5317 | bnx2x_poll, 128); |
4716 | 5318 | ||
4717 | |||
4718 | /* Initialize HW */ | 5319 | /* Initialize HW */ |
4719 | if (bnx2x_function_init(bp, | 5320 | rc = bnx2x_init_hw(bp, load_code); |
4720 | (load_code == FW_MSG_CODE_DRV_LOAD_COMMON))) { | 5321 | if (rc) { |
4721 | BNX2X_ERR("HW init failed, aborting\n"); | 5322 | BNX2X_ERR("HW init failed, aborting\n"); |
4722 | goto load_error; | 5323 | goto load_error; |
4723 | } | 5324 | } |
4724 | 5325 | ||
4725 | 5326 | /* Enable interrupt handling */ | |
4726 | atomic_set(&bp->intr_sem, 0); | 5327 | atomic_set(&bp->intr_sem, 0); |
4727 | 5328 | ||
4728 | |||
4729 | /* Setup NIC internals and enable interrupts */ | 5329 | /* Setup NIC internals and enable interrupts */ |
4730 | bnx2x_nic_init(bp); | 5330 | bnx2x_nic_init(bp); |
4731 | 5331 | ||
4732 | /* Send LOAD_DONE command to MCP */ | 5332 | /* Send LOAD_DONE command to MCP */ |
4733 | if (!nomcp) { | 5333 | if (!BP_NOMCP(bp)) { |
4734 | load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE); | 5334 | load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE); |
4735 | if (!load_code) { | 5335 | if (!load_code) { |
4736 | BNX2X_ERR("MCP response failure, unloading\n"); | 5336 | BNX2X_ERR("MCP response failure, unloading\n"); |
5337 | rc = -EBUSY; | ||
4737 | goto load_int_disable; | 5338 | goto load_int_disable; |
4738 | } | 5339 | } |
4739 | } | 5340 | } |
@@ -4745,33 +5346,68 @@ static int bnx2x_nic_load(struct bnx2x *bp, int req_irq) | |||
4745 | for_each_queue(bp, i) | 5346 | for_each_queue(bp, i) |
4746 | napi_enable(&bnx2x_fp(bp, i, napi)); | 5347 | napi_enable(&bnx2x_fp(bp, i, napi)); |
4747 | 5348 | ||
4748 | if (bnx2x_setup_leading(bp)) | 5349 | rc = bnx2x_setup_leading(bp); |
5350 | if (rc) { | ||
5351 | #ifdef BNX2X_STOP_ON_ERROR | ||
5352 | bp->panic = 1; | ||
5353 | #endif | ||
4749 | goto load_stop_netif; | 5354 | goto load_stop_netif; |
5355 | } | ||
4750 | 5356 | ||
4751 | for_each_nondefault_queue(bp, i) | 5357 | if (CHIP_IS_E1H(bp)) |
4752 | if (bnx2x_setup_multi(bp, i)) | 5358 | if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) { |
4753 | goto load_stop_netif; | 5359 | BNX2X_ERR("!!! mf_cfg function disabled\n"); |
5360 | bp->state = BNX2X_STATE_DISABLED; | ||
5361 | } | ||
4754 | 5362 | ||
4755 | bnx2x_set_mac_addr(bp); | 5363 | if (bp->state == BNX2X_STATE_OPEN) |
5364 | for_each_nondefault_queue(bp, i) { | ||
5365 | rc = bnx2x_setup_multi(bp, i); | ||
5366 | if (rc) | ||
5367 | goto load_stop_netif; | ||
5368 | } | ||
5369 | |||
5370 | if (CHIP_IS_E1(bp)) | ||
5371 | bnx2x_set_mac_addr_e1(bp); | ||
5372 | else | ||
5373 | bnx2x_set_mac_addr_e1h(bp); | ||
4756 | 5374 | ||
4757 | bnx2x_initial_phy_init(bp); | 5375 | if (bp->port.pmf) |
5376 | bnx2x_initial_phy_init(bp); | ||
4758 | 5377 | ||
4759 | /* Start fast path */ | 5378 | /* Start fast path */ |
4760 | if (req_irq) { /* IRQ is only requested from bnx2x_open */ | 5379 | switch (load_mode) { |
5380 | case LOAD_NORMAL: | ||
5381 | /* Tx queue should be only reenabled */ | ||
5382 | netif_wake_queue(bp->dev); | ||
5383 | bnx2x_set_rx_mode(bp->dev); | ||
5384 | break; | ||
5385 | |||
5386 | case LOAD_OPEN: | ||
5387 | /* IRQ is only requested from bnx2x_open */ | ||
4761 | netif_start_queue(bp->dev); | 5388 | netif_start_queue(bp->dev); |
5389 | bnx2x_set_rx_mode(bp->dev); | ||
4762 | if (bp->flags & USING_MSIX_FLAG) | 5390 | if (bp->flags & USING_MSIX_FLAG) |
4763 | printk(KERN_INFO PFX "%s: using MSI-X\n", | 5391 | printk(KERN_INFO PFX "%s: using MSI-X\n", |
4764 | bp->dev->name); | 5392 | bp->dev->name); |
5393 | break; | ||
4765 | 5394 | ||
4766 | /* Otherwise Tx queue should be only reenabled */ | 5395 | case LOAD_DIAG: |
4767 | } else if (netif_running(bp->dev)) { | ||
4768 | netif_wake_queue(bp->dev); | ||
4769 | bnx2x_set_rx_mode(bp->dev); | 5396 | bnx2x_set_rx_mode(bp->dev); |
5397 | bp->state = BNX2X_STATE_DIAG; | ||
5398 | break; | ||
5399 | |||
5400 | default: | ||
5401 | break; | ||
4770 | } | 5402 | } |
4771 | 5403 | ||
5404 | if (!bp->port.pmf) | ||
5405 | bnx2x__link_status_update(bp); | ||
5406 | |||
4772 | /* start the timer */ | 5407 | /* start the timer */ |
4773 | mod_timer(&bp->timer, jiffies + bp->current_interval); | 5408 | mod_timer(&bp->timer, jiffies + bp->current_interval); |
4774 | 5409 | ||
5410 | |||
4775 | return 0; | 5411 | return 0; |
4776 | 5412 | ||
4777 | load_stop_netif: | 5413 | load_stop_netif: |
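The new load_mode switch distinguishes how the fast path is (re)started: LOAD_NORMAL only re-wakes an already created Tx queue (used e.g. by the reset task), LOAD_OPEN starts it for the first time from ndo_open, and LOAD_DIAG leaves the device in the DIAG state for selftests. A rough user-space sketch of that dispatch, with printf stubs in place of the netdev calls:

#include <stdio.h>

enum load_mode { LOAD_NORMAL, LOAD_OPEN, LOAD_DIAG };

/* Stubs standing in for the netdev calls used in the real switch. */
static void tx_wake(void)     { printf("netif_wake_queue\n"); }
static void tx_start(void)    { printf("netif_start_queue\n"); }
static void set_rx_mode(void) { printf("set_rx_mode\n"); }

static void start_fastpath(enum load_mode mode)
{
	switch (mode) {
	case LOAD_NORMAL:	/* queue already exists, only re-enable it */
		tx_wake();
		set_rx_mode();
		break;
	case LOAD_OPEN:		/* ndo_open: queue has never been started */
		tx_start();
		set_rx_mode();
		break;
	case LOAD_DIAG:		/* selftest: no Tx, device stays in DIAG state */
		set_rx_mode();
		break;
	}
}

int main(void)
{
	start_fastpath(LOAD_OPEN);
	return 0;
}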
@@ -4781,7 +5417,7 @@ load_stop_netif: | |||
4781 | load_int_disable: | 5417 | load_int_disable: |
4782 | bnx2x_int_disable_sync(bp); | 5418 | bnx2x_int_disable_sync(bp); |
4783 | 5419 | ||
4784 | bnx2x_free_skbs(bp); | 5420 | /* Release IRQs */ |
4785 | bnx2x_free_irq(bp); | 5421 | bnx2x_free_irq(bp); |
4786 | 5422 | ||
4787 | load_error: | 5423 | load_error: |
@@ -4789,95 +5425,50 @@ load_error: | |||
4789 | 5425 | ||
4790 | /* TBD we really need to reset the chip | 5426 | /* TBD we really need to reset the chip |
4791 | if we want to recover from this */ | 5427 | if we want to recover from this */ |
4792 | return -EBUSY; | 5428 | return rc; |
4793 | } | ||
4794 | |||
4795 | |||
4796 | static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code) | ||
4797 | { | ||
4798 | int port = bp->port; | ||
4799 | #ifdef USE_DMAE | ||
4800 | u32 wb_write[2]; | ||
4801 | #endif | ||
4802 | int base, i; | ||
4803 | |||
4804 | DP(NETIF_MSG_IFDOWN, "reset called with code %x\n", reset_code); | ||
4805 | |||
4806 | /* Do not rcv packets to BRB */ | ||
4807 | REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0); | ||
4808 | /* Do not direct rcv packets that are not for MCP to the BRB */ | ||
4809 | REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : | ||
4810 | NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); | ||
4811 | |||
4812 | /* Configure IGU and AEU */ | ||
4813 | REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000); | ||
4814 | REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0); | ||
4815 | |||
4816 | /* TODO: Close Doorbell port? */ | ||
4817 | |||
4818 | /* Clear ILT */ | ||
4819 | #ifdef USE_DMAE | ||
4820 | wb_write[0] = 0; | ||
4821 | wb_write[1] = 0; | ||
4822 | #endif | ||
4823 | base = port * RQ_ONCHIP_AT_PORT_SIZE; | ||
4824 | for (i = base; i < base + RQ_ONCHIP_AT_PORT_SIZE; i++) { | ||
4825 | #ifdef USE_DMAE | ||
4826 | REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2); | ||
4827 | #else | ||
4828 | REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT, 0); | ||
4829 | REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + 4, 0); | ||
4830 | #endif | ||
4831 | } | ||
4832 | |||
4833 | if (reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) { | ||
4834 | /* reset_common */ | ||
4835 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, | ||
4836 | 0xd3ffff7f); | ||
4837 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, | ||
4838 | 0x1403); | ||
4839 | } | ||
4840 | } | 5429 | } |
4841 | 5430 | ||
4842 | static int bnx2x_stop_multi(struct bnx2x *bp, int index) | 5431 | static int bnx2x_stop_multi(struct bnx2x *bp, int index) |
4843 | { | 5432 | { |
4844 | |||
4845 | int rc; | 5433 | int rc; |
4846 | 5434 | ||
4847 | /* halt the connection */ | 5435 | /* halt the connection */ |
4848 | bp->fp[index].state = BNX2X_FP_STATE_HALTING; | 5436 | bp->fp[index].state = BNX2X_FP_STATE_HALTING; |
4849 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0); | 5437 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0); |
4850 | 5438 | ||
4851 | 5439 | /* Wait for completion */ | |
4852 | rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index, | 5440 | rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index, |
4853 | &(bp->fp[index].state), 1); | 5441 | &(bp->fp[index].state), 1); |
4854 | if (rc) /* timeout */ | 5442 | if (rc) /* timeout */ |
4855 | return rc; | 5443 | return rc; |
4856 | 5444 | ||
4857 | /* delete cfc entry */ | 5445 | /* delete cfc entry */ |
4858 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1); | 5446 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1); |
4859 | 5447 | ||
4860 | return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index, | 5448 | /* Wait for completion */ |
4861 | &(bp->fp[index].state), 1); | 5449 | rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index, |
4862 | 5450 | &(bp->fp[index].state), 1); | |
5451 | return rc; | ||
4863 | } | 5452 | } |
4864 | 5453 | ||
4865 | |||
4866 | static void bnx2x_stop_leading(struct bnx2x *bp) | 5454 | static void bnx2x_stop_leading(struct bnx2x *bp) |
4867 | { | 5455 | { |
4868 | u16 dsb_sp_prod_idx; | 5456 | u16 dsb_sp_prod_idx; |
4869 | /* if the other port is handling traffic, | 5457 | /* if the other port is handling traffic, |
4870 | this can take a lot of time */ | 5458 | this can take a lot of time */ |
4871 | int timeout = 500; | 5459 | int cnt = 500; |
5460 | int rc; | ||
4872 | 5461 | ||
4873 | might_sleep(); | 5462 | might_sleep(); |
4874 | 5463 | ||
4875 | /* Send HALT ramrod */ | 5464 | /* Send HALT ramrod */ |
4876 | bp->fp[0].state = BNX2X_FP_STATE_HALTING; | 5465 | bp->fp[0].state = BNX2X_FP_STATE_HALTING; |
4877 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, 0, 0); | 5466 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0); |
4878 | 5467 | ||
4879 | if (bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0, | 5468 | /* Wait for completion */ |
4880 | &(bp->fp[0].state), 1)) | 5469 | rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0, |
5470 | &(bp->fp[0].state), 1); | ||
5471 | if (rc) /* timeout */ | ||
4881 | return; | 5472 | return; |
4882 | 5473 | ||
4883 | dsb_sp_prod_idx = *bp->dsb_sp_prod; | 5474 | dsb_sp_prod_idx = *bp->dsb_sp_prod; |
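bnx2x_stop_multi() above tears a non-default queue down synchronously: post the HALT ramrod, wait for the HALTED state, then post CFC_DEL and wait for CLOSED. A self-contained sketch of that two-step sequence; post_ramrod() and wait_for() are stubs standing in for bnx2x_sp_post() and bnx2x_wait_ramrod(), and here the stubs complete immediately:

#include <stdio.h>

enum fp_state { FP_OPEN, FP_HALTING, FP_HALTED, FP_CLOSED };

static enum fp_state fp_state = FP_OPEN;

/* In the driver the state is advanced from the slow-path event handler. */
static void post_ramrod(const char *cmd, enum fp_state next)
{
	printf("ramrod: %s\n", cmd);
	fp_state = next;		/* pretend the completion arrived */
}

static int wait_for(enum fp_state wanted)
{
	return (fp_state == wanted) ? 0 : -1;
}

static int stop_queue(void)
{
	fp_state = FP_HALTING;
	post_ramrod("HALT", FP_HALTED);		/* stop new work on the queue */
	if (wait_for(FP_HALTED))
		return -1;			/* timeout: abort the teardown */

	post_ramrod("CFC_DEL", FP_CLOSED);	/* release the connection context */
	return wait_for(FP_CLOSED);
}

int main(void)
{
	printf("stop_queue -> %d\n", stop_queue());
	return 0;
}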
@@ -4889,29 +5480,110 @@ static void bnx2x_stop_leading(struct bnx2x *bp) | |||
4889 | we are going to reset the chip anyway | 5480 | we are going to reset the chip anyway |
4890 | so there is not much to do if this times out | 5481 | so there is not much to do if this times out |
4891 | */ | 5482 | */ |
4892 | while ((dsb_sp_prod_idx == *bp->dsb_sp_prod) && timeout) { | 5483 | while (dsb_sp_prod_idx == *bp->dsb_sp_prod) { |
4893 | timeout--; | ||
4894 | msleep(1); | 5484 | msleep(1); |
4895 | } | 5485 | if (!cnt) { |
4896 | if (!timeout) { | 5486 | DP(NETIF_MSG_IFDOWN, "timeout waiting for port del " |
4897 | DP(NETIF_MSG_IFDOWN, "timeout polling for completion " | 5487 | "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n", |
4898 | "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n", | 5488 | *bp->dsb_sp_prod, dsb_sp_prod_idx); |
4899 | *bp->dsb_sp_prod, dsb_sp_prod_idx); | 5489 | #ifdef BNX2X_STOP_ON_ERROR |
5490 | bnx2x_panic(); | ||
5491 | #endif | ||
5492 | break; | ||
5493 | } | ||
5494 | cnt--; | ||
4900 | } | 5495 | } |
4901 | bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD; | 5496 | bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD; |
4902 | bp->fp[0].state = BNX2X_FP_STATE_CLOSED; | 5497 | bp->fp[0].state = BNX2X_FP_STATE_CLOSED; |
4903 | } | 5498 | } |
4904 | 5499 | ||
5500 | static void bnx2x_reset_func(struct bnx2x *bp) | ||
5501 | { | ||
5502 | int port = BP_PORT(bp); | ||
5503 | int func = BP_FUNC(bp); | ||
5504 | int base, i; | ||
5505 | |||
5506 | /* Configure IGU */ | ||
5507 | REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); | ||
5508 | REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); | ||
5509 | |||
5510 | REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000); | ||
5511 | |||
5512 | /* Clear ILT */ | ||
5513 | base = FUNC_ILT_BASE(func); | ||
5514 | for (i = base; i < base + ILT_PER_FUNC; i++) | ||
5515 | bnx2x_ilt_wr(bp, i, 0); | ||
5516 | } | ||
5517 | |||
5518 | static void bnx2x_reset_port(struct bnx2x *bp) | ||
5519 | { | ||
5520 | int port = BP_PORT(bp); | ||
5521 | u32 val; | ||
5522 | |||
5523 | REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); | ||
5524 | |||
5525 | /* Do not rcv packets to BRB */ | ||
5526 | REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0); | ||
5527 | /* Do not direct rcv packets that are not for MCP to the BRB */ | ||
5528 | REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : | ||
5529 | NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); | ||
5530 | |||
5531 | /* Configure AEU */ | ||
5532 | REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0); | ||
5533 | |||
5534 | msleep(100); | ||
5535 | /* Check for BRB port occupancy */ | ||
5536 | val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); | ||
5537 | if (val) | ||
5538 | DP(NETIF_MSG_IFDOWN, | ||
5539 | "BRB1 is not empty %d blocks are occupied\n", val); ||
5540 | |||
5541 | /* TODO: Close Doorbell port? */ | ||
5542 | } | ||
5543 | |||
5544 | static void bnx2x_reset_common(struct bnx2x *bp) | ||
5545 | { | ||
5546 | /* reset_common */ | ||
5547 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, | ||
5548 | 0xd3ffff7f); | ||
5549 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403); | ||
5550 | } | ||
4905 | 5551 | ||
4906 | static int bnx2x_nic_unload(struct bnx2x *bp, int free_irq) | 5552 | static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code) |
5553 | { | ||
5554 | DP(BNX2X_MSG_MCP, "function %d reset_code %x\n", | ||
5555 | BP_FUNC(bp), reset_code); | ||
5556 | |||
5557 | switch (reset_code) { | ||
5558 | case FW_MSG_CODE_DRV_UNLOAD_COMMON: | ||
5559 | bnx2x_reset_port(bp); | ||
5560 | bnx2x_reset_func(bp); | ||
5561 | bnx2x_reset_common(bp); | ||
5562 | break; | ||
5563 | |||
5564 | case FW_MSG_CODE_DRV_UNLOAD_PORT: | ||
5565 | bnx2x_reset_port(bp); | ||
5566 | bnx2x_reset_func(bp); | ||
5567 | break; | ||
5568 | |||
5569 | case FW_MSG_CODE_DRV_UNLOAD_FUNCTION: | ||
5570 | bnx2x_reset_func(bp); | ||
5571 | break; | ||
5572 | |||
5573 | default: | ||
5574 | BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code); | ||
5575 | break; | ||
5576 | } | ||
5577 | } | ||
5578 | |||
5579 | /* must be called with rtnl_lock */ ||
5580 | static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) | ||
4907 | { | 5581 | { |
4908 | u32 reset_code = 0; | 5582 | u32 reset_code = 0; |
4909 | int i, timeout; | 5583 | int i, cnt; |
4910 | 5584 | ||
4911 | bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; | 5585 | bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; |
4912 | 5586 | ||
4913 | del_timer_sync(&bp->timer); | ||
4914 | |||
4915 | bp->rx_mode = BNX2X_RX_MODE_NONE; | 5587 | bp->rx_mode = BNX2X_RX_MODE_NONE; |
4916 | bnx2x_set_storm_rx_mode(bp); | 5588 | bnx2x_set_storm_rx_mode(bp); |
4917 | 5589 | ||
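bnx2x_reset_chip() now chooses the reset scope from the MCP unload reply: a COMMON unload resets the port, the function and the shared blocks, a PORT unload stops at the port, and a FUNCTION unload only clears per-function state (IGU edges, its ILT range). A sketch of that dispatch with printf placeholders in place of the register writes:

#include <stdio.h>

enum unload_code { UNLOAD_COMMON, UNLOAD_PORT, UNLOAD_FUNCTION };

static void reset_function(void) { printf("reset function (IGU config, ILT range)\n"); }
static void reset_port(void)     { printf("reset port (NIG/AEU masks, BRB drain)\n"); }
static void reset_common(void)   { printf("reset common blocks\n"); }

static void reset_chip_sketch(enum unload_code code)
{
	switch (code) {
	case UNLOAD_COMMON:		/* last function on the chip */
		reset_port();
		reset_function();
		reset_common();
		break;
	case UNLOAD_PORT:		/* last function on its port */
		reset_port();
		reset_function();
		break;
	case UNLOAD_FUNCTION:		/* other functions still running */
		reset_function();
		break;
	}
}

int main(void)
{
	reset_chip_sketch(UNLOAD_PORT);
	return 0;
}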
@@ -4920,21 +5592,44 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int free_irq) | |||
4920 | bp->dev->trans_start = jiffies; /* prevent tx timeout */ | 5592 | bp->dev->trans_start = jiffies; /* prevent tx timeout */ |
4921 | } | 5593 | } |
4922 | 5594 | ||
5595 | del_timer_sync(&bp->timer); | ||
5596 | SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb, | ||
5597 | (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq)); | ||
5598 | |||
4923 | /* Wait until all fast path tasks complete */ | 5599 | /* Wait until all fast path tasks complete */ |
4924 | for_each_queue(bp, i) { | 5600 | for_each_queue(bp, i) { |
4925 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 5601 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
4926 | 5602 | ||
4927 | timeout = 1000; | 5603 | #ifdef BNX2X_STOP_ON_ERROR |
4928 | while (bnx2x_has_work(fp) && (timeout--)) | 5604 | #ifdef __powerpc64__ |
5605 | DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n", | ||
5606 | #else | ||
5607 | DP(NETIF_MSG_IFDOWN, "fp->tpa_queue_used = 0x%llx\n", | ||
5608 | #endif | ||
5609 | fp->tpa_queue_used); | ||
5610 | #endif | ||
5611 | cnt = 1000; | ||
5612 | smp_rmb(); | ||
5613 | while (bnx2x_has_work(fp)) { | ||
4929 | msleep(1); | 5614 | msleep(1); |
4930 | if (!timeout) | 5615 | if (!cnt) { |
4931 | BNX2X_ERR("timeout waiting for queue[%d]\n", i); | 5616 | BNX2X_ERR("timeout waiting for queue[%d]\n", |
5617 | i); | ||
5618 | #ifdef BNX2X_STOP_ON_ERROR | ||
5619 | bnx2x_panic(); | ||
5620 | return -EBUSY; | ||
5621 | #else | ||
5622 | break; | ||
5623 | #endif | ||
5624 | } | ||
5625 | cnt--; | ||
5626 | smp_rmb(); | ||
5627 | } | ||
4932 | } | 5628 | } |
4933 | 5629 | ||
4934 | /* Wait until stat ramrod returns and all SP tasks complete */ | 5630 | /* Wait until all slow path tasks complete */ |
4935 | timeout = 1000; | 5631 | cnt = 1000; |
4936 | while ((bp->stat_pending || (bp->spq_left != MAX_SPQ_PENDING)) && | 5632 | while ((bp->spq_left != MAX_SPQ_PENDING) && cnt--) |
4937 | (timeout--)) | ||
4938 | msleep(1); | 5633 | msleep(1); |
4939 | 5634 | ||
4940 | for_each_queue(bp, i) | 5635 | for_each_queue(bp, i) |
@@ -4942,59 +5637,84 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int free_irq) | |||
4942 | /* Disable interrupts after Tx and Rx are disabled on stack level */ | 5637 | /* Disable interrupts after Tx and Rx are disabled on stack level */ |
4943 | bnx2x_int_disable_sync(bp); | 5638 | bnx2x_int_disable_sync(bp); |
4944 | 5639 | ||
5640 | /* Release IRQs */ | ||
5641 | bnx2x_free_irq(bp); | ||
5642 | |||
4945 | if (bp->flags & NO_WOL_FLAG) | 5643 | if (bp->flags & NO_WOL_FLAG) |
4946 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; | 5644 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; |
4947 | 5645 | ||
4948 | else if (bp->wol) { | 5646 | else if (bp->wol) { |
4949 | u32 emac_base = bp->port ? GRCBASE_EMAC0 : GRCBASE_EMAC1; | 5647 | u32 emac_base = BP_PORT(bp) ? GRCBASE_EMAC1 : GRCBASE_EMAC0; |
4950 | u8 *mac_addr = bp->dev->dev_addr; | 5648 | u8 *mac_addr = bp->dev->dev_addr; |
4951 | u32 val = (EMAC_MODE_MPKT | EMAC_MODE_MPKT_RCVD | | 5649 | u32 val; |
4952 | EMAC_MODE_ACPI_RCVD); | ||
4953 | |||
4954 | EMAC_WR(EMAC_REG_EMAC_MODE, val); | ||
4955 | 5650 | ||
5651 | /* The mac address is written to entries 1-4 to | ||
5652 | preserve entry 0 which is used by the PMF */ | ||
4956 | val = (mac_addr[0] << 8) | mac_addr[1]; | 5653 | val = (mac_addr[0] << 8) | mac_addr[1]; |
4957 | EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val); | 5654 | EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8, val); |
4958 | 5655 | ||
4959 | val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | | 5656 | val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | |
4960 | (mac_addr[4] << 8) | mac_addr[5]; | 5657 | (mac_addr[4] << 8) | mac_addr[5]; |
4961 | EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val); | 5658 | EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8 + 4, |
5659 | val); | ||
4962 | 5660 | ||
4963 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; | 5661 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; |
4964 | 5662 | ||
4965 | } else | 5663 | } else |
4966 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; | 5664 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; |
4967 | 5665 | ||
4968 | /* Close multi and leading connections */ | 5666 | /* Close multi and leading connections |
5667 | Completions for ramrods are collected in a synchronous way */ | ||
4969 | for_each_nondefault_queue(bp, i) | 5668 | for_each_nondefault_queue(bp, i) |
4970 | if (bnx2x_stop_multi(bp, i)) | 5669 | if (bnx2x_stop_multi(bp, i)) |
4971 | goto unload_error; | 5670 | goto unload_error; |
4972 | 5671 | ||
5672 | if (CHIP_IS_E1H(bp)) | ||
5673 | REG_WR(bp, NIG_REG_LLH0_FUNC_EN + BP_PORT(bp)*8, 0); | ||
5674 | |||
4973 | bnx2x_stop_leading(bp); | 5675 | bnx2x_stop_leading(bp); |
5676 | #ifdef BNX2X_STOP_ON_ERROR | ||
5677 | /* If ramrod completion timed out - break here! */ | ||
5678 | if (bp->panic) { | ||
5679 | BNX2X_ERR("Stop leading failed!\n"); | ||
5680 | return -EBUSY; | ||
5681 | } | ||
5682 | #endif | ||
5683 | |||
4974 | if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) || | 5684 | if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) || |
4975 | (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) { | 5685 | (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) { |
4976 | DP(NETIF_MSG_IFDOWN, "failed to close leading properly!" | 5686 | DP(NETIF_MSG_IFDOWN, "failed to close leading properly! " |
4977 | "state 0x%x fp[0].state 0x%x", | 5687 | "state 0x%x fp[0].state 0x%x\n", |
4978 | bp->state, bp->fp[0].state); | 5688 | bp->state, bp->fp[0].state); |
4979 | } | 5689 | } |
4980 | 5690 | ||
4981 | unload_error: | 5691 | unload_error: |
4982 | bnx2x__link_reset(bp); | 5692 | if (!BP_NOMCP(bp)) |
4983 | |||
4984 | if (!nomcp) | ||
4985 | reset_code = bnx2x_fw_command(bp, reset_code); | 5693 | reset_code = bnx2x_fw_command(bp, reset_code); |
4986 | else | 5694 | else { |
4987 | reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; | 5695 | DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n", |
5696 | load_count[0], load_count[1], load_count[2]); | ||
5697 | load_count[0]--; | ||
5698 | load_count[1 + BP_PORT(bp)]--; | ||
5699 | DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n", | ||
5700 | load_count[0], load_count[1], load_count[2]); | ||
5701 | if (load_count[0] == 0) | ||
5702 | reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; | ||
5703 | else if (load_count[1 + BP_PORT(bp)] == 0) | ||
5704 | reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT; | ||
5705 | else | ||
5706 | reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION; | ||
5707 | } | ||
4988 | 5708 | ||
4989 | /* Release IRQs */ | 5709 | if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) || |
4990 | if (free_irq) | 5710 | (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT)) |
4991 | bnx2x_free_irq(bp); | 5711 | bnx2x__link_reset(bp); |
4992 | 5712 | ||
4993 | /* Reset the chip */ | 5713 | /* Reset the chip */ |
4994 | bnx2x_reset_chip(bp, reset_code); | 5714 | bnx2x_reset_chip(bp, reset_code); |
4995 | 5715 | ||
4996 | /* Report UNLOAD_DONE to MCP */ | 5716 | /* Report UNLOAD_DONE to MCP */ |
4997 | if (!nomcp) | 5717 | if (!BP_NOMCP(bp)) |
4998 | bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); | 5718 | bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); |
4999 | 5719 | ||
5000 | /* Free SKBs and driver internals */ | 5720 | /* Free SKBs and driver internals */ |
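For WoL, the unload path above packs the MAC address into two 32-bit MAC-match words (bytes 0-1 in the high word, bytes 2-5 in the low word) and writes them to entries 1-4 so that entry 0 stays reserved for the PMF. A small sketch of just the byte packing; pack_mac_match() is an illustrative helper, not driver API:

#include <stdint.h>
#include <stdio.h>

static void pack_mac_match(const uint8_t mac[6], uint32_t *hi, uint32_t *lo)
{
	/* first word: the two most significant address bytes */
	*hi = ((uint32_t)mac[0] << 8) | mac[1];
	/* second word: the remaining four bytes, most significant first */
	*lo = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
	      ((uint32_t)mac[4] << 8) | mac[5];
}

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };
	uint32_t hi, lo;

	pack_mac_match(mac, &hi, &lo);
	printf("match words: 0x%04x 0x%08x\n", hi, lo);
	return 0;
}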
@@ -5008,6 +5728,29 @@ unload_error: | |||
5008 | return 0; | 5728 | return 0; |
5009 | } | 5729 | } |
5010 | 5730 | ||
5731 | static void bnx2x_reset_task(struct work_struct *work) | ||
5732 | { | ||
5733 | struct bnx2x *bp = container_of(work, struct bnx2x, reset_task); | ||
5734 | |||
5735 | #ifdef BNX2X_STOP_ON_ERROR | ||
5736 | BNX2X_ERR("reset task called but STOP_ON_ERROR defined" | ||
5737 | " so reset not done to allow debug dump,\n" | ||
5738 | KERN_ERR " you will need to reboot when done\n"); | ||
5739 | return; | ||
5740 | #endif | ||
5741 | |||
5742 | rtnl_lock(); | ||
5743 | |||
5744 | if (!netif_running(bp->dev)) | ||
5745 | goto reset_task_exit; | ||
5746 | |||
5747 | bnx2x_nic_unload(bp, UNLOAD_NORMAL); | ||
5748 | bnx2x_nic_load(bp, LOAD_NORMAL); | ||
5749 | |||
5750 | reset_task_exit: | ||
5751 | rtnl_unlock(); | ||
5752 | } | ||
5753 | |||
5011 | /* end of nic load/unload */ | 5754 | /* end of nic load/unload */ |
5012 | 5755 | ||
5013 | /* ethtool_ops */ | 5756 | /* ethtool_ops */ |
@@ -5016,9 +5759,139 @@ unload_error: | |||
5016 | * Init service functions | 5759 | * Init service functions |
5017 | */ | 5760 | */ |
5018 | 5761 | ||
5019 | static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg) | 5762 | static void __devinit bnx2x_undi_unload(struct bnx2x *bp) |
5763 | { | ||
5764 | u32 val; | ||
5765 | |||
5766 | /* Check if there is any driver already loaded */ | ||
5767 | val = REG_RD(bp, MISC_REG_UNPREPARED); | ||
5768 | if (val == 0x1) { | ||
5769 | /* Check if it is the UNDI driver | ||
5770 | * UNDI driver initializes CID offset for normal bell to 0x7 | ||
5771 | */ | ||
5772 | val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); | ||
5773 | if (val == 0x7) { | ||
5774 | u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; | ||
5775 | /* save our func and fw_seq */ | ||
5776 | int func = BP_FUNC(bp); | ||
5777 | u16 fw_seq = bp->fw_seq; | ||
5778 | |||
5779 | BNX2X_DEV_INFO("UNDI is active! reset device\n"); | ||
5780 | |||
5781 | /* try unload UNDI on port 0 */ | ||
5782 | bp->func = 0; | ||
5783 | bp->fw_seq = (SHMEM_RD(bp, | ||
5784 | func_mb[bp->func].drv_mb_header) & | ||
5785 | DRV_MSG_SEQ_NUMBER_MASK); | ||
5786 | |||
5787 | reset_code = bnx2x_fw_command(bp, reset_code); | ||
5788 | bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); | ||
5789 | |||
5790 | /* if UNDI is loaded on the other port */ | ||
5791 | if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) { | ||
5792 | |||
5793 | bp->func = 1; | ||
5794 | bp->fw_seq = (SHMEM_RD(bp, | ||
5795 | func_mb[bp->func].drv_mb_header) & | ||
5796 | DRV_MSG_SEQ_NUMBER_MASK); | ||
5797 | |||
5798 | bnx2x_fw_command(bp, | ||
5799 | DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS); | ||
5800 | bnx2x_fw_command(bp, | ||
5801 | DRV_MSG_CODE_UNLOAD_DONE); | ||
5802 | |||
5803 | /* restore our func and fw_seq */ | ||
5804 | bp->func = func; | ||
5805 | bp->fw_seq = fw_seq; | ||
5806 | } | ||
5807 | |||
5808 | /* reset device */ | ||
5809 | REG_WR(bp, | ||
5810 | GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, | ||
5811 | 0xd3ffff7f); | ||
5812 | REG_WR(bp, | ||
5813 | GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, | ||
5814 | 0x1403); | ||
5815 | } | ||
5816 | } | ||
5817 | } | ||
5818 | |||
5819 | static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) | ||
5820 | { | ||
5821 | u32 val, val2, val3, val4, id; | ||
5822 | |||
5823 | /* Get the chip revision id and number. */ | ||
5824 | /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ | ||
5825 | val = REG_RD(bp, MISC_REG_CHIP_NUM); | ||
5826 | id = ((val & 0xffff) << 16); | ||
5827 | val = REG_RD(bp, MISC_REG_CHIP_REV); | ||
5828 | id |= ((val & 0xf) << 12); | ||
5829 | val = REG_RD(bp, MISC_REG_CHIP_METAL); | ||
5830 | id |= ((val & 0xff) << 4); | ||
5831 | REG_RD(bp, MISC_REG_BOND_ID); | ||
5832 | id |= (val & 0xf); | ||
5833 | bp->common.chip_id = id; | ||
5834 | bp->link_params.chip_id = bp->common.chip_id; | ||
5835 | BNX2X_DEV_INFO("chip ID is 0x%x\n", id); | ||
5836 | |||
5837 | val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4); | ||
5838 | bp->common.flash_size = (NVRAM_1MB_SIZE << | ||
5839 | (val & MCPR_NVM_CFG4_FLASH_SIZE)); | ||
5840 | BNX2X_DEV_INFO("flash_size 0x%x (%d)\n", | ||
5841 | bp->common.flash_size, bp->common.flash_size); | ||
5842 | |||
5843 | bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); | ||
5844 | bp->link_params.shmem_base = bp->common.shmem_base; | ||
5845 | BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base); | ||
5846 | |||
5847 | if (!bp->common.shmem_base || | ||
5848 | (bp->common.shmem_base < 0xA0000) || | ||
5849 | (bp->common.shmem_base >= 0xC0000)) { | ||
5850 | BNX2X_DEV_INFO("MCP not active\n"); | ||
5851 | bp->flags |= NO_MCP_FLAG; | ||
5852 | return; | ||
5853 | } | ||
5854 | |||
5855 | val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); | ||
5856 | if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) | ||
5857 | != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) | ||
5858 | BNX2X_ERR("BAD MCP validity signature\n"); | ||
5859 | |||
5860 | bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); | ||
5861 | bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board); | ||
5862 | |||
5863 | BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n", | ||
5864 | bp->common.hw_config, bp->common.board); | ||
5865 | |||
5866 | bp->link_params.hw_led_mode = ((bp->common.hw_config & | ||
5867 | SHARED_HW_CFG_LED_MODE_MASK) >> | ||
5868 | SHARED_HW_CFG_LED_MODE_SHIFT); | ||
5869 | |||
5870 | val = SHMEM_RD(bp, dev_info.bc_rev) >> 8; | ||
5871 | bp->common.bc_ver = val; | ||
5872 | BNX2X_DEV_INFO("bc_ver %X\n", val); | ||
5873 | if (val < BNX2X_BC_VER) { | ||
5874 | /* for now only warn | ||
5875 | * later we might need to enforce this */ | ||
5876 | BNX2X_ERR("This driver needs bc_ver %X but found %X," | ||
5877 | " please upgrade BC\n", BNX2X_BC_VER, val); | ||
5878 | } | ||
5879 | BNX2X_DEV_INFO("%sWoL Capable\n", | ||
5880 | (bp->flags & NO_WOL_FLAG)? "Not " : ""); | ||
5881 | |||
5882 | val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num); | ||
5883 | val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]); | ||
5884 | val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]); | ||
5885 | val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]); | ||
5886 | |||
5887 | printk(KERN_INFO PFX "part number %X-%X-%X-%X\n", | ||
5888 | val, val2, val3, val4); | ||
5889 | } | ||
5890 | |||
5891 | static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp, | ||
5892 | u32 switch_cfg) | ||
5020 | { | 5893 | { |
5021 | int port = bp->port; | 5894 | int port = BP_PORT(bp); |
5022 | u32 ext_phy_type; | 5895 | u32 ext_phy_type; |
5023 | 5896 | ||
5024 | switch (switch_cfg) { | 5897 | switch (switch_cfg) { |
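bnx2x_get_common_hwinfo() assembles the chip ID from four MISC registers, with the chip number in bits 16-31, revision in 12-15, metal in 4-11 and bond ID in 0-3. A sketch of that packing; read_reg() and the sample values are placeholders for REG_RD() against the real registers:

#include <stdint.h>
#include <stdio.h>

static uint32_t read_reg(int which)
{
	/* placeholder values; the driver reads MISC_REG_CHIP_NUM/REV/METAL/BOND_ID */
	static const uint32_t sample[4] = { 0x164e, 0x1, 0x00, 0x0 };
	return sample[which & 3];
}

static uint32_t build_chip_id(void)
{
	uint32_t id;

	id  = (read_reg(0) & 0xffff) << 16;	/* chip number, bits 16-31 */
	id |= (read_reg(1) & 0xf) << 12;	/* revision, bits 12-15 */
	id |= (read_reg(2) & 0xff) << 4;	/* metal, bits 4-11 */
	id |= (read_reg(3) & 0xf);		/* bond ID, bits 0-3 */
	return id;
}

int main(void)
{
	printf("chip ID 0x%x\n", build_chip_id());
	return 0;
}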
@@ -5032,31 +5905,33 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg) | |||
5032 | BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n", | 5905 | BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n", |
5033 | ext_phy_type); | 5906 | ext_phy_type); |
5034 | 5907 | ||
5035 | bp->supported |= (SUPPORTED_10baseT_Half | | 5908 | bp->port.supported |= (SUPPORTED_10baseT_Half | |
5036 | SUPPORTED_10baseT_Full | | 5909 | SUPPORTED_10baseT_Full | |
5037 | SUPPORTED_100baseT_Half | | 5910 | SUPPORTED_100baseT_Half | |
5038 | SUPPORTED_100baseT_Full | | 5911 | SUPPORTED_100baseT_Full | |
5039 | SUPPORTED_1000baseT_Full | | 5912 | SUPPORTED_1000baseT_Full | |
5040 | SUPPORTED_2500baseX_Full | | 5913 | SUPPORTED_2500baseX_Full | |
5041 | SUPPORTED_TP | SUPPORTED_FIBRE | | 5914 | SUPPORTED_TP | |
5042 | SUPPORTED_Autoneg | | 5915 | SUPPORTED_FIBRE | |
5043 | SUPPORTED_Pause | | 5916 | SUPPORTED_Autoneg | |
5044 | SUPPORTED_Asym_Pause); | 5917 | SUPPORTED_Pause | |
5918 | SUPPORTED_Asym_Pause); | ||
5045 | break; | 5919 | break; |
5046 | 5920 | ||
5047 | case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482: | 5921 | case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482: |
5048 | BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n", | 5922 | BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n", |
5049 | ext_phy_type); | 5923 | ext_phy_type); |
5050 | 5924 | ||
5051 | bp->supported |= (SUPPORTED_10baseT_Half | | 5925 | bp->port.supported |= (SUPPORTED_10baseT_Half | |
5052 | SUPPORTED_10baseT_Full | | 5926 | SUPPORTED_10baseT_Full | |
5053 | SUPPORTED_100baseT_Half | | 5927 | SUPPORTED_100baseT_Half | |
5054 | SUPPORTED_100baseT_Full | | 5928 | SUPPORTED_100baseT_Full | |
5055 | SUPPORTED_1000baseT_Full | | 5929 | SUPPORTED_1000baseT_Full | |
5056 | SUPPORTED_TP | SUPPORTED_FIBRE | | 5930 | SUPPORTED_TP | |
5057 | SUPPORTED_Autoneg | | 5931 | SUPPORTED_FIBRE | |
5058 | SUPPORTED_Pause | | 5932 | SUPPORTED_Autoneg | |
5059 | SUPPORTED_Asym_Pause); | 5933 | SUPPORTED_Pause | |
5934 | SUPPORTED_Asym_Pause); | ||
5060 | break; | 5935 | break; |
5061 | 5936 | ||
5062 | default: | 5937 | default: |
@@ -5066,9 +5941,9 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg) | |||
5066 | return; | 5941 | return; |
5067 | } | 5942 | } |
5068 | 5943 | ||
5069 | bp->phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + | 5944 | bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + |
5070 | port*0x10); | 5945 | port*0x10); |
5071 | BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr); | 5946 | BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); |
5072 | break; | 5947 | break; |
5073 | 5948 | ||
5074 | case SWITCH_CFG_10G: | 5949 | case SWITCH_CFG_10G: |
@@ -5081,75 +5956,75 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg) | |||
5081 | BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n", | 5956 | BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n", |
5082 | ext_phy_type); | 5957 | ext_phy_type); |
5083 | 5958 | ||
5084 | bp->supported |= (SUPPORTED_10baseT_Half | | 5959 | bp->port.supported |= (SUPPORTED_10baseT_Half | |
5085 | SUPPORTED_10baseT_Full | | 5960 | SUPPORTED_10baseT_Full | |
5086 | SUPPORTED_100baseT_Half | | 5961 | SUPPORTED_100baseT_Half | |
5087 | SUPPORTED_100baseT_Full | | 5962 | SUPPORTED_100baseT_Full | |
5088 | SUPPORTED_1000baseT_Full | | 5963 | SUPPORTED_1000baseT_Full | |
5089 | SUPPORTED_2500baseX_Full | | 5964 | SUPPORTED_2500baseX_Full | |
5090 | SUPPORTED_10000baseT_Full | | 5965 | SUPPORTED_10000baseT_Full | |
5091 | SUPPORTED_TP | SUPPORTED_FIBRE | | 5966 | SUPPORTED_TP | |
5092 | SUPPORTED_Autoneg | | 5967 | SUPPORTED_FIBRE | |
5093 | SUPPORTED_Pause | | 5968 | SUPPORTED_Autoneg | |
5094 | SUPPORTED_Asym_Pause); | 5969 | SUPPORTED_Pause | |
5970 | SUPPORTED_Asym_Pause); | ||
5095 | break; | 5971 | break; |
5096 | 5972 | ||
5097 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: | 5973 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: |
5098 | BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n", | 5974 | BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n", |
5099 | ext_phy_type); | 5975 | ext_phy_type); |
5100 | 5976 | ||
5101 | bp->supported |= (SUPPORTED_10000baseT_Full | | 5977 | bp->port.supported |= (SUPPORTED_10000baseT_Full | |
5102 | SUPPORTED_FIBRE | | 5978 | SUPPORTED_FIBRE | |
5103 | SUPPORTED_Pause | | 5979 | SUPPORTED_Pause | |
5104 | SUPPORTED_Asym_Pause); | 5980 | SUPPORTED_Asym_Pause); |
5105 | break; | 5981 | break; |
5106 | 5982 | ||
5107 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: | 5983 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: |
5108 | BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n", | 5984 | BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n", |
5109 | ext_phy_type); | 5985 | ext_phy_type); |
5110 | 5986 | ||
5111 | bp->supported |= (SUPPORTED_10000baseT_Full | | 5987 | bp->port.supported |= (SUPPORTED_10000baseT_Full | |
5112 | SUPPORTED_1000baseT_Full | | 5988 | SUPPORTED_1000baseT_Full | |
5113 | SUPPORTED_Autoneg | | 5989 | SUPPORTED_FIBRE | |
5114 | SUPPORTED_FIBRE | | 5990 | SUPPORTED_Pause | |
5115 | SUPPORTED_Pause | | 5991 | SUPPORTED_Asym_Pause); |
5116 | SUPPORTED_Asym_Pause); | ||
5117 | break; | 5992 | break; |
5118 | 5993 | ||
5119 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: | 5994 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: |
5120 | BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n", | 5995 | BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n", |
5121 | ext_phy_type); | 5996 | ext_phy_type); |
5122 | 5997 | ||
5123 | bp->supported |= (SUPPORTED_10000baseT_Full | | 5998 | bp->port.supported |= (SUPPORTED_10000baseT_Full | |
5124 | SUPPORTED_1000baseT_Full | | 5999 | SUPPORTED_1000baseT_Full | |
5125 | SUPPORTED_FIBRE | | 6000 | SUPPORTED_FIBRE | |
5126 | SUPPORTED_Autoneg | | 6001 | SUPPORTED_Autoneg | |
5127 | SUPPORTED_Pause | | 6002 | SUPPORTED_Pause | |
5128 | SUPPORTED_Asym_Pause); | 6003 | SUPPORTED_Asym_Pause); |
5129 | break; | 6004 | break; |
5130 | 6005 | ||
5131 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: | 6006 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: |
5132 | BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n", | 6007 | BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n", |
5133 | ext_phy_type); | 6008 | ext_phy_type); |
5134 | 6009 | ||
5135 | bp->supported |= (SUPPORTED_10000baseT_Full | | 6010 | bp->port.supported |= (SUPPORTED_10000baseT_Full | |
5136 | SUPPORTED_2500baseX_Full | | 6011 | SUPPORTED_2500baseX_Full | |
5137 | SUPPORTED_1000baseT_Full | | 6012 | SUPPORTED_1000baseT_Full | |
5138 | SUPPORTED_FIBRE | | 6013 | SUPPORTED_FIBRE | |
5139 | SUPPORTED_Autoneg | | 6014 | SUPPORTED_Autoneg | |
5140 | SUPPORTED_Pause | | 6015 | SUPPORTED_Pause | |
5141 | SUPPORTED_Asym_Pause); | 6016 | SUPPORTED_Asym_Pause); |
5142 | break; | 6017 | break; |
5143 | 6018 | ||
5144 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: | 6019 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: |
5145 | BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n", | 6020 | BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n", |
5146 | ext_phy_type); | 6021 | ext_phy_type); |
5147 | 6022 | ||
5148 | bp->supported |= (SUPPORTED_10000baseT_Full | | 6023 | bp->port.supported |= (SUPPORTED_10000baseT_Full | |
5149 | SUPPORTED_TP | | 6024 | SUPPORTED_TP | |
5150 | SUPPORTED_Autoneg | | 6025 | SUPPORTED_Autoneg | |
5151 | SUPPORTED_Pause | | 6026 | SUPPORTED_Pause | |
5152 | SUPPORTED_Asym_Pause); | 6027 | SUPPORTED_Asym_Pause); |
5153 | break; | 6028 | break; |
5154 | 6029 | ||
5155 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: | 6030 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: |
@@ -5164,61 +6039,61 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg) | |||
5164 | return; | 6039 | return; |
5165 | } | 6040 | } |
5166 | 6041 | ||
5167 | bp->phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + | 6042 | bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + |
5168 | port*0x18); | 6043 | port*0x18); |
5169 | BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr); | 6044 | BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); |
5170 | 6045 | ||
5171 | break; | 6046 | break; |
5172 | 6047 | ||
5173 | default: | 6048 | default: |
5174 | BNX2X_ERR("BAD switch_cfg link_config 0x%x\n", | 6049 | BNX2X_ERR("BAD switch_cfg link_config 0x%x\n", |
5175 | bp->link_config); | 6050 | bp->port.link_config); |
5176 | return; | 6051 | return; |
5177 | } | 6052 | } |
5178 | bp->link_params.phy_addr = bp->phy_addr; | 6053 | bp->link_params.phy_addr = bp->port.phy_addr; |
5179 | 6054 | ||
5180 | /* mask what we support according to speed_cap_mask */ | 6055 | /* mask what we support according to speed_cap_mask */ |
5181 | if (!(bp->link_params.speed_cap_mask & | 6056 | if (!(bp->link_params.speed_cap_mask & |
5182 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) | 6057 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) |
5183 | bp->supported &= ~SUPPORTED_10baseT_Half; | 6058 | bp->port.supported &= ~SUPPORTED_10baseT_Half; |
5184 | 6059 | ||
5185 | if (!(bp->link_params.speed_cap_mask & | 6060 | if (!(bp->link_params.speed_cap_mask & |
5186 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) | 6061 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) |
5187 | bp->supported &= ~SUPPORTED_10baseT_Full; | 6062 | bp->port.supported &= ~SUPPORTED_10baseT_Full; |
5188 | 6063 | ||
5189 | if (!(bp->link_params.speed_cap_mask & | 6064 | if (!(bp->link_params.speed_cap_mask & |
5190 | PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) | 6065 | PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) |
5191 | bp->supported &= ~SUPPORTED_100baseT_Half; | 6066 | bp->port.supported &= ~SUPPORTED_100baseT_Half; |
5192 | 6067 | ||
5193 | if (!(bp->link_params.speed_cap_mask & | 6068 | if (!(bp->link_params.speed_cap_mask & |
5194 | PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) | 6069 | PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) |
5195 | bp->supported &= ~SUPPORTED_100baseT_Full; | 6070 | bp->port.supported &= ~SUPPORTED_100baseT_Full; |
5196 | 6071 | ||
5197 | if (!(bp->link_params.speed_cap_mask & | 6072 | if (!(bp->link_params.speed_cap_mask & |
5198 | PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) | 6073 | PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) |
5199 | bp->supported &= ~(SUPPORTED_1000baseT_Half | | 6074 | bp->port.supported &= ~(SUPPORTED_1000baseT_Half | |
5200 | SUPPORTED_1000baseT_Full); | 6075 | SUPPORTED_1000baseT_Full); |
5201 | 6076 | ||
5202 | if (!(bp->link_params.speed_cap_mask & | 6077 | if (!(bp->link_params.speed_cap_mask & |
5203 | PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) | 6078 | PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) |
5204 | bp->supported &= ~SUPPORTED_2500baseX_Full; | 6079 | bp->port.supported &= ~SUPPORTED_2500baseX_Full; |
5205 | 6080 | ||
5206 | if (!(bp->link_params.speed_cap_mask & | 6081 | if (!(bp->link_params.speed_cap_mask & |
5207 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) | 6082 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) |
5208 | bp->supported &= ~SUPPORTED_10000baseT_Full; | 6083 | bp->port.supported &= ~SUPPORTED_10000baseT_Full; |
5209 | 6084 | ||
5210 | BNX2X_DEV_INFO("supported 0x%x\n", bp->supported); | 6085 | BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported); |
5211 | } | 6086 | } |
5212 | 6087 | ||
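The masking step above in bnx2x_link_settings_supported() prunes the ethtool SUPPORTED_* flags against the NVRAM speed_capability_mask. A minimal userspace sketch of that logic follows; the CAP_* and SUP_* bit values are invented stand-ins, not the real PORT_HW_CFG_* or ethtool encodings.

#include <stdio.h>

/* Illustrative bit values only -- the real PORT_HW_CFG_* and SUPPORTED_*
 * encodings come from the driver and ethtool headers. */
#define CAP_10M_FULL	0x01
#define CAP_100M_FULL	0x02
#define CAP_1G		0x04
#define CAP_10G		0x08

#define SUP_10_FULL	0x01
#define SUP_100_FULL	0x02
#define SUP_1000_FULL	0x04
#define SUP_10000_FULL	0x08

/* Clear every SUPPORTED_* flag whose speed is absent from the NVRAM
 * speed_cap_mask, mirroring the checks above. */
static unsigned int mask_supported(unsigned int supported, unsigned int cap_mask)
{
	if (!(cap_mask & CAP_10M_FULL))
		supported &= ~SUP_10_FULL;
	if (!(cap_mask & CAP_100M_FULL))
		supported &= ~SUP_100_FULL;
	if (!(cap_mask & CAP_1G))
		supported &= ~SUP_1000_FULL;
	if (!(cap_mask & CAP_10G))
		supported &= ~SUP_10000_FULL;
	return supported;
}

int main(void)
{
	/* the PHY claims everything, the NVRAM only allows 1G and 10G */
	unsigned int supported = SUP_10_FULL | SUP_100_FULL |
				 SUP_1000_FULL | SUP_10000_FULL;

	printf("supported 0x%x\n", mask_supported(supported, CAP_1G | CAP_10G));
	return 0;
}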
5213 | static void bnx2x_link_settings_requested(struct bnx2x *bp) | 6088 | static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) |
5214 | { | 6089 | { |
5215 | bp->link_params.req_duplex = DUPLEX_FULL; | 6090 | bp->link_params.req_duplex = DUPLEX_FULL; |
5216 | 6091 | ||
5217 | switch (bp->link_config & PORT_FEATURE_LINK_SPEED_MASK) { | 6092 | switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) { |
5218 | case PORT_FEATURE_LINK_SPEED_AUTO: | 6093 | case PORT_FEATURE_LINK_SPEED_AUTO: |
5219 | if (bp->supported & SUPPORTED_Autoneg) { | 6094 | if (bp->port.supported & SUPPORTED_Autoneg) { |
5220 | bp->link_params.req_line_speed = SPEED_AUTO_NEG; | 6095 | bp->link_params.req_line_speed = SPEED_AUTO_NEG; |
5221 | bp->advertising = bp->supported; | 6096 | bp->port.advertising = bp->port.supported; |
5222 | } else { | 6097 | } else { |
5223 | u32 ext_phy_type = | 6098 | u32 ext_phy_type = |
5224 | XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config); | 6099 | XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config); |
@@ -5229,7 +6104,7 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp) | |||
5229 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) { | 6104 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) { |
5230 | /* force 10G, no AN */ | 6105 | /* force 10G, no AN */ |
5231 | bp->link_params.req_line_speed = SPEED_10000; | 6106 | bp->link_params.req_line_speed = SPEED_10000; |
5232 | bp->advertising = | 6107 | bp->port.advertising = |
5233 | (ADVERTISED_10000baseT_Full | | 6108 | (ADVERTISED_10000baseT_Full | |
5234 | ADVERTISED_FIBRE); | 6109 | ADVERTISED_FIBRE); |
5235 | break; | 6110 | break; |
@@ -5237,98 +6112,98 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp) | |||
5237 | BNX2X_ERR("NVRAM config error. " | 6112 | BNX2X_ERR("NVRAM config error. " |
5238 | "Invalid link_config 0x%x" | 6113 | "Invalid link_config 0x%x" |
5239 | " Autoneg not supported\n", | 6114 | " Autoneg not supported\n", |
5240 | bp->link_config); | 6115 | bp->port.link_config); |
5241 | return; | 6116 | return; |
5242 | } | 6117 | } |
5243 | break; | 6118 | break; |
5244 | 6119 | ||
5245 | case PORT_FEATURE_LINK_SPEED_10M_FULL: | 6120 | case PORT_FEATURE_LINK_SPEED_10M_FULL: |
5246 | if (bp->supported & SUPPORTED_10baseT_Full) { | 6121 | if (bp->port.supported & SUPPORTED_10baseT_Full) { |
5247 | bp->link_params.req_line_speed = SPEED_10; | 6122 | bp->link_params.req_line_speed = SPEED_10; |
5248 | bp->advertising = (ADVERTISED_10baseT_Full | | 6123 | bp->port.advertising = (ADVERTISED_10baseT_Full | |
5249 | ADVERTISED_TP); | 6124 | ADVERTISED_TP); |
5250 | } else { | 6125 | } else { |
5251 | BNX2X_ERR("NVRAM config error. " | 6126 | BNX2X_ERR("NVRAM config error. " |
5252 | "Invalid link_config 0x%x" | 6127 | "Invalid link_config 0x%x" |
5253 | " speed_cap_mask 0x%x\n", | 6128 | " speed_cap_mask 0x%x\n", |
5254 | bp->link_config, | 6129 | bp->port.link_config, |
5255 | bp->link_params.speed_cap_mask); | 6130 | bp->link_params.speed_cap_mask); |
5256 | return; | 6131 | return; |
5257 | } | 6132 | } |
5258 | break; | 6133 | break; |
5259 | 6134 | ||
5260 | case PORT_FEATURE_LINK_SPEED_10M_HALF: | 6135 | case PORT_FEATURE_LINK_SPEED_10M_HALF: |
5261 | if (bp->supported & SUPPORTED_10baseT_Half) { | 6136 | if (bp->port.supported & SUPPORTED_10baseT_Half) { |
5262 | bp->link_params.req_line_speed = SPEED_10; | 6137 | bp->link_params.req_line_speed = SPEED_10; |
5263 | bp->link_params.req_duplex = DUPLEX_HALF; | 6138 | bp->link_params.req_duplex = DUPLEX_HALF; |
5264 | bp->advertising = (ADVERTISED_10baseT_Half | | 6139 | bp->port.advertising = (ADVERTISED_10baseT_Half | |
5265 | ADVERTISED_TP); | 6140 | ADVERTISED_TP); |
5266 | } else { | 6141 | } else { |
5267 | BNX2X_ERR("NVRAM config error. " | 6142 | BNX2X_ERR("NVRAM config error. " |
5268 | "Invalid link_config 0x%x" | 6143 | "Invalid link_config 0x%x" |
5269 | " speed_cap_mask 0x%x\n", | 6144 | " speed_cap_mask 0x%x\n", |
5270 | bp->link_config, | 6145 | bp->port.link_config, |
5271 | bp->link_params.speed_cap_mask); | 6146 | bp->link_params.speed_cap_mask); |
5272 | return; | 6147 | return; |
5273 | } | 6148 | } |
5274 | break; | 6149 | break; |
5275 | 6150 | ||
5276 | case PORT_FEATURE_LINK_SPEED_100M_FULL: | 6151 | case PORT_FEATURE_LINK_SPEED_100M_FULL: |
5277 | if (bp->supported & SUPPORTED_100baseT_Full) { | 6152 | if (bp->port.supported & SUPPORTED_100baseT_Full) { |
5278 | bp->link_params.req_line_speed = SPEED_100; | 6153 | bp->link_params.req_line_speed = SPEED_100; |
5279 | bp->advertising = (ADVERTISED_100baseT_Full | | 6154 | bp->port.advertising = (ADVERTISED_100baseT_Full | |
5280 | ADVERTISED_TP); | 6155 | ADVERTISED_TP); |
5281 | } else { | 6156 | } else { |
5282 | BNX2X_ERR("NVRAM config error. " | 6157 | BNX2X_ERR("NVRAM config error. " |
5283 | "Invalid link_config 0x%x" | 6158 | "Invalid link_config 0x%x" |
5284 | " speed_cap_mask 0x%x\n", | 6159 | " speed_cap_mask 0x%x\n", |
5285 | bp->link_config, | 6160 | bp->port.link_config, |
5286 | bp->link_params.speed_cap_mask); | 6161 | bp->link_params.speed_cap_mask); |
5287 | return; | 6162 | return; |
5288 | } | 6163 | } |
5289 | break; | 6164 | break; |
5290 | 6165 | ||
5291 | case PORT_FEATURE_LINK_SPEED_100M_HALF: | 6166 | case PORT_FEATURE_LINK_SPEED_100M_HALF: |
5292 | if (bp->supported & SUPPORTED_100baseT_Half) { | 6167 | if (bp->port.supported & SUPPORTED_100baseT_Half) { |
5293 | bp->link_params.req_line_speed = SPEED_100; | 6168 | bp->link_params.req_line_speed = SPEED_100; |
5294 | bp->link_params.req_duplex = DUPLEX_HALF; | 6169 | bp->link_params.req_duplex = DUPLEX_HALF; |
5295 | bp->advertising = (ADVERTISED_100baseT_Half | | 6170 | bp->port.advertising = (ADVERTISED_100baseT_Half | |
5296 | ADVERTISED_TP); | 6171 | ADVERTISED_TP); |
5297 | } else { | 6172 | } else { |
5298 | BNX2X_ERR("NVRAM config error. " | 6173 | BNX2X_ERR("NVRAM config error. " |
5299 | "Invalid link_config 0x%x" | 6174 | "Invalid link_config 0x%x" |
5300 | " speed_cap_mask 0x%x\n", | 6175 | " speed_cap_mask 0x%x\n", |
5301 | bp->link_config, | 6176 | bp->port.link_config, |
5302 | bp->link_params.speed_cap_mask); | 6177 | bp->link_params.speed_cap_mask); |
5303 | return; | 6178 | return; |
5304 | } | 6179 | } |
5305 | break; | 6180 | break; |
5306 | 6181 | ||
5307 | case PORT_FEATURE_LINK_SPEED_1G: | 6182 | case PORT_FEATURE_LINK_SPEED_1G: |
5308 | if (bp->supported & SUPPORTED_1000baseT_Full) { | 6183 | if (bp->port.supported & SUPPORTED_1000baseT_Full) { |
5309 | bp->link_params.req_line_speed = SPEED_1000; | 6184 | bp->link_params.req_line_speed = SPEED_1000; |
5310 | bp->advertising = (ADVERTISED_1000baseT_Full | | 6185 | bp->port.advertising = (ADVERTISED_1000baseT_Full | |
5311 | ADVERTISED_TP); | 6186 | ADVERTISED_TP); |
5312 | } else { | 6187 | } else { |
5313 | BNX2X_ERR("NVRAM config error. " | 6188 | BNX2X_ERR("NVRAM config error. " |
5314 | "Invalid link_config 0x%x" | 6189 | "Invalid link_config 0x%x" |
5315 | " speed_cap_mask 0x%x\n", | 6190 | " speed_cap_mask 0x%x\n", |
5316 | bp->link_config, | 6191 | bp->port.link_config, |
5317 | bp->link_params.speed_cap_mask); | 6192 | bp->link_params.speed_cap_mask); |
5318 | return; | 6193 | return; |
5319 | } | 6194 | } |
5320 | break; | 6195 | break; |
5321 | 6196 | ||
5322 | case PORT_FEATURE_LINK_SPEED_2_5G: | 6197 | case PORT_FEATURE_LINK_SPEED_2_5G: |
5323 | if (bp->supported & SUPPORTED_2500baseX_Full) { | 6198 | if (bp->port.supported & SUPPORTED_2500baseX_Full) { |
5324 | bp->link_params.req_line_speed = SPEED_2500; | 6199 | bp->link_params.req_line_speed = SPEED_2500; |
5325 | bp->advertising = (ADVERTISED_2500baseX_Full | | 6200 | bp->port.advertising = (ADVERTISED_2500baseX_Full | |
5326 | ADVERTISED_TP); | 6201 | ADVERTISED_TP); |
5327 | } else { | 6202 | } else { |
5328 | BNX2X_ERR("NVRAM config error. " | 6203 | BNX2X_ERR("NVRAM config error. " |
5329 | "Invalid link_config 0x%x" | 6204 | "Invalid link_config 0x%x" |
5330 | " speed_cap_mask 0x%x\n", | 6205 | " speed_cap_mask 0x%x\n", |
5331 | bp->link_config, | 6206 | bp->port.link_config, |
5332 | bp->link_params.speed_cap_mask); | 6207 | bp->link_params.speed_cap_mask); |
5333 | return; | 6208 | return; |
5334 | } | 6209 | } |
@@ -5337,15 +6212,15 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp) | |||
5337 | case PORT_FEATURE_LINK_SPEED_10G_CX4: | 6212 | case PORT_FEATURE_LINK_SPEED_10G_CX4: |
5338 | case PORT_FEATURE_LINK_SPEED_10G_KX4: | 6213 | case PORT_FEATURE_LINK_SPEED_10G_KX4: |
5339 | case PORT_FEATURE_LINK_SPEED_10G_KR: | 6214 | case PORT_FEATURE_LINK_SPEED_10G_KR: |
5340 | if (bp->supported & SUPPORTED_10000baseT_Full) { | 6215 | if (bp->port.supported & SUPPORTED_10000baseT_Full) { |
5341 | bp->link_params.req_line_speed = SPEED_10000; | 6216 | bp->link_params.req_line_speed = SPEED_10000; |
5342 | bp->advertising = (ADVERTISED_10000baseT_Full | | 6217 | bp->port.advertising = (ADVERTISED_10000baseT_Full | |
5343 | ADVERTISED_FIBRE); | 6218 | ADVERTISED_FIBRE); |
5344 | } else { | 6219 | } else { |
5345 | BNX2X_ERR("NVRAM config error. " | 6220 | BNX2X_ERR("NVRAM config error. " |
5346 | "Invalid link_config 0x%x" | 6221 | "Invalid link_config 0x%x" |
5347 | " speed_cap_mask 0x%x\n", | 6222 | " speed_cap_mask 0x%x\n", |
5348 | bp->link_config, | 6223 | bp->port.link_config, |
5349 | bp->link_params.speed_cap_mask); | 6224 | bp->link_params.speed_cap_mask); |
5350 | return; | 6225 | return; |
5351 | } | 6226 | } |
@@ -5354,64 +6229,33 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp) | |||
5354 | default: | 6229 | default: |
5355 | BNX2X_ERR("NVRAM config error. " | 6230 | BNX2X_ERR("NVRAM config error. " |
5356 | "BAD link speed link_config 0x%x\n", | 6231 | "BAD link speed link_config 0x%x\n", |
5357 | bp->link_config); | 6232 | bp->port.link_config); |
5358 | bp->link_params.req_line_speed = SPEED_AUTO_NEG; | 6233 | bp->link_params.req_line_speed = SPEED_AUTO_NEG; |
5359 | bp->advertising = bp->supported; | 6234 | bp->port.advertising = bp->port.supported; |
5360 | break; | 6235 | break; |
5361 | } | 6236 | } |
5362 | 6237 | ||
5363 | bp->link_params.req_flow_ctrl = (bp->link_config & | 6238 | bp->link_params.req_flow_ctrl = (bp->port.link_config & |
5364 | PORT_FEATURE_FLOW_CONTROL_MASK); | 6239 | PORT_FEATURE_FLOW_CONTROL_MASK); |
5365 | if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) && | 6240 | if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) && |
5366 | (!bp->supported & SUPPORTED_Autoneg)) | 6241 | (!bp->port.supported & SUPPORTED_Autoneg)) |
5367 | bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE; | 6242 | bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE; |
5368 | 6243 | ||
5369 | BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x" | 6244 | BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x" |
5370 | " advertising 0x%x\n", | 6245 | " advertising 0x%x\n", |
5371 | bp->link_params.req_line_speed, | 6246 | bp->link_params.req_line_speed, |
5372 | bp->link_params.req_duplex, | 6247 | bp->link_params.req_duplex, |
5373 | bp->link_params.req_flow_ctrl, bp->advertising); | 6248 | bp->link_params.req_flow_ctrl, bp->port.advertising); |
5374 | } | 6249 | } |
5375 | 6250 | ||
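At the tail of bnx2x_link_settings_requested() the requested flow control falls back from AUTO to NONE when the port cannot autonegotiate. A hedged sketch of that fallback, written with the mask applied before the negation (which reads as the intent of the `!bp->port.supported & SUPPORTED_Autoneg` test above); the constant values are illustrative only.

#include <stdio.h>

/* Illustrative values; the real FLOW_CTRL_* and SUPPORTED_Autoneg
 * constants come from the driver and ethtool headers. */
#define FLOW_CTRL_AUTO	0x10
#define FLOW_CTRL_NONE	0x00
#define SUP_AUTONEG	0x01

/* AUTO flow control is only meaningful when the port can actually
 * autonegotiate; otherwise fall back to NONE. */
static unsigned int req_flow_ctrl(unsigned int link_config_fc, unsigned int supported)
{
	unsigned int fc = link_config_fc;

	if ((fc == FLOW_CTRL_AUTO) && !(supported & SUP_AUTONEG))
		fc = FLOW_CTRL_NONE;
	return fc;
}

int main(void)
{
	printf("fc 0x%x\n", req_flow_ctrl(FLOW_CTRL_AUTO, 0));		/* falls back to NONE */
	printf("fc 0x%x\n", req_flow_ctrl(FLOW_CTRL_AUTO, SUP_AUTONEG));	/* stays AUTO */
	return 0;
}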
5376 | static void bnx2x_get_hwinfo(struct bnx2x *bp) | 6251 | static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) |
5377 | { | 6252 | { |
5378 | u32 val, val2, val3, val4, id; | 6253 | int port = BP_PORT(bp); |
5379 | int port = bp->port; | 6254 | u32 val, val2; |
5380 | |||
5381 | bp->shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); | ||
5382 | BNX2X_DEV_INFO("shmem offset is %x\n", bp->shmem_base); | ||
5383 | |||
5384 | /* Get the chip revision id and number. */ | ||
5385 | /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ | ||
5386 | val = REG_RD(bp, MISC_REG_CHIP_NUM); | ||
5387 | id = ((val & 0xffff) << 16); | ||
5388 | val = REG_RD(bp, MISC_REG_CHIP_REV); | ||
5389 | id |= ((val & 0xf) << 12); | ||
5390 | val = REG_RD(bp, MISC_REG_CHIP_METAL); | ||
5391 | id |= ((val & 0xff) << 4); | ||
5392 | REG_RD(bp, MISC_REG_BOND_ID); | ||
5393 | id |= (val & 0xf); | ||
5394 | bp->chip_id = id; | ||
5395 | BNX2X_DEV_INFO("chip ID is %x\n", id); | ||
5396 | 6255 | ||
5397 | bp->link_params.bp = bp; | 6256 | bp->link_params.bp = bp; |
6257 | bp->link_params.port = port; | ||
5398 | 6258 | ||
5399 | if (!bp->shmem_base || (bp->shmem_base != 0xAF900)) { | ||
5400 | BNX2X_DEV_INFO("MCP not active\n"); | ||
5401 | nomcp = 1; | ||
5402 | goto set_mac; | ||
5403 | } | ||
5404 | |||
5405 | val = SHMEM_RD(bp, validity_map[port]); | ||
5406 | if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) | ||
5407 | != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) | ||
5408 | BNX2X_ERR("BAD MCP validity signature\n"); | ||
5409 | |||
5410 | bp->fw_seq = (SHMEM_RD(bp, func_mb[port].drv_mb_header) & | ||
5411 | DRV_MSG_SEQ_NUMBER_MASK); | ||
5412 | |||
5413 | bp->hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); | ||
5414 | bp->board = SHMEM_RD(bp, dev_info.shared_hw_config.board); | ||
5415 | bp->link_params.serdes_config = | 6259 | bp->link_params.serdes_config = |
5416 | SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config); | 6260 | SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config); |
5417 | bp->link_params.lane_config = | 6261 | bp->link_params.lane_config = |
@@ -5423,19 +6267,18 @@ static void bnx2x_get_hwinfo(struct bnx2x *bp) | |||
5423 | SHMEM_RD(bp, | 6267 | SHMEM_RD(bp, |
5424 | dev_info.port_hw_config[port].speed_capability_mask); | 6268 | dev_info.port_hw_config[port].speed_capability_mask); |
5425 | 6269 | ||
5426 | bp->link_config = | 6270 | bp->port.link_config = |
5427 | SHMEM_RD(bp, dev_info.port_feature_config[port].link_config); | 6271 | SHMEM_RD(bp, dev_info.port_feature_config[port].link_config); |
5428 | 6272 | ||
5429 | BNX2X_DEV_INFO("serdes_config (%08x) lane_config (%08x)\n" | 6273 | BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n" |
5430 | KERN_INFO " ext_phy_config (%08x) speed_cap_mask (%08x)" | 6274 | KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x" |
5431 | " link_config (%08x)\n", | 6275 | " link_config 0x%08x\n", |
5432 | bp->link_params.serdes_config, | 6276 | bp->link_params.serdes_config, |
5433 | bp->link_params.lane_config, | 6277 | bp->link_params.lane_config, |
5434 | bp->link_params.ext_phy_config, | 6278 | bp->link_params.ext_phy_config, |
5435 | bp->link_params.speed_cap_mask, | 6279 | bp->link_params.speed_cap_mask, bp->port.link_config); |
5436 | bp->link_config); | ||
5437 | 6280 | ||
5438 | bp->link_params.switch_cfg = (bp->link_config & | 6281 | bp->link_params.switch_cfg = (bp->port.link_config & |
5439 | PORT_FEATURE_CONNECTED_SWITCH_MASK); | 6282 | PORT_FEATURE_CONNECTED_SWITCH_MASK); |
5440 | bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg); | 6283 | bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg); |
5441 | 6284 | ||
@@ -5451,43 +6294,126 @@ static void bnx2x_get_hwinfo(struct bnx2x *bp) | |||
5451 | bp->dev->dev_addr[5] = (u8)(val & 0xff); | 6294 | bp->dev->dev_addr[5] = (u8)(val & 0xff); |
5452 | memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); | 6295 | memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); |
5453 | memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); | 6296 | memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); |
6297 | } | ||
5454 | 6298 | ||
6299 | static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | ||
6300 | { | ||
6301 | int func = BP_FUNC(bp); | ||
6302 | u32 val, val2; | ||
6303 | int rc = 0; | ||
5455 | 6304 | ||
6305 | bnx2x_get_common_hwinfo(bp); | ||
5456 | 6306 | ||
5457 | val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num); | 6307 | bp->e1hov = 0; |
5458 | val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]); | 6308 | bp->e1hmf = 0; |
5459 | val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]); | 6309 | if (CHIP_IS_E1H(bp)) { |
5460 | val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]); | 6310 | bp->mf_config = |
6311 | SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); | ||
5461 | 6312 | ||
5462 | printk(KERN_INFO PFX "part number %X-%X-%X-%X\n", | 6313 | val = |
5463 | val, val2, val3, val4); | 6314 | (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) & |
6315 | FUNC_MF_CFG_E1HOV_TAG_MASK); | ||
6316 | if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { | ||
5464 | 6317 | ||
5465 | /* bc ver */ | 6318 | bp->e1hov = val; |
5466 | if (!nomcp) { | 6319 | bp->e1hmf = 1; |
5467 | bp->bc_ver = val = ((SHMEM_RD(bp, dev_info.bc_rev)) >> 8); | 6320 | BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d " |
5468 | BNX2X_DEV_INFO("bc_ver %X\n", val); | 6321 | "(0x%04x)\n", |
5469 | if (val < BNX2X_BC_VER) { | 6322 | func, bp->e1hov, bp->e1hov); |
5470 | /* for now only warn | 6323 | } else { |
5471 | * later we might need to enforce this */ | 6324 | BNX2X_DEV_INFO("Single function mode\n"); |
5472 | BNX2X_ERR("This driver needs bc_ver %X but found %X," | 6325 | if (BP_E1HVN(bp)) { |
5473 | " please upgrade BC\n", BNX2X_BC_VER, val); | 6326 | BNX2X_ERR("!!! No valid E1HOV for func %d," |
6327 | " aborting\n", func); | ||
6328 | rc = -EPERM; | ||
6329 | } | ||
5474 | } | 6330 | } |
5475 | } else { | ||
5476 | bp->bc_ver = 0; | ||
5477 | } | 6331 | } |
5478 | 6332 | ||
5479 | val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4); | 6333 | if (!BP_NOMCP(bp)) { |
5480 | bp->flash_size = (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE)); | 6334 | bnx2x_get_port_hwinfo(bp); |
5481 | BNX2X_DEV_INFO("flash_size 0x%x (%d)\n", | 6335 | |
5482 | bp->flash_size, bp->flash_size); | 6336 | bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) & |
6337 | DRV_MSG_SEQ_NUMBER_MASK); | ||
6338 | BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); | ||
6339 | } | ||
6340 | |||
6341 | if (IS_E1HMF(bp)) { | ||
6342 | val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper); | ||
6343 | val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower); | ||
6344 | if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) && | ||
6345 | (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) { | ||
6346 | bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff); | ||
6347 | bp->dev->dev_addr[1] = (u8)(val2 & 0xff); | ||
6348 | bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff); | ||
6349 | bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff); | ||
6350 | bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff); | ||
6351 | bp->dev->dev_addr[5] = (u8)(val & 0xff); | ||
6352 | memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, | ||
6353 | ETH_ALEN); | ||
6354 | memcpy(bp->dev->perm_addr, bp->dev->dev_addr, | ||
6355 | ETH_ALEN); | ||
6356 | } | ||
5483 | 6357 | ||
5484 | return; | 6358 | return rc; |
6359 | } | ||
6360 | |||
6361 | if (BP_NOMCP(bp)) { | ||
6362 | /* only supposed to happen on emulation/FPGA */ | ||
6363 | BNX2X_ERR("warning random MAC workaround active\n"); | ||
6364 | random_ether_addr(bp->dev->dev_addr); | ||
6365 | memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); | ||
6366 | } | ||
6367 | |||
6368 | return rc; | ||
6369 | } | ||
6370 | |||
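In the IS_E1HMF branch above, the per-function MAC address is unpacked from two shared-memory words: mac_upper carries the top 16 bits and mac_lower the bottom 32. A self-contained sketch of that byte unpacking; the sample values are made up.

#include <stdint.h>
#include <stdio.h>

/* Rebuild a 6-byte MAC from the two words read out of mf_cfg:
 * upper -> bytes 0..1, lower -> bytes 2..5. */
static void mac_from_shmem(uint32_t upper, uint32_t lower, uint8_t mac[6])
{
	mac[0] = (uint8_t)(upper >> 8);
	mac[1] = (uint8_t)upper;
	mac[2] = (uint8_t)(lower >> 24);
	mac[3] = (uint8_t)(lower >> 16);
	mac[4] = (uint8_t)(lower >> 8);
	mac[5] = (uint8_t)lower;
}

int main(void)
{
	uint8_t mac[6];

	mac_from_shmem(0x0010, 0x18a1b2c3, mac);	/* example words only */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}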
6371 | static int __devinit bnx2x_init_bp(struct bnx2x *bp) | ||
6372 | { | ||
6373 | int func = BP_FUNC(bp); | ||
6374 | int rc; | ||
6375 | |||
6376 | if (nomcp) | ||
6377 | bp->flags |= NO_MCP_FLAG; | ||
6378 | |||
6379 | mutex_init(&bp->port.phy_mutex); | ||
6380 | |||
6381 | INIT_WORK(&bp->sp_task, bnx2x_sp_task); | ||
6382 | INIT_WORK(&bp->reset_task, bnx2x_reset_task); | ||
6383 | |||
6384 | rc = bnx2x_get_hwinfo(bp); | ||
6385 | |||
6386 | /* need to reset chip if undi was active */ | ||
6387 | if (!BP_NOMCP(bp)) | ||
6388 | bnx2x_undi_unload(bp); | ||
6389 | |||
6390 | if (CHIP_REV_IS_FPGA(bp)) | ||
6391 | printk(KERN_ERR PFX "FPGA detected\n"); | ||
5485 | 6392 | ||
5486 | set_mac: /* only supposed to happen on emulation/FPGA */ | 6393 | if (BP_NOMCP(bp) && (func == 0)) |
5487 | BNX2X_ERR("warning rendom MAC workaround active\n"); | 6394 | printk(KERN_ERR PFX |
5488 | random_ether_addr(bp->dev->dev_addr); | 6395 | "MCP disabled, must load devices in order!\n"); |
5489 | memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6); | ||
5490 | 6396 | ||
6397 | bp->tx_ring_size = MAX_TX_AVAIL; | ||
6398 | bp->rx_ring_size = MAX_RX_AVAIL; | ||
6399 | |||
6400 | bp->rx_csum = 1; | ||
6401 | bp->rx_offset = 0; | ||
6402 | |||
6403 | bp->tx_ticks = 50; | ||
6404 | bp->rx_ticks = 25; | ||
6405 | |||
6406 | bp->stats_ticks = 1000000 & 0xffff00; | ||
6407 | |||
6408 | bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ); | ||
6409 | bp->current_interval = (poll ? poll : bp->timer_interval); | ||
6410 | |||
6411 | init_timer(&bp->timer); | ||
6412 | bp->timer.expires = jiffies + bp->current_interval; | ||
6413 | bp->timer.data = (unsigned long) bp; | ||
6414 | bp->timer.function = bnx2x_timer; | ||
6415 | |||
6416 | return rc; | ||
5491 | } | 6417 | } |
5492 | 6418 | ||
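bnx2x_init_bp() derives the periodic timer period from the chip type and lets the `poll` module parameter override it. A small sketch of that selection; HZ is given an illustrative value here, not the kernel's configured tick rate.

#include <stdio.h>

#define HZ 1000	/* illustrative tick rate only */

/* Slow (emulation/FPGA) chips get a 5*HZ period, real silicon gets HZ,
 * and a non-zero "poll" parameter overrides both. */
static int pick_interval(int chip_is_slow, int poll)
{
	int timer_interval = chip_is_slow ? 5 * HZ : HZ;

	return poll ? poll : timer_interval;
}

int main(void)
{
	printf("asic, no poll  : %d\n", pick_interval(0, 0));
	printf("fpga, no poll  : %d\n", pick_interval(1, 0));
	printf("poll=50 forced : %d\n", pick_interval(0, 50));
	return 0;
}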
5493 | /* | 6419 | /* |
@@ -5500,8 +6426,8 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
5500 | { | 6426 | { |
5501 | struct bnx2x *bp = netdev_priv(dev); | 6427 | struct bnx2x *bp = netdev_priv(dev); |
5502 | 6428 | ||
5503 | cmd->supported = bp->supported; | 6429 | cmd->supported = bp->port.supported; |
5504 | cmd->advertising = bp->advertising; | 6430 | cmd->advertising = bp->port.advertising; |
5505 | 6431 | ||
5506 | if (netif_carrier_ok(dev)) { | 6432 | if (netif_carrier_ok(dev)) { |
5507 | cmd->speed = bp->link_vars.line_speed; | 6433 | cmd->speed = bp->link_vars.line_speed; |
@@ -5510,6 +6436,14 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
5510 | cmd->speed = bp->link_params.req_line_speed; | 6436 | cmd->speed = bp->link_params.req_line_speed; |
5511 | cmd->duplex = bp->link_params.req_duplex; | 6437 | cmd->duplex = bp->link_params.req_duplex; |
5512 | } | 6438 | } |
6439 | if (IS_E1HMF(bp)) { | ||
6440 | u16 vn_max_rate; | ||
6441 | |||
6442 | vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >> | ||
6443 | FUNC_MF_CFG_MAX_BW_SHIFT) * 100; | ||
6444 | if (vn_max_rate < cmd->speed) | ||
6445 | cmd->speed = vn_max_rate; | ||
6446 | } | ||
5513 | 6447 | ||
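The new IS_E1HMF clause above caps the speed reported to ethtool at the per-function maximum bandwidth, which mf_config stores in units of 100 Mbps. A sketch of that clamp; the MAX_BW mask and shift below are assumed stand-ins for the FUNC_MF_CFG_* definitions.

#include <stdint.h>
#include <stdio.h>

#define MAX_BW_MASK	0x00ff0000	/* illustrative field layout */
#define MAX_BW_SHIFT	16

/* Never report more than this function's share of the link. */
static unsigned int clamp_speed(unsigned int speed, uint32_t mf_config)
{
	unsigned int vn_max_rate =
		((mf_config & MAX_BW_MASK) >> MAX_BW_SHIFT) * 100;

	return (vn_max_rate < speed) ? vn_max_rate : speed;
}

int main(void)
{
	/* link is up at 10G but this function is capped to 25 * 100 Mbps */
	printf("reported speed %u\n", clamp_speed(10000, 25u << MAX_BW_SHIFT));
	return 0;
}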
5514 | if (bp->link_params.switch_cfg == SWITCH_CFG_10G) { | 6448 | if (bp->link_params.switch_cfg == SWITCH_CFG_10G) { |
5515 | u32 ext_phy_type = | 6449 | u32 ext_phy_type = |
@@ -5541,7 +6475,7 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
5541 | } else | 6475 | } else |
5542 | cmd->port = PORT_TP; | 6476 | cmd->port = PORT_TP; |
5543 | 6477 | ||
5544 | cmd->phy_address = bp->phy_addr; | 6478 | cmd->phy_address = bp->port.phy_addr; |
5545 | cmd->transceiver = XCVR_INTERNAL; | 6479 | cmd->transceiver = XCVR_INTERNAL; |
5546 | 6480 | ||
5547 | if (bp->link_params.req_line_speed == SPEED_AUTO_NEG) | 6481 | if (bp->link_params.req_line_speed == SPEED_AUTO_NEG) |
@@ -5568,6 +6502,9 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
5568 | struct bnx2x *bp = netdev_priv(dev); | 6502 | struct bnx2x *bp = netdev_priv(dev); |
5569 | u32 advertising; | 6503 | u32 advertising; |
5570 | 6504 | ||
6505 | if (IS_E1HMF(bp)) | ||
6506 | return 0; | ||
6507 | |||
5571 | DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n" | 6508 | DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n" |
5572 | DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n" | 6509 | DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n" |
5573 | DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n" | 6510 | DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n" |
@@ -5577,24 +6514,25 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
5577 | cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); | 6514 | cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); |
5578 | 6515 | ||
5579 | if (cmd->autoneg == AUTONEG_ENABLE) { | 6516 | if (cmd->autoneg == AUTONEG_ENABLE) { |
5580 | if (!(bp->supported & SUPPORTED_Autoneg)) { | 6517 | if (!(bp->port.supported & SUPPORTED_Autoneg)) { |
5581 | DP(NETIF_MSG_LINK, "Aotoneg not supported\n"); | 6518 | DP(NETIF_MSG_LINK, "Autoneg not supported\n"); |
5582 | return -EINVAL; | 6519 | return -EINVAL; |
5583 | } | 6520 | } |
5584 | 6521 | ||
5585 | /* advertise the requested speed and duplex if supported */ | 6522 | /* advertise the requested speed and duplex if supported */ |
5586 | cmd->advertising &= bp->supported; | 6523 | cmd->advertising &= bp->port.supported; |
5587 | 6524 | ||
5588 | bp->link_params.req_line_speed = SPEED_AUTO_NEG; | 6525 | bp->link_params.req_line_speed = SPEED_AUTO_NEG; |
5589 | bp->link_params.req_duplex = DUPLEX_FULL; | 6526 | bp->link_params.req_duplex = DUPLEX_FULL; |
5590 | bp->advertising |= (ADVERTISED_Autoneg | cmd->advertising); | 6527 | bp->port.advertising |= (ADVERTISED_Autoneg | |
6528 | cmd->advertising); | ||
5591 | 6529 | ||
5592 | } else { /* forced speed */ | 6530 | } else { /* forced speed */ |
5593 | /* advertise the requested speed and duplex if supported */ | 6531 | /* advertise the requested speed and duplex if supported */ |
5594 | switch (cmd->speed) { | 6532 | switch (cmd->speed) { |
5595 | case SPEED_10: | 6533 | case SPEED_10: |
5596 | if (cmd->duplex == DUPLEX_FULL) { | 6534 | if (cmd->duplex == DUPLEX_FULL) { |
5597 | if (!(bp->supported & | 6535 | if (!(bp->port.supported & |
5598 | SUPPORTED_10baseT_Full)) { | 6536 | SUPPORTED_10baseT_Full)) { |
5599 | DP(NETIF_MSG_LINK, | 6537 | DP(NETIF_MSG_LINK, |
5600 | "10M full not supported\n"); | 6538 | "10M full not supported\n"); |
@@ -5604,7 +6542,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
5604 | advertising = (ADVERTISED_10baseT_Full | | 6542 | advertising = (ADVERTISED_10baseT_Full | |
5605 | ADVERTISED_TP); | 6543 | ADVERTISED_TP); |
5606 | } else { | 6544 | } else { |
5607 | if (!(bp->supported & | 6545 | if (!(bp->port.supported & |
5608 | SUPPORTED_10baseT_Half)) { | 6546 | SUPPORTED_10baseT_Half)) { |
5609 | DP(NETIF_MSG_LINK, | 6547 | DP(NETIF_MSG_LINK, |
5610 | "10M half not supported\n"); | 6548 | "10M half not supported\n"); |
@@ -5618,7 +6556,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
5618 | 6556 | ||
5619 | case SPEED_100: | 6557 | case SPEED_100: |
5620 | if (cmd->duplex == DUPLEX_FULL) { | 6558 | if (cmd->duplex == DUPLEX_FULL) { |
5621 | if (!(bp->supported & | 6559 | if (!(bp->port.supported & |
5622 | SUPPORTED_100baseT_Full)) { | 6560 | SUPPORTED_100baseT_Full)) { |
5623 | DP(NETIF_MSG_LINK, | 6561 | DP(NETIF_MSG_LINK, |
5624 | "100M full not supported\n"); | 6562 | "100M full not supported\n"); |
@@ -5628,7 +6566,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
5628 | advertising = (ADVERTISED_100baseT_Full | | 6566 | advertising = (ADVERTISED_100baseT_Full | |
5629 | ADVERTISED_TP); | 6567 | ADVERTISED_TP); |
5630 | } else { | 6568 | } else { |
5631 | if (!(bp->supported & | 6569 | if (!(bp->port.supported & |
5632 | SUPPORTED_100baseT_Half)) { | 6570 | SUPPORTED_100baseT_Half)) { |
5633 | DP(NETIF_MSG_LINK, | 6571 | DP(NETIF_MSG_LINK, |
5634 | "100M half not supported\n"); | 6572 | "100M half not supported\n"); |
@@ -5646,7 +6584,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
5646 | return -EINVAL; | 6584 | return -EINVAL; |
5647 | } | 6585 | } |
5648 | 6586 | ||
5649 | if (!(bp->supported & SUPPORTED_1000baseT_Full)) { | 6587 | if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) { |
5650 | DP(NETIF_MSG_LINK, "1G full not supported\n"); | 6588 | DP(NETIF_MSG_LINK, "1G full not supported\n"); |
5651 | return -EINVAL; | 6589 | return -EINVAL; |
5652 | } | 6590 | } |
@@ -5662,7 +6600,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
5662 | return -EINVAL; | 6600 | return -EINVAL; |
5663 | } | 6601 | } |
5664 | 6602 | ||
5665 | if (!(bp->supported & SUPPORTED_2500baseX_Full)) { | 6603 | if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) { |
5666 | DP(NETIF_MSG_LINK, | 6604 | DP(NETIF_MSG_LINK, |
5667 | "2.5G full not supported\n"); | 6605 | "2.5G full not supported\n"); |
5668 | return -EINVAL; | 6606 | return -EINVAL; |
@@ -5678,7 +6616,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
5678 | return -EINVAL; | 6616 | return -EINVAL; |
5679 | } | 6617 | } |
5680 | 6618 | ||
5681 | if (!(bp->supported & SUPPORTED_10000baseT_Full)) { | 6619 | if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) { |
5682 | DP(NETIF_MSG_LINK, "10G full not supported\n"); | 6620 | DP(NETIF_MSG_LINK, "10G full not supported\n"); |
5683 | return -EINVAL; | 6621 | return -EINVAL; |
5684 | } | 6622 | } |
@@ -5694,16 +6632,18 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
5694 | 6632 | ||
5695 | bp->link_params.req_line_speed = cmd->speed; | 6633 | bp->link_params.req_line_speed = cmd->speed; |
5696 | bp->link_params.req_duplex = cmd->duplex; | 6634 | bp->link_params.req_duplex = cmd->duplex; |
5697 | bp->advertising = advertising; | 6635 | bp->port.advertising = advertising; |
5698 | } | 6636 | } |
5699 | 6637 | ||
5700 | DP(NETIF_MSG_LINK, "req_line_speed %d\n" | 6638 | DP(NETIF_MSG_LINK, "req_line_speed %d\n" |
5701 | DP_LEVEL " req_duplex %d advertising 0x%x\n", | 6639 | DP_LEVEL " req_duplex %d advertising 0x%x\n", |
5702 | bp->link_params.req_line_speed, bp->link_params.req_duplex, | 6640 | bp->link_params.req_line_speed, bp->link_params.req_duplex, |
5703 | bp->advertising); | 6641 | bp->port.advertising); |
5704 | 6642 | ||
5705 | bnx2x_stop_stats(bp); | 6643 | if (netif_running(dev)) { |
5706 | bnx2x_link_set(bp); | 6644 | bnx2x_stop_stats(bp); |
6645 | bnx2x_link_set(bp); | ||
6646 | } | ||
5707 | 6647 | ||
5708 | return 0; | 6648 | return 0; |
5709 | } | 6649 | } |
@@ -5720,21 +6660,23 @@ static void bnx2x_get_drvinfo(struct net_device *dev, | |||
5720 | strcpy(info->version, DRV_MODULE_VERSION); | 6660 | strcpy(info->version, DRV_MODULE_VERSION); |
5721 | 6661 | ||
5722 | phy_fw_ver[0] = '\0'; | 6662 | phy_fw_ver[0] = '\0'; |
5723 | bnx2x_phy_hw_lock(bp); | 6663 | if (bp->port.pmf) { |
5724 | bnx2x_get_ext_phy_fw_version(&bp->link_params, | 6664 | bnx2x_phy_hw_lock(bp); |
5725 | (bp->state != BNX2X_STATE_CLOSED), | 6665 | bnx2x_get_ext_phy_fw_version(&bp->link_params, |
5726 | phy_fw_ver, PHY_FW_VER_LEN); | 6666 | (bp->state != BNX2X_STATE_CLOSED), |
5727 | bnx2x_phy_hw_unlock(bp); | 6667 | phy_fw_ver, PHY_FW_VER_LEN); |
6668 | bnx2x_phy_hw_unlock(bp); | ||
6669 | } | ||
5728 | 6670 | ||
5729 | snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s", | 6671 | snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s", |
5730 | BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION, | 6672 | BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION, |
5731 | BCM_5710_FW_REVISION_VERSION, | 6673 | BCM_5710_FW_REVISION_VERSION, |
5732 | BCM_5710_FW_COMPILE_FLAGS, bp->bc_ver, | 6674 | BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver, |
5733 | ((phy_fw_ver[0] != '\0')? " PHY:":""), phy_fw_ver); | 6675 | ((phy_fw_ver[0] != '\0')? " PHY:":""), phy_fw_ver); |
5734 | strcpy(info->bus_info, pci_name(bp->pdev)); | 6676 | strcpy(info->bus_info, pci_name(bp->pdev)); |
5735 | info->n_stats = BNX2X_NUM_STATS; | 6677 | info->n_stats = BNX2X_NUM_STATS; |
5736 | info->testinfo_len = BNX2X_NUM_TESTS; | 6678 | info->testinfo_len = BNX2X_NUM_TESTS; |
5737 | info->eedump_len = bp->flash_size; | 6679 | info->eedump_len = bp->common.flash_size; |
5738 | info->regdump_len = 0; | 6680 | info->regdump_len = 0; |
5739 | } | 6681 | } |
5740 | 6682 | ||
@@ -5767,9 +6709,9 @@ static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | |||
5767 | return -EINVAL; | 6709 | return -EINVAL; |
5768 | 6710 | ||
5769 | bp->wol = 1; | 6711 | bp->wol = 1; |
5770 | } else { | 6712 | } else |
5771 | bp->wol = 0; | 6713 | bp->wol = 0; |
5772 | } | 6714 | |
5773 | return 0; | 6715 | return 0; |
5774 | } | 6716 | } |
5775 | 6717 | ||
@@ -5792,13 +6734,13 @@ static int bnx2x_nway_reset(struct net_device *dev) | |||
5792 | { | 6734 | { |
5793 | struct bnx2x *bp = netdev_priv(dev); | 6735 | struct bnx2x *bp = netdev_priv(dev); |
5794 | 6736 | ||
5795 | if (bp->state != BNX2X_STATE_OPEN) { | 6737 | if (!bp->port.pmf) |
5796 | DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state); | 6738 | return 0; |
5797 | return -EAGAIN; | ||
5798 | } | ||
5799 | 6739 | ||
5800 | bnx2x_stop_stats(bp); | 6740 | if (netif_running(dev)) { |
5801 | bnx2x_link_set(bp); | 6741 | bnx2x_stop_stats(bp); |
6742 | bnx2x_link_set(bp); | ||
6743 | } | ||
5802 | 6744 | ||
5803 | return 0; | 6745 | return 0; |
5804 | } | 6746 | } |
@@ -5807,12 +6749,12 @@ static int bnx2x_get_eeprom_len(struct net_device *dev) | |||
5807 | { | 6749 | { |
5808 | struct bnx2x *bp = netdev_priv(dev); | 6750 | struct bnx2x *bp = netdev_priv(dev); |
5809 | 6751 | ||
5810 | return bp->flash_size; | 6752 | return bp->common.flash_size; |
5811 | } | 6753 | } |
5812 | 6754 | ||
5813 | static int bnx2x_acquire_nvram_lock(struct bnx2x *bp) | 6755 | static int bnx2x_acquire_nvram_lock(struct bnx2x *bp) |
5814 | { | 6756 | { |
5815 | int port = bp->port; | 6757 | int port = BP_PORT(bp); |
5816 | int count, i; | 6758 | int count, i; |
5817 | u32 val = 0; | 6759 | u32 val = 0; |
5818 | 6760 | ||
@@ -5834,7 +6776,7 @@ static int bnx2x_acquire_nvram_lock(struct bnx2x *bp) | |||
5834 | } | 6776 | } |
5835 | 6777 | ||
5836 | if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) { | 6778 | if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) { |
5837 | DP(NETIF_MSG_NVM, "cannot get access to nvram interface\n"); | 6779 | DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n"); |
5838 | return -EBUSY; | 6780 | return -EBUSY; |
5839 | } | 6781 | } |
5840 | 6782 | ||
@@ -5843,7 +6785,7 @@ static int bnx2x_acquire_nvram_lock(struct bnx2x *bp) | |||
5843 | 6785 | ||
5844 | static int bnx2x_release_nvram_lock(struct bnx2x *bp) | 6786 | static int bnx2x_release_nvram_lock(struct bnx2x *bp) |
5845 | { | 6787 | { |
5846 | int port = bp->port; | 6788 | int port = BP_PORT(bp); |
5847 | int count, i; | 6789 | int count, i; |
5848 | u32 val = 0; | 6790 | u32 val = 0; |
5849 | 6791 | ||
@@ -5865,7 +6807,7 @@ static int bnx2x_release_nvram_lock(struct bnx2x *bp) | |||
5865 | } | 6807 | } |
5866 | 6808 | ||
5867 | if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) { | 6809 | if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) { |
5868 | DP(NETIF_MSG_NVM, "cannot free access to nvram interface\n"); | 6810 | DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n"); |
5869 | return -EBUSY; | 6811 | return -EBUSY; |
5870 | } | 6812 | } |
5871 | 6813 | ||
@@ -5929,7 +6871,6 @@ static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val, | |||
5929 | 6871 | ||
5930 | if (val & MCPR_NVM_COMMAND_DONE) { | 6872 | if (val & MCPR_NVM_COMMAND_DONE) { |
5931 | val = REG_RD(bp, MCP_REG_MCPR_NVM_READ); | 6873 | val = REG_RD(bp, MCP_REG_MCPR_NVM_READ); |
5932 | DP(NETIF_MSG_NVM, "val 0x%08x\n", val); | ||
5933 | /* we read nvram data in cpu order | 6874 | /* we read nvram data in cpu order |
5934 | * but ethtool sees it as an array of bytes | 6875 | * but ethtool sees it as an array of bytes |
5935 | * converting to big-endian will do the work */ | 6876 | * converting to big-endian will do the work */ |
@@ -5951,16 +6892,16 @@ static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf, | |||
5951 | u32 val; | 6892 | u32 val; |
5952 | 6893 | ||
5953 | if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { | 6894 | if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { |
5954 | DP(NETIF_MSG_NVM, | 6895 | DP(BNX2X_MSG_NVM, |
5955 | "Invalid parameter: offset 0x%x buf_size 0x%x\n", | 6896 | "Invalid parameter: offset 0x%x buf_size 0x%x\n", |
5956 | offset, buf_size); | 6897 | offset, buf_size); |
5957 | return -EINVAL; | 6898 | return -EINVAL; |
5958 | } | 6899 | } |
5959 | 6900 | ||
5960 | if (offset + buf_size > bp->flash_size) { | 6901 | if (offset + buf_size > bp->common.flash_size) { |
5961 | DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +" | 6902 | DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +" |
5962 | " buf_size (0x%x) > flash_size (0x%x)\n", | 6903 | " buf_size (0x%x) > flash_size (0x%x)\n", |
5963 | offset, buf_size, bp->flash_size); | 6904 | offset, buf_size, bp->common.flash_size); |
5964 | return -EINVAL; | 6905 | return -EINVAL; |
5965 | } | 6906 | } |
5966 | 6907 | ||
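Both the NVRAM read and write paths gate every access on dword alignment and on the request staying inside the flash. A compact sketch of those parameter checks, using an arbitrary example flash size:

#include <stdint.h>
#include <stdio.h>

/* Offset and length must be non-zero multiples of 4 and must not run
 * past the end of the flash, as in bnx2x_nvram_read()/_write(). */
static int nvram_args_ok(uint32_t offset, uint32_t buf_size, uint32_t flash_size)
{
	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0))
		return 0;	/* misaligned or empty */
	if (offset + buf_size > flash_size)
		return 0;	/* overruns the flash */
	return 1;
}

int main(void)
{
	uint32_t flash_size = 0x40000;	/* 256 KiB, example value only */

	printf("%d\n", nvram_args_ok(0x100, 0x40, flash_size));	/* 1: valid */
	printf("%d\n", nvram_args_ok(0x102, 0x40, flash_size));	/* 0: misaligned */
	printf("%d\n", nvram_args_ok(0x3ffe0, 0x40, flash_size));	/* 0: overruns */
	return 0;
}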
@@ -6004,7 +6945,7 @@ static int bnx2x_get_eeprom(struct net_device *dev, | |||
6004 | struct bnx2x *bp = netdev_priv(dev); | 6945 | struct bnx2x *bp = netdev_priv(dev); |
6005 | int rc; | 6946 | int rc; |
6006 | 6947 | ||
6007 | DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n" | 6948 | DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n" |
6008 | DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n", | 6949 | DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n", |
6009 | eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset, | 6950 | eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset, |
6010 | eeprom->len, eeprom->len); | 6951 | eeprom->len, eeprom->len); |
@@ -6066,10 +7007,10 @@ static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf, | |||
6066 | u32 align_offset; | 7007 | u32 align_offset; |
6067 | u32 val; | 7008 | u32 val; |
6068 | 7009 | ||
6069 | if (offset + buf_size > bp->flash_size) { | 7010 | if (offset + buf_size > bp->common.flash_size) { |
6070 | DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +" | 7011 | DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +" |
6071 | " buf_size (0x%x) > flash_size (0x%x)\n", | 7012 | " buf_size (0x%x) > flash_size (0x%x)\n", |
6072 | offset, buf_size, bp->flash_size); | 7013 | offset, buf_size, bp->common.flash_size); |
6073 | return -EINVAL; | 7014 | return -EINVAL; |
6074 | } | 7015 | } |
6075 | 7016 | ||
@@ -6093,8 +7034,6 @@ static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf, | |||
6093 | * convert it back to cpu order */ | 7034 | * convert it back to cpu order */ |
6094 | val = be32_to_cpu(val); | 7035 | val = be32_to_cpu(val); |
6095 | 7036 | ||
6096 | DP(NETIF_MSG_NVM, "val 0x%08x\n", val); | ||
6097 | |||
6098 | rc = bnx2x_nvram_write_dword(bp, align_offset, val, | 7037 | rc = bnx2x_nvram_write_dword(bp, align_offset, val, |
6099 | cmd_flags); | 7038 | cmd_flags); |
6100 | } | 7039 | } |
@@ -6114,21 +7053,20 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf, | |||
6114 | u32 val; | 7053 | u32 val; |
6115 | u32 written_so_far; | 7054 | u32 written_so_far; |
6116 | 7055 | ||
6117 | if (buf_size == 1) { /* ethtool */ | 7056 | if (buf_size == 1) /* ethtool */ |
6118 | return bnx2x_nvram_write1(bp, offset, data_buf, buf_size); | 7057 | return bnx2x_nvram_write1(bp, offset, data_buf, buf_size); |
6119 | } | ||
6120 | 7058 | ||
6121 | if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { | 7059 | if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { |
6122 | DP(NETIF_MSG_NVM, | 7060 | DP(BNX2X_MSG_NVM, |
6123 | "Invalid parameter: offset 0x%x buf_size 0x%x\n", | 7061 | "Invalid parameter: offset 0x%x buf_size 0x%x\n", |
6124 | offset, buf_size); | 7062 | offset, buf_size); |
6125 | return -EINVAL; | 7063 | return -EINVAL; |
6126 | } | 7064 | } |
6127 | 7065 | ||
6128 | if (offset + buf_size > bp->flash_size) { | 7066 | if (offset + buf_size > bp->common.flash_size) { |
6129 | DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +" | 7067 | DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +" |
6130 | " buf_size (0x%x) > flash_size (0x%x)\n", | 7068 | " buf_size (0x%x) > flash_size (0x%x)\n", |
6131 | offset, buf_size, bp->flash_size); | 7069 | offset, buf_size, bp->common.flash_size); |
6132 | return -EINVAL; | 7070 | return -EINVAL; |
6133 | } | 7071 | } |
6134 | 7072 | ||
@@ -6151,7 +7089,6 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf, | |||
6151 | cmd_flags |= MCPR_NVM_COMMAND_FIRST; | 7089 | cmd_flags |= MCPR_NVM_COMMAND_FIRST; |
6152 | 7090 | ||
6153 | memcpy(&val, data_buf, 4); | 7091 | memcpy(&val, data_buf, 4); |
6154 | DP(NETIF_MSG_NVM, "val 0x%08x\n", val); | ||
6155 | 7092 | ||
6156 | rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags); | 7093 | rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags); |
6157 | 7094 | ||
@@ -6175,7 +7112,7 @@ static int bnx2x_set_eeprom(struct net_device *dev, | |||
6175 | struct bnx2x *bp = netdev_priv(dev); | 7112 | struct bnx2x *bp = netdev_priv(dev); |
6176 | int rc; | 7113 | int rc; |
6177 | 7114 | ||
6178 | DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n" | 7115 | DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n" |
6179 | DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n", | 7116 | DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n", |
6180 | eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset, | 7117 | eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset, |
6181 | eeprom->len, eeprom->len); | 7118 | eeprom->len, eeprom->len); |
@@ -6183,20 +7120,23 @@ static int bnx2x_set_eeprom(struct net_device *dev, | |||
6183 | /* parameters already validated in ethtool_set_eeprom */ | 7120 | /* parameters already validated in ethtool_set_eeprom */ |
6184 | 7121 | ||
6185 | /* If the magic number is PHY (0x00504859) upgrade the PHY FW */ | 7122 | /* If the magic number is PHY (0x00504859) upgrade the PHY FW */ |
6186 | if (eeprom->magic == 0x00504859) { | 7123 | if (eeprom->magic == 0x00504859) |
6187 | 7124 | if (bp->port.pmf) { | |
6188 | bnx2x_phy_hw_lock(bp); | 7125 | |
6189 | rc = bnx2x_flash_download(bp, bp->port, | 7126 | bnx2x_phy_hw_lock(bp); |
6190 | bp->link_params.ext_phy_config, | 7127 | rc = bnx2x_flash_download(bp, BP_PORT(bp), |
6191 | (bp->state != BNX2X_STATE_CLOSED), | 7128 | bp->link_params.ext_phy_config, |
6192 | eebuf, eeprom->len); | 7129 | (bp->state != BNX2X_STATE_CLOSED), |
6193 | rc |= bnx2x_link_reset(&bp->link_params, | 7130 | eebuf, eeprom->len); |
6194 | &bp->link_vars); | 7131 | rc |= bnx2x_link_reset(&bp->link_params, |
6195 | rc |= bnx2x_phy_init(&bp->link_params, | 7132 | &bp->link_vars); |
6196 | &bp->link_vars); | 7133 | rc |= bnx2x_phy_init(&bp->link_params, |
6197 | bnx2x_phy_hw_unlock(bp); | 7134 | &bp->link_vars); |
6198 | 7135 | bnx2x_phy_hw_unlock(bp); | |
6199 | } else | 7136 | |
7137 | } else /* Only the PMF can access the PHY */ | ||
7138 | return -EINVAL; | ||
7139 | else | ||
6200 | rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len); | 7140 | rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len); |
6201 | 7141 | ||
6202 | return rc; | 7142 | return rc; |
@@ -6234,7 +7174,7 @@ static int bnx2x_set_coalesce(struct net_device *dev, | |||
6234 | bp->stats_ticks = 0xffff00; | 7174 | bp->stats_ticks = 0xffff00; |
6235 | bp->stats_ticks &= 0xffff00; | 7175 | bp->stats_ticks &= 0xffff00; |
6236 | 7176 | ||
6237 | if (netif_running(bp->dev)) | 7177 | if (netif_running(dev)) |
6238 | bnx2x_update_coalesce(bp); | 7178 | bnx2x_update_coalesce(bp); |
6239 | 7179 | ||
6240 | return 0; | 7180 | return 0; |
@@ -6261,6 +7201,7 @@ static int bnx2x_set_ringparam(struct net_device *dev, | |||
6261 | struct ethtool_ringparam *ering) | 7201 | struct ethtool_ringparam *ering) |
6262 | { | 7202 | { |
6263 | struct bnx2x *bp = netdev_priv(dev); | 7203 | struct bnx2x *bp = netdev_priv(dev); |
7204 | int rc = 0; | ||
6264 | 7205 | ||
6265 | if ((ering->rx_pending > MAX_RX_AVAIL) || | 7206 | if ((ering->rx_pending > MAX_RX_AVAIL) || |
6266 | (ering->tx_pending > MAX_TX_AVAIL) || | 7207 | (ering->tx_pending > MAX_TX_AVAIL) || |
@@ -6270,12 +7211,12 @@ static int bnx2x_set_ringparam(struct net_device *dev, | |||
6270 | bp->rx_ring_size = ering->rx_pending; | 7211 | bp->rx_ring_size = ering->rx_pending; |
6271 | bp->tx_ring_size = ering->tx_pending; | 7212 | bp->tx_ring_size = ering->tx_pending; |
6272 | 7213 | ||
6273 | if (netif_running(bp->dev)) { | 7214 | if (netif_running(dev)) { |
6274 | bnx2x_nic_unload(bp, 0); | 7215 | bnx2x_nic_unload(bp, UNLOAD_NORMAL); |
6275 | bnx2x_nic_load(bp, 0); | 7216 | rc = bnx2x_nic_load(bp, LOAD_NORMAL); |
6276 | } | 7217 | } |
6277 | 7218 | ||
6278 | return 0; | 7219 | return rc; |
6279 | } | 7220 | } |
6280 | 7221 | ||
6281 | static void bnx2x_get_pauseparam(struct net_device *dev, | 7222 | static void bnx2x_get_pauseparam(struct net_device *dev, |
@@ -6301,6 +7242,9 @@ static int bnx2x_set_pauseparam(struct net_device *dev, | |||
6301 | { | 7242 | { |
6302 | struct bnx2x *bp = netdev_priv(dev); | 7243 | struct bnx2x *bp = netdev_priv(dev); |
6303 | 7244 | ||
7245 | if (IS_E1HMF(bp)) | ||
7246 | return 0; | ||
7247 | |||
6304 | DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n" | 7248 | DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n" |
6305 | DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n", | 7249 | DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n", |
6306 | epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause); | 7250 | epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause); |
@@ -6317,7 +7261,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev, | |||
6317 | bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE; | 7261 | bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE; |
6318 | 7262 | ||
6319 | if (epause->autoneg) { | 7263 | if (epause->autoneg) { |
6320 | if (!(bp->supported & SUPPORTED_Autoneg)) { | 7264 | if (!(bp->port.supported & SUPPORTED_Autoneg)) { |
6321 | DP(NETIF_MSG_LINK, "Autoneg not supported\n"); | 7265 | DP(NETIF_MSG_LINK, "Autoneg not supported\n"); |
6322 | return -EINVAL; | 7266 | return -EINVAL; |
6323 | } | 7267 | } |
@@ -6328,8 +7272,11 @@ static int bnx2x_set_pauseparam(struct net_device *dev, | |||
6328 | 7272 | ||
6329 | DP(NETIF_MSG_LINK, | 7273 | DP(NETIF_MSG_LINK, |
6330 | "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl); | 7274 | "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl); |
6331 | bnx2x_stop_stats(bp); | 7275 | |
6332 | bnx2x_link_set(bp); | 7276 | if (netif_running(dev)) { |
7277 | bnx2x_stop_stats(bp); | ||
7278 | bnx2x_link_set(bp); | ||
7279 | } | ||
6333 | 7280 | ||
6334 | return 0; | 7281 | return 0; |
6335 | } | 7282 | } |
@@ -6531,18 +7478,25 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev, | |||
6531 | static int bnx2x_phys_id(struct net_device *dev, u32 data) | 7478 | static int bnx2x_phys_id(struct net_device *dev, u32 data) |
6532 | { | 7479 | { |
6533 | struct bnx2x *bp = netdev_priv(dev); | 7480 | struct bnx2x *bp = netdev_priv(dev); |
7481 | int port = BP_PORT(bp); | ||
6534 | int i; | 7482 | int i; |
6535 | 7483 | ||
7484 | if (!netif_running(dev)) | ||
7485 | return 0; | ||
7486 | |||
7487 | if (!bp->port.pmf) | ||
7488 | return 0; | ||
7489 | |||
6536 | if (data == 0) | 7490 | if (data == 0) |
6537 | data = 2; | 7491 | data = 2; |
6538 | 7492 | ||
6539 | for (i = 0; i < (data * 2); i++) { | 7493 | for (i = 0; i < (data * 2); i++) { |
6540 | if ((i % 2) == 0) | 7494 | if ((i % 2) == 0) |
6541 | bnx2x_set_led(bp, bp->port, LED_MODE_OPER, SPEED_1000, | 7495 | bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000, |
6542 | bp->link_params.hw_led_mode, | 7496 | bp->link_params.hw_led_mode, |
6543 | bp->link_params.chip_id); | 7497 | bp->link_params.chip_id); |
6544 | else | 7498 | else |
6545 | bnx2x_set_led(bp, bp->port, LED_MODE_OFF, 0, | 7499 | bnx2x_set_led(bp, port, LED_MODE_OFF, 0, |
6546 | bp->link_params.hw_led_mode, | 7500 | bp->link_params.hw_led_mode, |
6547 | bp->link_params.chip_id); | 7501 | bp->link_params.chip_id); |
6548 | 7502 | ||
@@ -6552,7 +7506,7 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data) | |||
6552 | } | 7506 | } |
6553 | 7507 | ||
6554 | if (bp->link_vars.link_up) | 7508 | if (bp->link_vars.link_up) |
6555 | bnx2x_set_led(bp, bp->port, LED_MODE_OPER, | 7509 | bnx2x_set_led(bp, port, LED_MODE_OPER, |
6556 | bp->link_vars.line_speed, | 7510 | bp->link_vars.line_speed, |
6557 | bp->link_params.hw_led_mode, | 7511 | bp->link_params.hw_led_mode, |
6558 | bp->link_params.chip_id); | 7512 | bp->link_params.chip_id); |
@@ -6609,8 +7563,7 @@ static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) | |||
6609 | 7563 | ||
6610 | switch (state) { | 7564 | switch (state) { |
6611 | case PCI_D0: | 7565 | case PCI_D0: |
6612 | pci_write_config_word(bp->pdev, | 7566 | pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, |
6613 | bp->pm_cap + PCI_PM_CTRL, | ||
6614 | ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) | | 7567 | ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) | |
6615 | PCI_PM_CTRL_PME_STATUS)); | 7568 | PCI_PM_CTRL_PME_STATUS)); |
6616 | 7569 | ||
@@ -6644,82 +7597,6 @@ static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) | |||
6644 | * net_device service functions | 7597 | * net_device service functions |
6645 | */ | 7598 | */ |
6646 | 7599 | ||
6647 | /* called with netif_tx_lock from set_multicast */ | ||
6648 | static void bnx2x_set_rx_mode(struct net_device *dev) | ||
6649 | { | ||
6650 | struct bnx2x *bp = netdev_priv(dev); | ||
6651 | u32 rx_mode = BNX2X_RX_MODE_NORMAL; | ||
6652 | |||
6653 | DP(NETIF_MSG_IFUP, "called dev->flags = %x\n", dev->flags); | ||
6654 | |||
6655 | if (dev->flags & IFF_PROMISC) | ||
6656 | rx_mode = BNX2X_RX_MODE_PROMISC; | ||
6657 | |||
6658 | else if ((dev->flags & IFF_ALLMULTI) || | ||
6659 | (dev->mc_count > BNX2X_MAX_MULTICAST)) | ||
6660 | rx_mode = BNX2X_RX_MODE_ALLMULTI; | ||
6661 | |||
6662 | else { /* some multicasts */ | ||
6663 | int i, old, offset; | ||
6664 | struct dev_mc_list *mclist; | ||
6665 | struct mac_configuration_cmd *config = | ||
6666 | bnx2x_sp(bp, mcast_config); | ||
6667 | |||
6668 | for (i = 0, mclist = dev->mc_list; | ||
6669 | mclist && (i < dev->mc_count); | ||
6670 | i++, mclist = mclist->next) { | ||
6671 | |||
6672 | config->config_table[i].cam_entry.msb_mac_addr = | ||
6673 | swab16(*(u16 *)&mclist->dmi_addr[0]); | ||
6674 | config->config_table[i].cam_entry.middle_mac_addr = | ||
6675 | swab16(*(u16 *)&mclist->dmi_addr[2]); | ||
6676 | config->config_table[i].cam_entry.lsb_mac_addr = | ||
6677 | swab16(*(u16 *)&mclist->dmi_addr[4]); | ||
6678 | config->config_table[i].cam_entry.flags = | ||
6679 | cpu_to_le16(bp->port); | ||
6680 | config->config_table[i].target_table_entry.flags = 0; | ||
6681 | config->config_table[i].target_table_entry. | ||
6682 | client_id = 0; | ||
6683 | config->config_table[i].target_table_entry. | ||
6684 | vlan_id = 0; | ||
6685 | |||
6686 | DP(NETIF_MSG_IFUP, | ||
6687 | "setting MCAST[%d] (%04x:%04x:%04x)\n", | ||
6688 | i, config->config_table[i].cam_entry.msb_mac_addr, | ||
6689 | config->config_table[i].cam_entry.middle_mac_addr, | ||
6690 | config->config_table[i].cam_entry.lsb_mac_addr); | ||
6691 | } | ||
6692 | old = config->hdr.length_6b; | ||
6693 | if (old > i) { | ||
6694 | for (; i < old; i++) { | ||
6695 | if (CAM_IS_INVALID(config->config_table[i])) { | ||
6696 | i--; /* already invalidated */ | ||
6697 | break; | ||
6698 | } | ||
6699 | /* invalidate */ | ||
6700 | CAM_INVALIDATE(config->config_table[i]); | ||
6701 | } | ||
6702 | } | ||
6703 | |||
6704 | if (CHIP_REV_IS_SLOW(bp)) | ||
6705 | offset = BNX2X_MAX_EMUL_MULTI*(1 + bp->port); | ||
6706 | else | ||
6707 | offset = BNX2X_MAX_MULTICAST*(1 + bp->port); | ||
6708 | |||
6709 | config->hdr.length_6b = i; | ||
6710 | config->hdr.offset = offset; | ||
6711 | config->hdr.reserved0 = 0; | ||
6712 | config->hdr.reserved1 = 0; | ||
6713 | |||
6714 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, | ||
6715 | U64_HI(bnx2x_sp_mapping(bp, mcast_config)), | ||
6716 | U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0); | ||
6717 | } | ||
6718 | |||
6719 | bp->rx_mode = rx_mode; | ||
6720 | bnx2x_set_storm_rx_mode(bp); | ||
6721 | } | ||
6722 | |||
6723 | static int bnx2x_poll(struct napi_struct *napi, int budget) | 7600 | static int bnx2x_poll(struct napi_struct *napi, int budget) |
6724 | { | 7601 | { |
6725 | struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, | 7602 | struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, |
@@ -6729,7 +7606,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget) | |||
6729 | 7606 | ||
6730 | #ifdef BNX2X_STOP_ON_ERROR | 7607 | #ifdef BNX2X_STOP_ON_ERROR |
6731 | if (unlikely(bp->panic)) | 7608 | if (unlikely(bp->panic)) |
6732 | goto out_panic; | 7609 | goto poll_panic; |
6733 | #endif | 7610 | #endif |
6734 | 7611 | ||
6735 | prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb); | 7612 | prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb); |
@@ -6738,30 +7615,28 @@ static int bnx2x_poll(struct napi_struct *napi, int budget) | |||
6738 | 7615 | ||
6739 | bnx2x_update_fpsb_idx(fp); | 7616 | bnx2x_update_fpsb_idx(fp); |
6740 | 7617 | ||
6741 | if (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons) | 7618 | if ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) || |
7619 | (fp->tx_pkt_prod != fp->tx_pkt_cons)) | ||
6742 | bnx2x_tx_int(fp, budget); | 7620 | bnx2x_tx_int(fp, budget); |
6743 | 7621 | ||
6744 | |||
6745 | if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons) | 7622 | if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons) |
6746 | work_done = bnx2x_rx_int(fp, budget); | 7623 | work_done = bnx2x_rx_int(fp, budget); |
6747 | 7624 | ||
6748 | |||
6749 | rmb(); /* bnx2x_has_work() reads the status block */ | 7625 | rmb(); /* bnx2x_has_work() reads the status block */ |
6750 | 7626 | ||
6751 | /* must not complete if we consumed full budget */ | 7627 | /* must not complete if we consumed full budget */ |
6752 | if ((work_done < budget) && !bnx2x_has_work(fp)) { | 7628 | if ((work_done < budget) && !bnx2x_has_work(fp)) { |
6753 | 7629 | ||
6754 | #ifdef BNX2X_STOP_ON_ERROR | 7630 | #ifdef BNX2X_STOP_ON_ERROR |
6755 | out_panic: | 7631 | poll_panic: |
6756 | #endif | 7632 | #endif |
6757 | netif_rx_complete(bp->dev, napi); | 7633 | netif_rx_complete(bp->dev, napi); |
6758 | 7634 | ||
6759 | bnx2x_ack_sb(bp, fp->index, USTORM_ID, | 7635 | bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, |
6760 | le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1); | 7636 | le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1); |
6761 | bnx2x_ack_sb(bp, fp->index, CSTORM_ID, | 7637 | bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID, |
6762 | le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1); | 7638 | le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1); |
6763 | } | 7639 | } |
6764 | |||
6765 | return work_done; | 7640 | return work_done; |
6766 | } | 7641 | } |
6767 | 7642 | ||
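The poll routine above completes NAPI and re-enables the IGU only when it did not exhaust its budget and bnx2x_has_work() reports nothing pending; otherwise it must let NAPI call it again. The decision reduces to this small predicate (a restatement for clarity, not driver code):

#include <stdio.h>

static int should_complete(int work_done, int budget, int has_work)
{
	return (work_done < budget) && !has_work;
}

int main(void)
{
	printf("%d\n", should_complete(10, 64, 0));	/* 1: complete and re-enable interrupts */
	printf("%d\n", should_complete(64, 64, 0));	/* 0: budget exhausted, poll again */
	printf("%d\n", should_complete(10, 64, 1));	/* 0: new work arrived after the rmb() */
	return 0;
}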
@@ -7055,18 +7930,145 @@ static int bnx2x_close(struct net_device *dev) | |||
7055 | return 0; | 7930 | return 0; |
7056 | } | 7931 | } |
7057 | 7932 | ||
7058 | /* Called with rtnl_lock */ | 7933 | /* called with netif_tx_lock from set_multicast */ |
7934 | static void bnx2x_set_rx_mode(struct net_device *dev) | ||
7935 | { | ||
7936 | struct bnx2x *bp = netdev_priv(dev); | ||
7937 | u32 rx_mode = BNX2X_RX_MODE_NORMAL; | ||
7938 | int port = BP_PORT(bp); | ||
7939 | |||
7940 | if (bp->state != BNX2X_STATE_OPEN) { | ||
7941 | DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); | ||
7942 | return; | ||
7943 | } | ||
7944 | |||
7945 | DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags); | ||
7946 | |||
7947 | if (dev->flags & IFF_PROMISC) | ||
7948 | rx_mode = BNX2X_RX_MODE_PROMISC; | ||
7949 | |||
7950 | else if ((dev->flags & IFF_ALLMULTI) || | ||
7951 | ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp))) | ||
7952 | rx_mode = BNX2X_RX_MODE_ALLMULTI; | ||
7953 | |||
7954 | else { /* some multicasts */ | ||
7955 | if (CHIP_IS_E1(bp)) { | ||
7956 | int i, old, offset; | ||
7957 | struct dev_mc_list *mclist; | ||
7958 | struct mac_configuration_cmd *config = | ||
7959 | bnx2x_sp(bp, mcast_config); | ||
7960 | |||
7961 | for (i = 0, mclist = dev->mc_list; | ||
7962 | mclist && (i < dev->mc_count); | ||
7963 | i++, mclist = mclist->next) { | ||
7964 | |||
7965 | config->config_table[i]. | ||
7966 | cam_entry.msb_mac_addr = | ||
7967 | swab16(*(u16 *)&mclist->dmi_addr[0]); | ||
7968 | config->config_table[i]. | ||
7969 | cam_entry.middle_mac_addr = | ||
7970 | swab16(*(u16 *)&mclist->dmi_addr[2]); | ||
7971 | config->config_table[i]. | ||
7972 | cam_entry.lsb_mac_addr = | ||
7973 | swab16(*(u16 *)&mclist->dmi_addr[4]); | ||
7974 | config->config_table[i].cam_entry.flags = | ||
7975 | cpu_to_le16(port); | ||
7976 | config->config_table[i]. | ||
7977 | target_table_entry.flags = 0; | ||
7978 | config->config_table[i]. | ||
7979 | target_table_entry.client_id = 0; | ||
7980 | config->config_table[i]. | ||
7981 | target_table_entry.vlan_id = 0; | ||
7982 | |||
7983 | DP(NETIF_MSG_IFUP, | ||
7984 | "setting MCAST[%d] (%04x:%04x:%04x)\n", i, | ||
7985 | config->config_table[i]. | ||
7986 | cam_entry.msb_mac_addr, | ||
7987 | config->config_table[i]. | ||
7988 | cam_entry.middle_mac_addr, | ||
7989 | config->config_table[i]. | ||
7990 | cam_entry.lsb_mac_addr); | ||
7991 | } | ||
7992 | old = config->hdr.length_6b; | ||
7993 | if (old > i) { | ||
7994 | for (; i < old; i++) { | ||
7995 | if (CAM_IS_INVALID(config-> | ||
7996 | config_table[i])) { | ||
7997 | i--; /* already invalidated */ | ||
7998 | break; | ||
7999 | } | ||
8000 | /* invalidate */ | ||
8001 | CAM_INVALIDATE(config-> | ||
8002 | config_table[i]); | ||
8003 | } | ||
8004 | } | ||
8005 | |||
8006 | if (CHIP_REV_IS_SLOW(bp)) | ||
8007 | offset = BNX2X_MAX_EMUL_MULTI*(1 + port); | ||
8008 | else | ||
8009 | offset = BNX2X_MAX_MULTICAST*(1 + port); | ||
8010 | |||
8011 | config->hdr.length_6b = i; | ||
8012 | config->hdr.offset = offset; | ||
8013 | config->hdr.client_id = BP_CL_ID(bp); | ||
8014 | config->hdr.reserved1 = 0; | ||
8015 | |||
8016 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, | ||
8017 | U64_HI(bnx2x_sp_mapping(bp, mcast_config)), | ||
8018 | U64_LO(bnx2x_sp_mapping(bp, mcast_config)), | ||
8019 | 0); | ||
8020 | } else { /* E1H */ | ||
8021 | /* Accept one or more multicasts */ | ||
8022 | struct dev_mc_list *mclist; | ||
8023 | u32 mc_filter[MC_HASH_SIZE]; | ||
8024 | u32 crc, bit, regidx; | ||
8025 | int i; | ||
8026 | |||
8027 | memset(mc_filter, 0, 4 * MC_HASH_SIZE); | ||
8028 | |||
8029 | for (i = 0, mclist = dev->mc_list; | ||
8030 | mclist && (i < dev->mc_count); | ||
8031 | i++, mclist = mclist->next) { | ||
8032 | |||
8033 | DP(NETIF_MSG_IFUP, "Adding mcast MAC: " | ||
8034 | "%02x:%02x:%02x:%02x:%02x:%02x\n", | ||
8035 | mclist->dmi_addr[0], mclist->dmi_addr[1], | ||
8036 | mclist->dmi_addr[2], mclist->dmi_addr[3], | ||
8037 | mclist->dmi_addr[4], mclist->dmi_addr[5]); | ||
8038 | |||
8039 | crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN); | ||
8040 | bit = (crc >> 24) & 0xff; | ||
8041 | regidx = bit >> 5; | ||
8042 | bit &= 0x1f; | ||
8043 | mc_filter[regidx] |= (1 << bit); | ||
8044 | } | ||
8045 | |||
8046 | for (i = 0; i < MC_HASH_SIZE; i++) | ||
8047 | REG_WR(bp, MC_HASH_OFFSET(bp, i), | ||
8048 | mc_filter[i]); | ||
8049 | } | ||
8050 | } | ||
8051 | |||
8052 | bp->rx_mode = rx_mode; | ||
8053 | bnx2x_set_storm_rx_mode(bp); | ||
8054 | } | ||
8055 | |||
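For E1H the hunk above replaces per-address CAM entries with a 256-bit multicast hash filter: crc32c_le() is computed over the six MAC bytes, bits 31:24 of the CRC pick one of 256 filter bits, and that index is split into a 32-bit register number (bit >> 5) and a bit position (bit & 0x1f), which with an 8-bit index implies eight MC_HASH registers. The user-space sketch below reproduces the index arithmetic; its crc32c() is a plain bitwise CRC-32C whose seed and finalization may not match the kernel's crc32c_le(0, ...) byte for byte, and the sample MAC is made up, so the printed value is illustrative only.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Plain reflected CRC-32C (Castagnoli), polynomial 0x82F63B78. */
static uint32_t crc32c(const uint8_t *buf, size_t len)
{
	uint32_t crc = 0xffffffffu;

	while (len--) {
		crc ^= *buf++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1));
	}
	return ~crc;
}

int main(void)
{
	/* made-up multicast MAC, purely for illustration */
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x01, 0x02, 0x03 };
	uint32_t crc = crc32c(mac, sizeof(mac));
	uint32_t bit = (crc >> 24) & 0xff;	/* same derivation as the driver */
	uint32_t regidx = bit >> 5;		/* which 32-bit MC_HASH register */

	bit &= 0x1f;				/* which bit inside that register */
	printf("crc=0x%08x -> MC_HASH[%u] |= 1 << %u\n", crc, regidx, bit);
	return 0;
}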
8056 | /* called with rtnl_lock */ | ||
7059 | static int bnx2x_change_mac_addr(struct net_device *dev, void *p) | 8057 | static int bnx2x_change_mac_addr(struct net_device *dev, void *p) |
7060 | { | 8058 | { |
7061 | struct sockaddr *addr = p; | 8059 | struct sockaddr *addr = p; |
7062 | struct bnx2x *bp = netdev_priv(dev); | 8060 | struct bnx2x *bp = netdev_priv(dev); |
7063 | 8061 | ||
7064 | if (!is_valid_ether_addr(addr->sa_data)) | 8062 | if (!is_valid_ether_addr((u8 *)(addr->sa_data))) |
7065 | return -EINVAL; | 8063 | return -EINVAL; |
7066 | 8064 | ||
7067 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | 8065 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); |
7068 | if (netif_running(dev)) | 8066 | if (netif_running(dev)) { |
7069 | bnx2x_set_mac_addr(bp); | 8067 | if (CHIP_IS_E1(bp)) |
8068 | bnx2x_set_mac_addr_e1(bp); | ||
8069 | else | ||
8070 | bnx2x_set_mac_addr_e1h(bp); | ||
8071 | } | ||
7070 | 8072 | ||
7071 | return 0; | 8073 | return 0; |
7072 | } | 8074 | } |
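bnx2x_change_mac_addr() rejects anything is_valid_ether_addr() refuses before reprogramming the E1 or E1H MAC. At this point in time that helper essentially rules out group addresses (I/G bit set in the first octet, which also covers broadcast) and the all-zero address; here is a small user-space restatement, with a local valid_ether_addr() and made-up sample addresses.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* User-space restatement of the kernel check: not multicast, not all-zero. */
static bool valid_ether_addr(const uint8_t a[6])
{
	bool all_zero  = !(a[0] | a[1] | a[2] | a[3] | a[4] | a[5]);
	bool multicast = a[0] & 0x01;	/* I/G bit set => group address */

	return !multicast && !all_zero;
}

int main(void)
{
	const uint8_t good[6]  = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };
	const uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	printf("%d %d\n", valid_ether_addr(good), valid_ether_addr(mcast));	/* prints: 1 0 */
	return 0;
}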
@@ -7080,7 +8082,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
7080 | 8082 | ||
7081 | switch (cmd) { | 8083 | switch (cmd) { |
7082 | case SIOCGMIIPHY: | 8084 | case SIOCGMIIPHY: |
7083 | data->phy_id = bp->phy_addr; | 8085 | data->phy_id = bp->port.phy_addr; |
7084 | 8086 | ||
7085 | /* fallthrough */ | 8087 | /* fallthrough */ |
7086 | 8088 | ||
@@ -7090,12 +8092,12 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
7090 | if (!netif_running(dev)) | 8092 | if (!netif_running(dev)) |
7091 | return -EAGAIN; | 8093 | return -EAGAIN; |
7092 | 8094 | ||
7093 | mutex_lock(&bp->phy_mutex); | 8095 | mutex_lock(&bp->port.phy_mutex); |
7094 | err = bnx2x_cl45_read(bp, bp->port, 0, bp->phy_addr, | 8096 | err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr, |
7095 | DEFAULT_PHY_DEV_ADDR, | 8097 | DEFAULT_PHY_DEV_ADDR, |
7096 | (data->reg_num & 0x1f), &mii_regval); | 8098 | (data->reg_num & 0x1f), &mii_regval); |
7097 | data->val_out = mii_regval; | 8099 | data->val_out = mii_regval; |
7098 | mutex_unlock(&bp->phy_mutex); | 8100 | mutex_unlock(&bp->port.phy_mutex); |
7099 | return err; | 8101 | return err; |
7100 | } | 8102 | } |
7101 | 8103 | ||
@@ -7106,11 +8108,11 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
7106 | if (!netif_running(dev)) | 8108 | if (!netif_running(dev)) |
7107 | return -EAGAIN; | 8109 | return -EAGAIN; |
7108 | 8110 | ||
7109 | mutex_lock(&bp->phy_mutex); | 8111 | mutex_lock(&bp->port.phy_mutex); |
7110 | err = bnx2x_cl45_write(bp, bp->port, 0, bp->phy_addr, | 8112 | err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr, |
7111 | DEFAULT_PHY_DEV_ADDR, | 8113 | DEFAULT_PHY_DEV_ADDR, |
7112 | (data->reg_num & 0x1f), data->val_in); | 8114 | (data->reg_num & 0x1f), data->val_in); |
7113 | mutex_unlock(&bp->phy_mutex); | 8115 | mutex_unlock(&bp->port.phy_mutex); |
7114 | return err; | 8116 | return err; |
7115 | 8117 | ||
7116 | default: | 8118 | default: |
@@ -7121,10 +8123,11 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
7121 | return -EOPNOTSUPP; | 8123 | return -EOPNOTSUPP; |
7122 | } | 8124 | } |
7123 | 8125 | ||
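The SIOCGMIIPHY/SIOCGMIIREG branches above answer the generic MII ioctls by doing a clause-45 register read on the port's PHY under port.phy_mutex. From user space the same path can be exercised with an ordinary datagram socket, much as mii-tool does; the sketch below assumes an interface name argument (defaulting to eth0), needs CAP_NET_ADMIN, and trims most error handling.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	const char *ifname = (argc > 1) ? argv[1] : "eth0";
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0) {		/* fills mii->phy_id */
		perror("SIOCGMIIPHY");
		close(fd);
		return 1;
	}
	mii->reg_num = MII_BMSR;			/* basic mode status register */
	if (ioctl(fd, SIOCGMIIREG, &ifr) < 0)
		perror("SIOCGMIIREG");
	else
		printf("phy %u, BMSR = 0x%04x\n",
		       (unsigned)mii->phy_id, (unsigned)mii->val_out);

	close(fd);
	return 0;
}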
7124 | /* Called with rtnl_lock */ | 8126 | /* called with rtnl_lock */ |
7125 | static int bnx2x_change_mtu(struct net_device *dev, int new_mtu) | 8127 | static int bnx2x_change_mtu(struct net_device *dev, int new_mtu) |
7126 | { | 8128 | { |
7127 | struct bnx2x *bp = netdev_priv(dev); | 8129 | struct bnx2x *bp = netdev_priv(dev); |
8130 | int rc = 0; | ||
7128 | 8131 | ||
7129 | if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) || | 8132 | if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) || |
7130 | ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) | 8133 | ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) |
@@ -7137,10 +8140,11 @@ static int bnx2x_change_mtu(struct net_device *dev, int new_mtu) | |||
7137 | dev->mtu = new_mtu; | 8140 | dev->mtu = new_mtu; |
7138 | 8141 | ||
7139 | if (netif_running(dev)) { | 8142 | if (netif_running(dev)) { |
7140 | bnx2x_nic_unload(bp, 0); | 8143 | bnx2x_nic_unload(bp, UNLOAD_NORMAL); |
7141 | bnx2x_nic_load(bp, 0); | 8144 | rc = bnx2x_nic_load(bp, LOAD_NORMAL); |
7142 | } | 8145 | } |
7143 | return 0; | 8146 | |
8147 | return rc; | ||
7144 | } | 8148 | } |
7145 | 8149 | ||
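With this change bnx2x_change_mtu() propagates the result of the reload: a new MTU on a running interface triggers a full UNLOAD_NORMAL/LOAD_NORMAL cycle and a failed load is now reported instead of silently returning 0. The hook is reached through the normal SIOCSIFMTU path; below is a minimal user-space trigger, assuming a hypothetical eth0 and a 9000-byte MTU chosen to stay within typical jumbo limits.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* hypothetical interface */
	ifr.ifr_mtu = 9000;				/* new MTU, jumbo frame */

	if (ioctl(fd, SIOCSIFMTU, &ifr) < 0)		/* ends up in dev->change_mtu */
		perror("SIOCSIFMTU");
	close(fd);
	return 0;
}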
7146 | static void bnx2x_tx_timeout(struct net_device *dev) | 8150 | static void bnx2x_tx_timeout(struct net_device *dev) |
@@ -7156,7 +8160,7 @@ static void bnx2x_tx_timeout(struct net_device *dev) | |||
7156 | } | 8160 | } |
7157 | 8161 | ||
7158 | #ifdef BCM_VLAN | 8162 | #ifdef BCM_VLAN |
7159 | /* Called with rtnl_lock */ | 8163 | /* called with rtnl_lock */ |
7160 | static void bnx2x_vlan_rx_register(struct net_device *dev, | 8164 | static void bnx2x_vlan_rx_register(struct net_device *dev, |
7161 | struct vlan_group *vlgrp) | 8165 | struct vlan_group *vlgrp) |
7162 | { | 8166 | { |
@@ -7166,6 +8170,7 @@ static void bnx2x_vlan_rx_register(struct net_device *dev, | |||
7166 | if (netif_running(dev)) | 8170 | if (netif_running(dev)) |
7167 | bnx2x_set_client_config(bp); | 8171 | bnx2x_set_client_config(bp); |
7168 | } | 8172 | } |
8173 | |||
7169 | #endif | 8174 | #endif |
7170 | 8175 | ||
7171 | #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER) | 8176 | #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER) |
@@ -7179,36 +8184,8 @@ static void poll_bnx2x(struct net_device *dev) | |||
7179 | } | 8184 | } |
7180 | #endif | 8185 | #endif |
7181 | 8186 | ||
7182 | static void bnx2x_reset_task(struct work_struct *work) | 8187 | static int __devinit bnx2x_init_dev(struct pci_dev *pdev, |
7183 | { | 8188 | struct net_device *dev) |
7184 | struct bnx2x *bp = container_of(work, struct bnx2x, reset_task); | ||
7185 | |||
7186 | #ifdef BNX2X_STOP_ON_ERROR | ||
7187 | BNX2X_ERR("reset task called but STOP_ON_ERROR defined" | ||
7188 | " so reset not done to allow debug dump,\n" | ||
7189 | KERN_ERR " you will need to reboot when done\n"); | ||
7190 | return; | ||
7191 | #endif | ||
7192 | |||
7193 | if (!netif_running(bp->dev)) | ||
7194 | return; | ||
7195 | |||
7196 | rtnl_lock(); | ||
7197 | |||
7198 | if (bp->state != BNX2X_STATE_OPEN) { | ||
7199 | DP(NETIF_MSG_TX_ERR, "state is %x, returning\n", bp->state); | ||
7200 | goto reset_task_exit; | ||
7201 | } | ||
7202 | |||
7203 | bnx2x_nic_unload(bp, 0); | ||
7204 | bnx2x_nic_load(bp, 0); | ||
7205 | |||
7206 | reset_task_exit: | ||
7207 | rtnl_unlock(); | ||
7208 | } | ||
7209 | |||
7210 | static int __devinit bnx2x_init_board(struct pci_dev *pdev, | ||
7211 | struct net_device *dev) | ||
7212 | { | 8189 | { |
7213 | struct bnx2x *bp; | 8190 | struct bnx2x *bp; |
7214 | int rc; | 8191 | int rc; |
@@ -7216,8 +8193,10 @@ static int __devinit bnx2x_init_board(struct pci_dev *pdev, | |||
7216 | SET_NETDEV_DEV(dev, &pdev->dev); | 8193 | SET_NETDEV_DEV(dev, &pdev->dev); |
7217 | bp = netdev_priv(dev); | 8194 | bp = netdev_priv(dev); |
7218 | 8195 | ||
8196 | bp->dev = dev; | ||
8197 | bp->pdev = pdev; | ||
7219 | bp->flags = 0; | 8198 | bp->flags = 0; |
7220 | bp->port = PCI_FUNC(pdev->devfn); | 8199 | bp->func = PCI_FUNC(pdev->devfn); |
7221 | 8200 | ||
7222 | rc = pci_enable_device(pdev); | 8201 | rc = pci_enable_device(pdev); |
7223 | if (rc) { | 8202 | if (rc) { |
@@ -7239,14 +8218,17 @@ static int __devinit bnx2x_init_board(struct pci_dev *pdev, | |||
7239 | goto err_out_disable; | 8218 | goto err_out_disable; |
7240 | } | 8219 | } |
7241 | 8220 | ||
7242 | rc = pci_request_regions(pdev, DRV_MODULE_NAME); | 8221 | if (atomic_read(&pdev->enable_cnt) == 1) { |
7243 | if (rc) { | 8222 | rc = pci_request_regions(pdev, DRV_MODULE_NAME); |
7244 | printk(KERN_ERR PFX "Cannot obtain PCI resources," | 8223 | if (rc) { |
7245 | " aborting\n"); | 8224 | printk(KERN_ERR PFX "Cannot obtain PCI resources," |
7246 | goto err_out_disable; | 8225 | " aborting\n"); |
7247 | } | 8226 | goto err_out_disable; |
8227 | } | ||
7248 | 8228 | ||
7249 | pci_set_master(pdev); | 8229 | pci_set_master(pdev); |
8230 | pci_save_state(pdev); | ||
8231 | } | ||
7250 | 8232 | ||
7251 | bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); | 8233 | bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); |
7252 | if (bp->pm_cap == 0) { | 8234 | if (bp->pm_cap == 0) { |
@@ -7280,13 +8262,9 @@ static int __devinit bnx2x_init_board(struct pci_dev *pdev, | |||
7280 | goto err_out_release; | 8262 | goto err_out_release; |
7281 | } | 8263 | } |
7282 | 8264 | ||
7283 | bp->dev = dev; | 8265 | dev->mem_start = pci_resource_start(pdev, 0); |
7284 | bp->pdev = pdev; | 8266 | dev->base_addr = dev->mem_start; |
7285 | 8267 | dev->mem_end = pci_resource_end(pdev, 0); | |
7286 | INIT_WORK(&bp->reset_task, bnx2x_reset_task); | ||
7287 | INIT_WORK(&bp->sp_task, bnx2x_sp_task); | ||
7288 | |||
7289 | dev->base_addr = pci_resource_start(pdev, 0); | ||
7290 | 8268 | ||
7291 | dev->irq = pdev->irq; | 8269 | dev->irq = pdev->irq; |
7292 | 8270 | ||
@@ -7298,8 +8276,9 @@ static int __devinit bnx2x_init_board(struct pci_dev *pdev, | |||
7298 | goto err_out_release; | 8276 | goto err_out_release; |
7299 | } | 8277 | } |
7300 | 8278 | ||
7301 | bp->doorbells = ioremap_nocache(pci_resource_start(pdev , 2), | 8279 | bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), |
7302 | pci_resource_len(pdev, 2)); | 8280 | min_t(u64, BNX2X_DB_SIZE, |
8281 | pci_resource_len(pdev, 2))); | ||
7303 | if (!bp->doorbells) { | 8282 | if (!bp->doorbells) { |
7304 | printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n"); | 8283 | printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n"); |
7305 | rc = -ENOMEM; | 8284 | rc = -ENOMEM; |
@@ -7308,47 +8287,43 @@ static int __devinit bnx2x_init_board(struct pci_dev *pdev, | |||
7308 | 8287 | ||
7309 | bnx2x_set_power_state(bp, PCI_D0); | 8288 | bnx2x_set_power_state(bp, PCI_D0); |
7310 | 8289 | ||
7311 | bnx2x_get_hwinfo(bp); | 8290 | /* clean indirect addresses */ |
7312 | 8291 | pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, | |
7313 | 8292 | PCICFG_VENDOR_ID_OFFSET); | |
7314 | if (nomcp) { | 8293 | REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0); |
7315 | printk(KERN_ERR PFX "MCP disabled, will only" | 8294 | REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0); |
7316 | " init first device\n"); | 8295 | REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0); |
7317 | onefunc = 1; | 8296 | REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0); |
7318 | } | ||
7319 | |||
7320 | if (onefunc && bp->port) { | ||
7321 | printk(KERN_ERR PFX "Second device disabled, exiting\n"); | ||
7322 | rc = -ENODEV; | ||
7323 | goto err_out_unmap; | ||
7324 | } | ||
7325 | |||
7326 | bp->tx_ring_size = MAX_TX_AVAIL; | ||
7327 | bp->rx_ring_size = MAX_RX_AVAIL; | ||
7328 | |||
7329 | bp->rx_csum = 1; | ||
7330 | |||
7331 | bp->rx_offset = 0; | ||
7332 | |||
7333 | bp->tx_quick_cons_trip_int = 0xff; | ||
7334 | bp->tx_quick_cons_trip = 0xff; | ||
7335 | bp->tx_ticks_int = 50; | ||
7336 | bp->tx_ticks = 50; | ||
7337 | 8297 | ||
7338 | bp->rx_quick_cons_trip_int = 0xff; | 8298 | dev->hard_start_xmit = bnx2x_start_xmit; |
7339 | bp->rx_quick_cons_trip = 0xff; | 8299 | dev->watchdog_timeo = TX_TIMEOUT; |
7340 | bp->rx_ticks_int = 25; | ||
7341 | bp->rx_ticks = 25; | ||
7342 | 8300 | ||
7343 | bp->stats_ticks = 1000000 & 0xffff00; | 8301 | dev->ethtool_ops = &bnx2x_ethtool_ops; |
8302 | dev->open = bnx2x_open; | ||
8303 | dev->stop = bnx2x_close; | ||
8304 | dev->set_multicast_list = bnx2x_set_rx_mode; | ||
8305 | dev->set_mac_address = bnx2x_change_mac_addr; | ||
8306 | dev->do_ioctl = bnx2x_ioctl; | ||
8307 | dev->change_mtu = bnx2x_change_mtu; | ||
8308 | dev->tx_timeout = bnx2x_tx_timeout; | ||
8309 | #ifdef BCM_VLAN | ||
8310 | dev->vlan_rx_register = bnx2x_vlan_rx_register; | ||
8311 | #endif | ||
8312 | #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER) | ||
8313 | dev->poll_controller = poll_bnx2x; | ||
8314 | #endif | ||
8315 | dev->features |= NETIF_F_SG; | ||
8316 | dev->features |= NETIF_F_HW_CSUM; | ||
8317 | if (bp->flags & USING_DAC_FLAG) | ||
8318 | dev->features |= NETIF_F_HIGHDMA; | ||
8319 | #ifdef BCM_VLAN | ||
8320 | dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); | ||
8321 | #endif | ||
8322 | dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN); | ||
7344 | 8323 | ||
7345 | bp->timer_interval = HZ; | 8324 | bp->timer_interval = HZ; |
7346 | bp->current_interval = (poll ? poll : HZ); | 8325 | bp->current_interval = (poll ? poll : HZ); |
7347 | 8326 | ||
7348 | init_timer(&bp->timer); | ||
7349 | bp->timer.expires = jiffies + bp->current_interval; | ||
7350 | bp->timer.data = (unsigned long) bp; | ||
7351 | bp->timer.function = bnx2x_timer; | ||
7352 | 8327 | ||
7353 | return 0; | 8328 | return 0; |
7354 | 8329 | ||
@@ -7357,14 +8332,14 @@ err_out_unmap: | |||
7357 | iounmap(bp->regview); | 8332 | iounmap(bp->regview); |
7358 | bp->regview = NULL; | 8333 | bp->regview = NULL; |
7359 | } | 8334 | } |
7360 | |||
7361 | if (bp->doorbells) { | 8335 | if (bp->doorbells) { |
7362 | iounmap(bp->doorbells); | 8336 | iounmap(bp->doorbells); |
7363 | bp->doorbells = NULL; | 8337 | bp->doorbells = NULL; |
7364 | } | 8338 | } |
7365 | 8339 | ||
7366 | err_out_release: | 8340 | err_out_release: |
7367 | pci_release_regions(pdev); | 8341 | if (atomic_read(&pdev->enable_cnt) == 1) |
8342 | pci_release_regions(pdev); | ||
7368 | 8343 | ||
7369 | err_out_disable: | 8344 | err_out_disable: |
7370 | pci_disable_device(pdev); | 8345 | pci_disable_device(pdev); |
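bnx2x_init_dev() keeps the usual goto-unwind error handling: each step that can fail jumps to a label that releases only what was already acquired, in reverse order (err_out_unmap, err_out_release, err_out_disable), and the region release is now skipped unless this is the only enable of the device. Below is a user-space analogue of that unwind ordering; acquire(), release() and init_dev() are stand-ins for pci_enable_device()/pci_request_regions()/ioremap_nocache() and their counterparts, not the driver's real helpers.

#include <stdio.h>
#include <stdlib.h>

static void *acquire(const char *what)
{
	printf("acquire %s\n", what);
	return malloc(1);
}

static void release(const char *what, void *res)
{
	printf("release %s\n", what);
	free(res);
}

static int init_dev(void)
{
	void *regs = NULL, *doorbells = NULL;
	int rc;

	regs = acquire("register BAR");
	if (!regs) {
		rc = -1;
		goto err_out;
	}
	doorbells = acquire("doorbell BAR");
	if (!doorbells) {
		rc = -1;
		goto err_out_unmap;	/* undo only what succeeded, in reverse order */
	}
	/* on success both mappings stay live for the device's lifetime, as in the driver */
	return 0;

err_out_unmap:
	release("register BAR", regs);
err_out:
	return rc;
}

int main(void)
{
	return init_dev() ? 1 : 0;
}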
@@ -7398,7 +8373,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, | |||
7398 | struct net_device *dev = NULL; | 8373 | struct net_device *dev = NULL; |
7399 | struct bnx2x *bp; | 8374 | struct bnx2x *bp; |
7400 | int rc; | 8375 | int rc; |
7401 | int port = PCI_FUNC(pdev->devfn); | ||
7402 | DECLARE_MAC_BUF(mac); | 8376 | DECLARE_MAC_BUF(mac); |
7403 | 8377 | ||
7404 | if (version_printed++ == 0) | 8378 | if (version_printed++ == 0) |
@@ -7406,78 +8380,62 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, | |||
7406 | 8380 | ||
7407 | /* dev zeroed in init_etherdev */ | 8381 | /* dev zeroed in init_etherdev */ |
7408 | dev = alloc_etherdev(sizeof(*bp)); | 8382 | dev = alloc_etherdev(sizeof(*bp)); |
7409 | if (!dev) | 8383 | if (!dev) { |
8384 | printk(KERN_ERR PFX "Cannot allocate net device\n"); | ||
7410 | return -ENOMEM; | 8385 | return -ENOMEM; |
8386 | } | ||
7411 | 8387 | ||
7412 | netif_carrier_off(dev); | 8388 | netif_carrier_off(dev); |
7413 | 8389 | ||
7414 | bp = netdev_priv(dev); | 8390 | bp = netdev_priv(dev); |
7415 | bp->msglevel = debug; | 8391 | bp->msglevel = debug; |
7416 | 8392 | ||
7417 | if (port && onefunc) { | 8393 | rc = bnx2x_init_dev(pdev, dev); |
7418 | printk(KERN_ERR PFX "second function disabled. exiting\n"); | ||
7419 | free_netdev(dev); | ||
7420 | return 0; | ||
7421 | } | ||
7422 | |||
7423 | rc = bnx2x_init_board(pdev, dev); | ||
7424 | if (rc < 0) { | 8394 | if (rc < 0) { |
7425 | free_netdev(dev); | 8395 | free_netdev(dev); |
7426 | return rc; | 8396 | return rc; |
7427 | } | 8397 | } |
7428 | 8398 | ||
7429 | dev->hard_start_xmit = bnx2x_start_xmit; | ||
7430 | dev->watchdog_timeo = TX_TIMEOUT; | ||
7431 | |||
7432 | dev->ethtool_ops = &bnx2x_ethtool_ops; | ||
7433 | dev->open = bnx2x_open; | ||
7434 | dev->stop = bnx2x_close; | ||
7435 | dev->set_multicast_list = bnx2x_set_rx_mode; | ||
7436 | dev->set_mac_address = bnx2x_change_mac_addr; | ||
7437 | dev->do_ioctl = bnx2x_ioctl; | ||
7438 | dev->change_mtu = bnx2x_change_mtu; | ||
7439 | dev->tx_timeout = bnx2x_tx_timeout; | ||
7440 | #ifdef BCM_VLAN | ||
7441 | dev->vlan_rx_register = bnx2x_vlan_rx_register; | ||
7442 | #endif | ||
7443 | #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER) | ||
7444 | dev->poll_controller = poll_bnx2x; | ||
7445 | #endif | ||
7446 | dev->features |= NETIF_F_SG; | ||
7447 | if (bp->flags & USING_DAC_FLAG) | ||
7448 | dev->features |= NETIF_F_HIGHDMA; | ||
7449 | dev->features |= NETIF_F_IP_CSUM; | ||
7450 | #ifdef BCM_VLAN | ||
7451 | dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; | ||
7452 | #endif | ||
7453 | dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN; | ||
7454 | |||
7455 | rc = register_netdev(dev); | 8399 | rc = register_netdev(dev); |
7456 | if (rc) { | 8400 | if (rc) { |
7457 | dev_err(&pdev->dev, "Cannot register net device\n"); | 8401 | dev_err(&pdev->dev, "Cannot register net device\n"); |
7458 | if (bp->regview) | 8402 | goto init_one_exit; |
7459 | iounmap(bp->regview); | ||
7460 | if (bp->doorbells) | ||
7461 | iounmap(bp->doorbells); | ||
7462 | pci_release_regions(pdev); | ||
7463 | pci_disable_device(pdev); | ||
7464 | pci_set_drvdata(pdev, NULL); | ||
7465 | free_netdev(dev); | ||
7466 | return rc; | ||
7467 | } | 8403 | } |
7468 | 8404 | ||
7469 | pci_set_drvdata(pdev, dev); | 8405 | pci_set_drvdata(pdev, dev); |
7470 | 8406 | ||
7471 | bp->name = board_info[ent->driver_data].name; | 8407 | rc = bnx2x_init_bp(bp); |
8408 | if (rc) { | ||
8409 | unregister_netdev(dev); | ||
8410 | goto init_one_exit; | ||
8411 | } | ||
8412 | |||
8413 | bp->common.name = board_info[ent->driver_data].name; | ||
7472 | printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx," | 8414 | printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx," |
7473 | " IRQ %d, ", dev->name, bp->name, | 8415 | " IRQ %d, ", dev->name, bp->common.name, |
7474 | ((CHIP_ID(bp) & 0xf000) >> 12) + 'A', | 8416 | (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), |
7475 | ((CHIP_ID(bp) & 0x0ff0) >> 4), | ||
7476 | bnx2x_get_pcie_width(bp), | 8417 | bnx2x_get_pcie_width(bp), |
7477 | (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz", | 8418 | (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz", |
7478 | dev->base_addr, bp->pdev->irq); | 8419 | dev->base_addr, bp->pdev->irq); |
7479 | printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr)); | 8420 | printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr)); |
7480 | return 0; | 8421 | return 0; |
8422 | |||
8423 | init_one_exit: | ||
8424 | if (bp->regview) | ||
8425 | iounmap(bp->regview); | ||
8426 | |||
8427 | if (bp->doorbells) | ||
8428 | iounmap(bp->doorbells); | ||
8429 | |||
8430 | free_netdev(dev); | ||
8431 | |||
8432 | if (atomic_read(&pdev->enable_cnt) == 1) | ||
8433 | pci_release_regions(pdev); | ||
8434 | |||
8435 | pci_disable_device(pdev); | ||
8436 | pci_set_drvdata(pdev, NULL); | ||
8437 | |||
8438 | return rc; | ||
7481 | } | 8439 | } |
7482 | 8440 | ||
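bnx2x_init_one() leans on alloc_etherdev(sizeof(*bp)) placing the driver-private struct bnx2x in the same allocation right behind struct net_device, so the later netdev_priv(dev) calls are plain pointer arithmetic rather than a second allocation or lookup. The same single-allocation layout in ordinary C, with fake_netdev/fake_priv standing in for the kernel structures and the kernel's alignment padding left out for brevity:

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct fake_netdev {		/* stand-in for struct net_device */
	char name[16];
	/* the private area follows immediately after this struct */
};

struct fake_priv {		/* stand-in for struct bnx2x */
	int msglevel;
};

static struct fake_netdev *alloc_dev(size_t priv_size)
{
	/* one allocation holds both the generic and the private part */
	return calloc(1, sizeof(struct fake_netdev) + priv_size);
}

static void *dev_priv(struct fake_netdev *dev)
{
	return (char *)dev + sizeof(struct fake_netdev);
}

int main(void)
{
	struct fake_netdev *dev = alloc_dev(sizeof(struct fake_priv));
	struct fake_priv *bp;

	if (!dev)
		return 1;
	bp = dev_priv(dev);
	bp->msglevel = 1;
	printf("dev=%p priv=%p\n", (void *)dev, (void *)bp);
	free(dev);
	return 0;
}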
7483 | static void __devexit bnx2x_remove_one(struct pci_dev *pdev) | 8441 | static void __devexit bnx2x_remove_one(struct pci_dev *pdev) |
@@ -7486,11 +8444,9 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev) | |||
7486 | struct bnx2x *bp; | 8444 | struct bnx2x *bp; |
7487 | 8445 | ||
7488 | if (!dev) { | 8446 | if (!dev) { |
7489 | /* we get here if init_one() fails */ | ||
7490 | printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n"); | 8447 | printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n"); |
7491 | return; | 8448 | return; |
7492 | } | 8449 | } |
7493 | |||
7494 | bp = netdev_priv(dev); | 8450 | bp = netdev_priv(dev); |
7495 | 8451 | ||
7496 | unregister_netdev(dev); | 8452 | unregister_netdev(dev); |
@@ -7502,7 +8458,10 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev) | |||
7502 | iounmap(bp->doorbells); | 8458 | iounmap(bp->doorbells); |
7503 | 8459 | ||
7504 | free_netdev(dev); | 8460 | free_netdev(dev); |
7505 | pci_release_regions(pdev); | 8461 | |
8462 | if (atomic_read(&pdev->enable_cnt) == 1) | ||
8463 | pci_release_regions(pdev); | ||
8464 | |||
7506 | pci_disable_device(pdev); | 8465 | pci_disable_device(pdev); |
7507 | pci_set_drvdata(pdev, NULL); | 8466 | pci_set_drvdata(pdev, NULL); |
7508 | } | 8467 | } |
@@ -7512,21 +8471,29 @@ static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state) | |||
7512 | struct net_device *dev = pci_get_drvdata(pdev); | 8471 | struct net_device *dev = pci_get_drvdata(pdev); |
7513 | struct bnx2x *bp; | 8472 | struct bnx2x *bp; |
7514 | 8473 | ||
7515 | if (!dev) | 8474 | if (!dev) { |
7516 | return 0; | 8475 | printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n"); |
8476 | return -ENODEV; | ||
8477 | } | ||
8478 | bp = netdev_priv(dev); | ||
7517 | 8479 | ||
7518 | if (!netif_running(dev)) | 8480 | rtnl_lock(); |
7519 | return 0; | ||
7520 | 8481 | ||
7521 | bp = netdev_priv(dev); | 8482 | pci_save_state(pdev); |
7522 | 8483 | ||
7523 | bnx2x_nic_unload(bp, 0); | 8484 | if (!netif_running(dev)) { |
8485 | rtnl_unlock(); | ||
8486 | return 0; | ||
8487 | } | ||
7524 | 8488 | ||
7525 | netif_device_detach(dev); | 8489 | netif_device_detach(dev); |
7526 | 8490 | ||
7527 | pci_save_state(pdev); | 8491 | bnx2x_nic_unload(bp, UNLOAD_NORMAL); |
8492 | |||
7528 | bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); | 8493 | bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); |
7529 | 8494 | ||
8495 | rtnl_unlock(); | ||
8496 | |||
7530 | return 0; | 8497 | return 0; |
7531 | } | 8498 | } |
7532 | 8499 | ||
@@ -7540,21 +8507,25 @@ static int bnx2x_resume(struct pci_dev *pdev) | |||
7540 | printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n"); | 8507 | printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n"); |
7541 | return -ENODEV; | 8508 | return -ENODEV; |
7542 | } | 8509 | } |
7543 | |||
7544 | if (!netif_running(dev)) | ||
7545 | return 0; | ||
7546 | |||
7547 | bp = netdev_priv(dev); | 8510 | bp = netdev_priv(dev); |
7548 | 8511 | ||
8512 | rtnl_lock(); | ||
8513 | |||
7549 | pci_restore_state(pdev); | 8514 | pci_restore_state(pdev); |
8515 | |||
8516 | if (!netif_running(dev)) { | ||
8517 | rtnl_unlock(); | ||
8518 | return 0; | ||
8519 | } | ||
8520 | |||
7550 | bnx2x_set_power_state(bp, PCI_D0); | 8521 | bnx2x_set_power_state(bp, PCI_D0); |
7551 | netif_device_attach(dev); | 8522 | netif_device_attach(dev); |
7552 | 8523 | ||
7553 | rc = bnx2x_nic_load(bp, 0); | 8524 | rc = bnx2x_nic_load(bp, LOAD_NORMAL); |
7554 | if (rc) | ||
7555 | return rc; | ||
7556 | 8525 | ||
7557 | return 0; | 8526 | rtnl_unlock(); |
8527 | |||
8528 | return rc; | ||
7558 | } | 8529 | } |
7559 | 8530 | ||
7560 | static struct pci_driver bnx2x_pci_driver = { | 8531 | static struct pci_driver bnx2x_pci_driver = { |