aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/bnx2x/bnx2x_main.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/bnx2x/bnx2x_main.c')
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c1777
1 files changed, 1364 insertions, 413 deletions
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 2c04b97f85a9..0ac416a14202 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -23,7 +23,6 @@
23#include <linux/errno.h> 23#include <linux/errno.h>
24#include <linux/ioport.h> 24#include <linux/ioport.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h> 26#include <linux/interrupt.h>
28#include <linux/pci.h> 27#include <linux/pci.h>
29#include <linux/init.h> 28#include <linux/init.h>
@@ -68,6 +67,7 @@
68 __stringify(BCM_5710_FW_ENGINEERING_VERSION) 67 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
69#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw" 68#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
70#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw" 69#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
70#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
71 71
72/* Time in jiffies before concluding the transmitter is hung */ 72/* Time in jiffies before concluding the transmitter is hung */
73#define TX_TIMEOUT (5*HZ) 73#define TX_TIMEOUT (5*HZ)
@@ -77,11 +77,13 @@ static char version[] __devinitdata =
77 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 77 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
78 78
79MODULE_AUTHOR("Eliezer Tamir"); 79MODULE_AUTHOR("Eliezer Tamir");
80MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver"); 80MODULE_DESCRIPTION("Broadcom NetXtreme II "
81 "BCM57710/57711/57711E/57712/57712E Driver");
81MODULE_LICENSE("GPL"); 82MODULE_LICENSE("GPL");
82MODULE_VERSION(DRV_MODULE_VERSION); 83MODULE_VERSION(DRV_MODULE_VERSION);
83MODULE_FIRMWARE(FW_FILE_NAME_E1); 84MODULE_FIRMWARE(FW_FILE_NAME_E1);
84MODULE_FIRMWARE(FW_FILE_NAME_E1H); 85MODULE_FIRMWARE(FW_FILE_NAME_E1H);
86MODULE_FIRMWARE(FW_FILE_NAME_E2);
85 87
86static int multi_mode = 1; 88static int multi_mode = 1;
87module_param(multi_mode, int, 0); 89module_param(multi_mode, int, 0);
@@ -124,6 +126,8 @@ enum bnx2x_board_type {
124 BCM57710 = 0, 126 BCM57710 = 0,
125 BCM57711 = 1, 127 BCM57711 = 1,
126 BCM57711E = 2, 128 BCM57711E = 2,
129 BCM57712 = 3,
130 BCM57712E = 4
127}; 131};
128 132
129/* indexed by board_type, above */ 133/* indexed by board_type, above */
@@ -132,14 +136,24 @@ static struct {
132} board_info[] __devinitdata = { 136} board_info[] __devinitdata = {
133 { "Broadcom NetXtreme II BCM57710 XGb" }, 137 { "Broadcom NetXtreme II BCM57710 XGb" },
134 { "Broadcom NetXtreme II BCM57711 XGb" }, 138 { "Broadcom NetXtreme II BCM57711 XGb" },
135 { "Broadcom NetXtreme II BCM57711E XGb" } 139 { "Broadcom NetXtreme II BCM57711E XGb" },
140 { "Broadcom NetXtreme II BCM57712 XGb" },
141 { "Broadcom NetXtreme II BCM57712E XGb" }
136}; 142};
137 143
144#ifndef PCI_DEVICE_ID_NX2_57712
145#define PCI_DEVICE_ID_NX2_57712 0x1662
146#endif
147#ifndef PCI_DEVICE_ID_NX2_57712E
148#define PCI_DEVICE_ID_NX2_57712E 0x1663
149#endif
138 150
139static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = { 151static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
140 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 }, 152 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
141 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 }, 153 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
142 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E }, 154 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
155 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
156 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
143 { 0 } 157 { 0 }
144}; 158};
145 159
@@ -353,7 +367,8 @@ static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
353 u8 ticks) 367 u8 ticks)
354{ 368{
355 369
356 int index_offset = 370 int index_offset = CHIP_IS_E2(bp) ?
371 offsetof(struct hc_status_block_data_e2, index_data) :
357 offsetof(struct hc_status_block_data_e1x, index_data); 372 offsetof(struct hc_status_block_data_e1x, index_data);
358 u32 addr = BAR_CSTRORM_INTMEM + 373 u32 addr = BAR_CSTRORM_INTMEM +
359 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + 374 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
@@ -369,7 +384,8 @@ static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
369 u8 disable) 384 u8 disable)
370{ 385{
371 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); 386 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
372 int index_offset = 387 int index_offset = CHIP_IS_E2(bp) ?
388 offsetof(struct hc_status_block_data_e2, index_data) :
373 offsetof(struct hc_status_block_data_e1x, index_data); 389 offsetof(struct hc_status_block_data_e1x, index_data);
374 u32 addr = BAR_CSTRORM_INTMEM + 390 u32 addr = BAR_CSTRORM_INTMEM +
375 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + 391 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
@@ -408,6 +424,75 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
408 return val; 424 return val;
409} 425}
410 426
427#define DMAE_DP_SRC_GRC "grc src_addr [%08x]"
428#define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]"
429#define DMAE_DP_DST_GRC "grc dst_addr [%08x]"
430#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
431#define DMAE_DP_DST_NONE "dst_addr [none]"
432
433void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
434{
435 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
436
437 switch (dmae->opcode & DMAE_COMMAND_DST) {
438 case DMAE_CMD_DST_PCI:
439 if (src_type == DMAE_CMD_SRC_PCI)
440 DP(msglvl, "DMAE: opcode 0x%08x\n"
441 "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
442 "comp_addr [%x:%08x], comp_val 0x%08x\n",
443 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
444 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
445 dmae->comp_addr_hi, dmae->comp_addr_lo,
446 dmae->comp_val);
447 else
448 DP(msglvl, "DMAE: opcode 0x%08x\n"
449 "src [%08x], len [%d*4], dst [%x:%08x]\n"
450 "comp_addr [%x:%08x], comp_val 0x%08x\n",
451 dmae->opcode, dmae->src_addr_lo >> 2,
452 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
453 dmae->comp_addr_hi, dmae->comp_addr_lo,
454 dmae->comp_val);
455 break;
456 case DMAE_CMD_DST_GRC:
457 if (src_type == DMAE_CMD_SRC_PCI)
458 DP(msglvl, "DMAE: opcode 0x%08x\n"
459 "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
460 "comp_addr [%x:%08x], comp_val 0x%08x\n",
461 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
462 dmae->len, dmae->dst_addr_lo >> 2,
463 dmae->comp_addr_hi, dmae->comp_addr_lo,
464 dmae->comp_val);
465 else
466 DP(msglvl, "DMAE: opcode 0x%08x\n"
467 "src [%08x], len [%d*4], dst [%08x]\n"
468 "comp_addr [%x:%08x], comp_val 0x%08x\n",
469 dmae->opcode, dmae->src_addr_lo >> 2,
470 dmae->len, dmae->dst_addr_lo >> 2,
471 dmae->comp_addr_hi, dmae->comp_addr_lo,
472 dmae->comp_val);
473 break;
474 default:
475 if (src_type == DMAE_CMD_SRC_PCI)
476 DP(msglvl, "DMAE: opcode 0x%08x\n"
477 DP_LEVEL "src_addr [%x:%08x] len [%d * 4] "
478 "dst_addr [none]\n"
479 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
480 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
481 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
482 dmae->comp_val);
483 else
484 DP(msglvl, "DMAE: opcode 0x%08x\n"
485 DP_LEVEL "src_addr [%08x] len [%d * 4] "
486 "dst_addr [none]\n"
487 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
488 dmae->opcode, dmae->src_addr_lo >> 2,
489 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
490 dmae->comp_val);
491 break;
492 }
493
494}
495
411const u32 dmae_reg_go_c[] = { 496const u32 dmae_reg_go_c[] = {
412 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3, 497 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
413 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7, 498 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
@@ -431,85 +516,137 @@ void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
431 REG_WR(bp, dmae_reg_go_c[idx], 1); 516 REG_WR(bp, dmae_reg_go_c[idx], 1);
432} 517}
433 518
434void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, 519u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
435 u32 len32)
436{ 520{
437 struct dmae_command dmae; 521 return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
438 u32 *wb_comp = bnx2x_sp(bp, wb_comp); 522 DMAE_CMD_C_ENABLE);
439 int cnt = 200; 523}
440 524
441 if (!bp->dmae_ready) { 525u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
442 u32 *data = bnx2x_sp(bp, wb_data[0]); 526{
527 return opcode & ~DMAE_CMD_SRC_RESET;
528}
443 529
444 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)" 530u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
445 " using indirect\n", dst_addr, len32); 531 bool with_comp, u8 comp_type)
446 bnx2x_init_ind_wr(bp, dst_addr, data, len32); 532{
447 return; 533 u32 opcode = 0;
448 } 534
535 opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
536 (dst_type << DMAE_COMMAND_DST_SHIFT));
449 537
450 memset(&dmae, 0, sizeof(struct dmae_command)); 538 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
539
540 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
541 opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
542 (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
543 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
451 544
452 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
453 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
454 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
455#ifdef __BIG_ENDIAN 545#ifdef __BIG_ENDIAN
456 DMAE_CMD_ENDIANITY_B_DW_SWAP | 546 opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
457#else 547#else
458 DMAE_CMD_ENDIANITY_DW_SWAP | 548 opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
459#endif 549#endif
460 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | 550 if (with_comp)
461 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); 551 opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
462 dmae.src_addr_lo = U64_LO(dma_addr); 552 return opcode;
463 dmae.src_addr_hi = U64_HI(dma_addr); 553}
464 dmae.dst_addr_lo = dst_addr >> 2; 554
465 dmae.dst_addr_hi = 0; 555void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
466 dmae.len = len32; 556 u8 src_type, u8 dst_type)
467 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp)); 557{
468 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp)); 558 memset(dmae, 0, sizeof(struct dmae_command));
469 dmae.comp_val = DMAE_COMP_VAL; 559
470 560 /* set the opcode */
471 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n" 561 dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
472 DP_LEVEL "src_addr [%x:%08x] len [%d *4] " 562 true, DMAE_COMP_PCI);
473 "dst_addr [%x:%08x (%08x)]\n" 563
474 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n", 564 /* fill in the completion parameters */
475 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo, 565 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
476 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr, 566 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
477 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val); 567 dmae->comp_val = DMAE_COMP_VAL;
478 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n", 568}
569
570/* issue a dmae command over the init-channel and wailt for completion */
571int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
572{
573 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
574 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
575 int rc = 0;
576
577 DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
479 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1], 578 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
480 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]); 579 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
481 580
581 /* lock the dmae channel */
482 mutex_lock(&bp->dmae_mutex); 582 mutex_lock(&bp->dmae_mutex);
483 583
584 /* reset completion */
484 *wb_comp = 0; 585 *wb_comp = 0;
485 586
486 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp)); 587 /* post the command on the channel used for initializations */
588 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
487 589
590 /* wait for completion */
488 udelay(5); 591 udelay(5);
489 592 while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
490 while (*wb_comp != DMAE_COMP_VAL) {
491 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp); 593 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
492 594
493 if (!cnt) { 595 if (!cnt) {
494 BNX2X_ERR("DMAE timeout!\n"); 596 BNX2X_ERR("DMAE timeout!\n");
495 break; 597 rc = DMAE_TIMEOUT;
598 goto unlock;
496 } 599 }
497 cnt--; 600 cnt--;
498 /* adjust delay for emulation/FPGA */ 601 udelay(50);
499 if (CHIP_REV_IS_SLOW(bp))
500 msleep(100);
501 else
502 udelay(5);
503 } 602 }
603 if (*wb_comp & DMAE_PCI_ERR_FLAG) {
604 BNX2X_ERR("DMAE PCI error!\n");
605 rc = DMAE_PCI_ERROR;
606 }
607
608 DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
609 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
610 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
504 611
612unlock:
505 mutex_unlock(&bp->dmae_mutex); 613 mutex_unlock(&bp->dmae_mutex);
614 return rc;
615}
616
617void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
618 u32 len32)
619{
620 struct dmae_command dmae;
621
622 if (!bp->dmae_ready) {
623 u32 *data = bnx2x_sp(bp, wb_data[0]);
624
625 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
626 " using indirect\n", dst_addr, len32);
627 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
628 return;
629 }
630
631 /* set opcode and fixed command fields */
632 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
633
634 /* fill in addresses and len */
635 dmae.src_addr_lo = U64_LO(dma_addr);
636 dmae.src_addr_hi = U64_HI(dma_addr);
637 dmae.dst_addr_lo = dst_addr >> 2;
638 dmae.dst_addr_hi = 0;
639 dmae.len = len32;
640
641 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
642
643 /* issue the command and wait for completion */
644 bnx2x_issue_dmae_with_comp(bp, &dmae);
506} 645}
507 646
508void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) 647void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
509{ 648{
510 struct dmae_command dmae; 649 struct dmae_command dmae;
511 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
512 int cnt = 200;
513 650
514 if (!bp->dmae_ready) { 651 if (!bp->dmae_ready) {
515 u32 *data = bnx2x_sp(bp, wb_data[0]); 652 u32 *data = bnx2x_sp(bp, wb_data[0]);
@@ -522,62 +659,20 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
522 return; 659 return;
523 } 660 }
524 661
525 memset(&dmae, 0, sizeof(struct dmae_command)); 662 /* set opcode and fixed command fields */
663 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
526 664
527 dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | 665 /* fill in addresses and len */
528 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
529 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
530#ifdef __BIG_ENDIAN
531 DMAE_CMD_ENDIANITY_B_DW_SWAP |
532#else
533 DMAE_CMD_ENDIANITY_DW_SWAP |
534#endif
535 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
536 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
537 dmae.src_addr_lo = src_addr >> 2; 666 dmae.src_addr_lo = src_addr >> 2;
538 dmae.src_addr_hi = 0; 667 dmae.src_addr_hi = 0;
539 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data)); 668 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
540 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data)); 669 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
541 dmae.len = len32; 670 dmae.len = len32;
542 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
543 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
544 dmae.comp_val = DMAE_COMP_VAL;
545
546 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
547 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
548 "dst_addr [%x:%08x (%08x)]\n"
549 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
550 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
551 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
552 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
553
554 mutex_lock(&bp->dmae_mutex);
555
556 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
557 *wb_comp = 0;
558
559 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
560
561 udelay(5);
562 671
563 while (*wb_comp != DMAE_COMP_VAL) { 672 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
564 673
565 if (!cnt) { 674 /* issue the command and wait for completion */
566 BNX2X_ERR("DMAE timeout!\n"); 675 bnx2x_issue_dmae_with_comp(bp, &dmae);
567 break;
568 }
569 cnt--;
570 /* adjust delay for emulation/FPGA */
571 if (CHIP_REV_IS_SLOW(bp))
572 msleep(100);
573 else
574 udelay(5);
575 }
576 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
577 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
578 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
579
580 mutex_unlock(&bp->dmae_mutex);
581} 676}
582 677
583void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr, 678void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
@@ -744,19 +839,24 @@ static void bnx2x_fw_dump(struct bnx2x *bp)
744 u32 mark, offset; 839 u32 mark, offset;
745 __be32 data[9]; 840 __be32 data[9];
746 int word; 841 int word;
747 842 u32 trace_shmem_base;
748 if (BP_NOMCP(bp)) { 843 if (BP_NOMCP(bp)) {
749 BNX2X_ERR("NO MCP - can not dump\n"); 844 BNX2X_ERR("NO MCP - can not dump\n");
750 return; 845 return;
751 } 846 }
752 847
753 addr = bp->common.shmem_base - 0x0800 + 4; 848 if (BP_PATH(bp) == 0)
849 trace_shmem_base = bp->common.shmem_base;
850 else
851 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
852 addr = trace_shmem_base - 0x0800 + 4;
754 mark = REG_RD(bp, addr); 853 mark = REG_RD(bp, addr);
755 mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000; 854 mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
855 + ((mark + 0x3) & ~0x3) - 0x08000000;
756 pr_err("begin fw dump (mark 0x%x)\n", mark); 856 pr_err("begin fw dump (mark 0x%x)\n", mark);
757 857
758 pr_err(""); 858 pr_err("");
759 for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) { 859 for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
760 for (word = 0; word < 8; word++) 860 for (word = 0; word < 8; word++)
761 data[word] = htonl(REG_RD(bp, offset + 4*word)); 861 data[word] = htonl(REG_RD(bp, offset + 4*word));
762 data[8] = 0x0; 862 data[8] = 0x0;
@@ -822,10 +922,15 @@ void bnx2x_panic_dump(struct bnx2x *bp)
822 for_each_queue(bp, i) { 922 for_each_queue(bp, i) {
823 struct bnx2x_fastpath *fp = &bp->fp[i]; 923 struct bnx2x_fastpath *fp = &bp->fp[i];
824 int loop; 924 int loop;
925 struct hc_status_block_data_e2 sb_data_e2;
825 struct hc_status_block_data_e1x sb_data_e1x; 926 struct hc_status_block_data_e1x sb_data_e1x;
826 struct hc_status_block_sm *hc_sm_p = 927 struct hc_status_block_sm *hc_sm_p =
928 CHIP_IS_E2(bp) ?
929 sb_data_e2.common.state_machine :
827 sb_data_e1x.common.state_machine; 930 sb_data_e1x.common.state_machine;
828 struct hc_index_data *hc_index_p = 931 struct hc_index_data *hc_index_p =
932 CHIP_IS_E2(bp) ?
933 sb_data_e2.index_data :
829 sb_data_e1x.index_data; 934 sb_data_e1x.index_data;
830 int data_size; 935 int data_size;
831 u32 *sb_data_p; 936 u32 *sb_data_p;
@@ -849,7 +954,8 @@ void bnx2x_panic_dump(struct bnx2x *bp)
849 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod, 954 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
850 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb)); 955 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
851 956
852 loop = HC_SB_MAX_INDICES_E1X; 957 loop = CHIP_IS_E2(bp) ?
958 HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;
853 959
854 /* host sb data */ 960 /* host sb data */
855 961
@@ -865,23 +971,36 @@ void bnx2x_panic_dump(struct bnx2x *bp)
865 fp->sb_index_values[j], 971 fp->sb_index_values[j],
866 (j == loop - 1) ? ")" : " "); 972 (j == loop - 1) ? ")" : " ");
867 /* fw sb data */ 973 /* fw sb data */
868 data_size = 974 data_size = CHIP_IS_E2(bp) ?
975 sizeof(struct hc_status_block_data_e2) :
869 sizeof(struct hc_status_block_data_e1x); 976 sizeof(struct hc_status_block_data_e1x);
870 data_size /= sizeof(u32); 977 data_size /= sizeof(u32);
871 sb_data_p = (u32 *)&sb_data_e1x; 978 sb_data_p = CHIP_IS_E2(bp) ?
979 (u32 *)&sb_data_e2 :
980 (u32 *)&sb_data_e1x;
872 /* copy sb data in here */ 981 /* copy sb data in here */
873 for (j = 0; j < data_size; j++) 982 for (j = 0; j < data_size; j++)
874 *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM + 983 *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
875 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) + 984 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
876 j * sizeof(u32)); 985 j * sizeof(u32));
877 986
878 pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) " 987 if (CHIP_IS_E2(bp)) {
879 "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n", 988 pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
880 sb_data_e1x.common.p_func.pf_id, 989 "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
881 sb_data_e1x.common.p_func.vf_id, 990 sb_data_e2.common.p_func.pf_id,
882 sb_data_e1x.common.p_func.vf_valid, 991 sb_data_e2.common.p_func.vf_id,
883 sb_data_e1x.common.p_func.vnic_id, 992 sb_data_e2.common.p_func.vf_valid,
884 sb_data_e1x.common.same_igu_sb_1b); 993 sb_data_e2.common.p_func.vnic_id,
994 sb_data_e2.common.same_igu_sb_1b);
995 } else {
996 pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
997 "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
998 sb_data_e1x.common.p_func.pf_id,
999 sb_data_e1x.common.p_func.vf_id,
1000 sb_data_e1x.common.p_func.vf_valid,
1001 sb_data_e1x.common.p_func.vnic_id,
1002 sb_data_e1x.common.same_igu_sb_1b);
1003 }
885 1004
886 /* SB_SMs data */ 1005 /* SB_SMs data */
887 for (j = 0; j < HC_SB_MAX_SM; j++) { 1006 for (j = 0; j < HC_SB_MAX_SM; j++) {
@@ -969,7 +1088,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
969 BNX2X_ERR("end crash dump -----------------\n"); 1088 BNX2X_ERR("end crash dump -----------------\n");
970} 1089}
971 1090
972void bnx2x_int_enable(struct bnx2x *bp) 1091static void bnx2x_hc_int_enable(struct bnx2x *bp)
973{ 1092{
974 int port = BP_PORT(bp); 1093 int port = BP_PORT(bp);
975 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 1094 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
@@ -1011,7 +1130,7 @@ void bnx2x_int_enable(struct bnx2x *bp)
1011 mmiowb(); 1130 mmiowb();
1012 barrier(); 1131 barrier();
1013 1132
1014 if (CHIP_IS_E1H(bp)) { 1133 if (!CHIP_IS_E1(bp)) {
1015 /* init leading/trailing edge */ 1134 /* init leading/trailing edge */
1016 if (IS_MF(bp)) { 1135 if (IS_MF(bp)) {
1017 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4))); 1136 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
@@ -1029,7 +1148,66 @@ void bnx2x_int_enable(struct bnx2x *bp)
1029 mmiowb(); 1148 mmiowb();
1030} 1149}
1031 1150
1032void bnx2x_int_disable(struct bnx2x *bp) 1151static void bnx2x_igu_int_enable(struct bnx2x *bp)
1152{
1153 u32 val;
1154 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1155 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
1156
1157 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1158
1159 if (msix) {
1160 val &= ~(IGU_PF_CONF_INT_LINE_EN |
1161 IGU_PF_CONF_SINGLE_ISR_EN);
1162 val |= (IGU_PF_CONF_FUNC_EN |
1163 IGU_PF_CONF_MSI_MSIX_EN |
1164 IGU_PF_CONF_ATTN_BIT_EN);
1165 } else if (msi) {
1166 val &= ~IGU_PF_CONF_INT_LINE_EN;
1167 val |= (IGU_PF_CONF_FUNC_EN |
1168 IGU_PF_CONF_MSI_MSIX_EN |
1169 IGU_PF_CONF_ATTN_BIT_EN |
1170 IGU_PF_CONF_SINGLE_ISR_EN);
1171 } else {
1172 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
1173 val |= (IGU_PF_CONF_FUNC_EN |
1174 IGU_PF_CONF_INT_LINE_EN |
1175 IGU_PF_CONF_ATTN_BIT_EN |
1176 IGU_PF_CONF_SINGLE_ISR_EN);
1177 }
1178
1179 DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n",
1180 val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1181
1182 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1183
1184 barrier();
1185
1186 /* init leading/trailing edge */
1187 if (IS_MF(bp)) {
1188 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
1189 if (bp->port.pmf)
1190 /* enable nig and gpio3 attention */
1191 val |= 0x1100;
1192 } else
1193 val = 0xffff;
1194
1195 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
1196 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
1197
1198 /* Make sure that interrupts are indeed enabled from here on */
1199 mmiowb();
1200}
1201
1202void bnx2x_int_enable(struct bnx2x *bp)
1203{
1204 if (bp->common.int_block == INT_BLOCK_HC)
1205 bnx2x_hc_int_enable(bp);
1206 else
1207 bnx2x_igu_int_enable(bp);
1208}
1209
1210static void bnx2x_hc_int_disable(struct bnx2x *bp)
1033{ 1211{
1034 int port = BP_PORT(bp); 1212 int port = BP_PORT(bp);
1035 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 1213 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
@@ -1051,6 +1229,32 @@ void bnx2x_int_disable(struct bnx2x *bp)
1051 BNX2X_ERR("BUG! proper val not read from IGU!\n"); 1229 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1052} 1230}
1053 1231
1232static void bnx2x_igu_int_disable(struct bnx2x *bp)
1233{
1234 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1235
1236 val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
1237 IGU_PF_CONF_INT_LINE_EN |
1238 IGU_PF_CONF_ATTN_BIT_EN);
1239
1240 DP(NETIF_MSG_INTR, "write %x to IGU\n", val);
1241
1242 /* flush all outstanding writes */
1243 mmiowb();
1244
1245 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1246 if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
1247 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1248}
1249
1250void bnx2x_int_disable(struct bnx2x *bp)
1251{
1252 if (bp->common.int_block == INT_BLOCK_HC)
1253 bnx2x_hc_int_disable(bp);
1254 else
1255 bnx2x_igu_int_disable(bp);
1256}
1257
1054void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw) 1258void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
1055{ 1259{
1056 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; 1260 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
@@ -1194,7 +1398,7 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1194 return IRQ_HANDLED; 1398 return IRQ_HANDLED;
1195#endif 1399#endif
1196 1400
1197 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) { 1401 for_each_queue(bp, i) {
1198 struct bnx2x_fastpath *fp = &bp->fp[i]; 1402 struct bnx2x_fastpath *fp = &bp->fp[i];
1199 1403
1200 mask = 0x2 << (fp->index + CNIC_CONTEXT_USE); 1404 mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
@@ -1579,7 +1783,7 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
1579 /* Initialize link parameters structure variables */ 1783 /* Initialize link parameters structure variables */
1580 /* It is recommended to turn off RX FC for jumbo frames 1784 /* It is recommended to turn off RX FC for jumbo frames
1581 for better performance */ 1785 for better performance */
1582 if (bp->dev->mtu > 5000) 1786 if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
1583 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX; 1787 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1584 else 1788 else
1585 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; 1789 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
@@ -1693,13 +1897,11 @@ static void bnx2x_init_port_minmax(struct bnx2x *bp)
1693static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp) 1897static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1694{ 1898{
1695 int all_zero = 1; 1899 int all_zero = 1;
1696 int port = BP_PORT(bp);
1697 int vn; 1900 int vn;
1698 1901
1699 bp->vn_weight_sum = 0; 1902 bp->vn_weight_sum = 0;
1700 for (vn = VN_0; vn < E1HVN_MAX; vn++) { 1903 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1701 int func = 2*vn + port; 1904 u32 vn_cfg = bp->mf_config[vn];
1702 u32 vn_cfg = MF_CFG_RD(bp, func_mf_config[func].config);
1703 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 1905 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1704 FUNC_MF_CFG_MIN_BW_SHIFT) * 100; 1906 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1705 1907
@@ -1727,11 +1929,12 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1727 CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 1929 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1728} 1930}
1729 1931
1730static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func) 1932static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
1731{ 1933{
1732 struct rate_shaping_vars_per_vn m_rs_vn; 1934 struct rate_shaping_vars_per_vn m_rs_vn;
1733 struct fairness_vars_per_vn m_fair_vn; 1935 struct fairness_vars_per_vn m_fair_vn;
1734 u32 vn_cfg = MF_CFG_RD(bp, func_mf_config[func].config); 1936 u32 vn_cfg = bp->mf_config[vn];
1937 int func = 2*vn + BP_PORT(bp);
1735 u16 vn_min_rate, vn_max_rate; 1938 u16 vn_min_rate, vn_max_rate;
1736 int i; 1939 int i;
1737 1940
@@ -1744,7 +1947,7 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
1744 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 1947 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1745 FUNC_MF_CFG_MIN_BW_SHIFT) * 100; 1948 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1746 /* If min rate is zero - set it to 1 */ 1949 /* If min rate is zero - set it to 1 */
1747 if (!vn_min_rate) 1950 if (bp->vn_weight_sum && (vn_min_rate == 0))
1748 vn_min_rate = DEF_MIN_RATE; 1951 vn_min_rate = DEF_MIN_RATE;
1749 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> 1952 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1750 FUNC_MF_CFG_MAX_BW_SHIFT) * 100; 1953 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
@@ -1807,7 +2010,7 @@ static void bnx2x_read_mf_cfg(struct bnx2x *bp)
1807 2010
1808 for (vn = VN_0; vn < E1HVN_MAX; vn++) { 2011 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1809 int /*abs*/func = 2*vn + BP_PORT(bp); 2012 int /*abs*/func = 2*vn + BP_PORT(bp);
1810 bp->mf_config = 2013 bp->mf_config[vn] =
1811 MF_CFG_RD(bp, func_mf_config[func].config); 2014 MF_CFG_RD(bp, func_mf_config[func].config);
1812 } 2015 }
1813} 2016}
@@ -1878,7 +2081,7 @@ static void bnx2x_link_attn(struct bnx2x *bp)
1878 if (bp->link_vars.link_up) { 2081 if (bp->link_vars.link_up) {
1879 2082
1880 /* dropless flow control */ 2083 /* dropless flow control */
1881 if (CHIP_IS_E1H(bp) && bp->dropless_fc) { 2084 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
1882 int port = BP_PORT(bp); 2085 int port = BP_PORT(bp);
1883 u32 pause_enabled = 0; 2086 u32 pause_enabled = 0;
1884 2087
@@ -1906,37 +2109,19 @@ static void bnx2x_link_attn(struct bnx2x *bp)
1906 if (prev_link_status != bp->link_vars.link_status) 2109 if (prev_link_status != bp->link_vars.link_status)
1907 bnx2x_link_report(bp); 2110 bnx2x_link_report(bp);
1908 2111
1909 if (IS_MF(bp)) { 2112 if (IS_MF(bp))
1910 int port = BP_PORT(bp); 2113 bnx2x_link_sync_notify(bp);
1911 int func;
1912 int vn;
1913
1914 /* Set the attention towards other drivers on the same port */
1915 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1916 if (vn == BP_E1HVN(bp))
1917 continue;
1918
1919 func = ((vn << 1) | port);
1920 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1921 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1922 }
1923
1924 if (bp->link_vars.link_up) {
1925 int i;
1926
1927 /* Init rate shaping and fairness contexts */
1928 bnx2x_init_port_minmax(bp);
1929 2114
1930 for (vn = VN_0; vn < E1HVN_MAX; vn++) 2115 if (bp->link_vars.link_up && bp->link_vars.line_speed) {
1931 bnx2x_init_vn_minmax(bp, 2*vn + port); 2116 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
1932 2117
1933 /* Store it to internal memory */ 2118 if (cmng_fns != CMNG_FNS_NONE) {
1934 for (i = 0; 2119 bnx2x_cmng_fns_init(bp, false, cmng_fns);
1935 i < sizeof(struct cmng_struct_per_port) / 4; i++) 2120 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
1936 REG_WR(bp, BAR_XSTRORM_INTMEM + 2121 } else
1937 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4, 2122 /* rate shaping and fairness are disabled */
1938 ((u32 *)(&bp->cmng))[i]); 2123 DP(NETIF_MSG_IFUP,
1939 } 2124 "single function mode without fairness\n");
1940 } 2125 }
1941} 2126}
1942 2127
@@ -1952,7 +2137,9 @@ void bnx2x__link_status_update(struct bnx2x *bp)
1952 else 2137 else
1953 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 2138 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1954 2139
1955 bnx2x_calc_vn_weight_sum(bp); 2140 /* the link status update could be the result of a DCC event
2141 hence re-read the shmem mf configuration */
2142 bnx2x_read_mf_cfg(bp);
1956 2143
1957 /* indicate link status */ 2144 /* indicate link status */
1958 bnx2x_link_report(bp); 2145 bnx2x_link_report(bp);
@@ -1968,8 +2155,13 @@ static void bnx2x_pmf_update(struct bnx2x *bp)
1968 2155
1969 /* enable nig attention */ 2156 /* enable nig attention */
1970 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4))); 2157 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
1971 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); 2158 if (bp->common.int_block == INT_BLOCK_HC) {
1972 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); 2159 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2160 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2161 } else if (CHIP_IS_E2(bp)) {
2162 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2163 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2164 }
1973 2165
1974 bnx2x_stats_handle(bp, STATS_EVENT_PMF); 2166 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
1975} 2167}
@@ -1985,22 +2177,23 @@ static void bnx2x_pmf_update(struct bnx2x *bp)
1985/* send the MCP a request, block until there is a reply */ 2177/* send the MCP a request, block until there is a reply */
1986u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param) 2178u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
1987{ 2179{
1988 int func = BP_FUNC(bp); 2180 int mb_idx = BP_FW_MB_IDX(bp);
1989 u32 seq = ++bp->fw_seq; 2181 u32 seq = ++bp->fw_seq;
1990 u32 rc = 0; 2182 u32 rc = 0;
1991 u32 cnt = 1; 2183 u32 cnt = 1;
1992 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10; 2184 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
1993 2185
1994 mutex_lock(&bp->fw_mb_mutex); 2186 mutex_lock(&bp->fw_mb_mutex);
1995 SHMEM_WR(bp, func_mb[func].drv_mb_param, param); 2187 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
1996 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq)); 2188 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2189
1997 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq)); 2190 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
1998 2191
1999 do { 2192 do {
2000 /* let the FW do it's magic ... */ 2193 /* let the FW do it's magic ... */
2001 msleep(delay); 2194 msleep(delay);
2002 2195
2003 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header); 2196 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2004 2197
2005 /* Give the FW up to 5 second (500*10ms) */ 2198 /* Give the FW up to 5 second (500*10ms) */
2006 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500)); 2199 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
@@ -2264,10 +2457,28 @@ void bnx2x_pf_init(struct bnx2x *bp)
2264 if (!CHIP_IS_E1(bp)) 2457 if (!CHIP_IS_E1(bp))
2265 storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp)); 2458 storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
2266 2459
2460 if (CHIP_IS_E2(bp)) {
2461 /* reset IGU PF statistics: MSIX + ATTN */
2462 /* PF */
2463 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2464 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2465 (CHIP_MODE_IS_4_PORT(bp) ?
2466 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2467 /* ATTN */
2468 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2469 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2470 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
2471 (CHIP_MODE_IS_4_PORT(bp) ?
2472 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2473 }
2474
2267 /* function setup flags */ 2475 /* function setup flags */
2268 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ); 2476 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2269 2477
2270 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0; 2478 if (CHIP_IS_E1x(bp))
2479 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2480 else
2481 flags |= FUNC_FLG_TPA;
2271 2482
2272 /** 2483 /**
2273 * Although RSS is meaningless when there is a single HW queue we 2484 * Although RSS is meaningless when there is a single HW queue we
@@ -2361,7 +2572,7 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2361 * where the bp->flags can change so it is done without any 2572 * where the bp->flags can change so it is done without any
2362 * locks 2573 * locks
2363 */ 2574 */
2364 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) { 2575 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2365 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n"); 2576 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2366 bp->flags |= MF_FUNC_DIS; 2577 bp->flags |= MF_FUNC_DIS;
2367 2578
@@ -2548,14 +2759,13 @@ static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2548static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) 2759static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2549{ 2760{
2550 int port = BP_PORT(bp); 2761 int port = BP_PORT(bp);
2551 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2552 COMMAND_REG_ATTN_BITS_SET);
2553 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 2762 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2554 MISC_REG_AEU_MASK_ATTN_FUNC_0; 2763 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2555 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : 2764 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2556 NIG_REG_MASK_INTERRUPT_PORT0; 2765 NIG_REG_MASK_INTERRUPT_PORT0;
2557 u32 aeu_mask; 2766 u32 aeu_mask;
2558 u32 nig_mask = 0; 2767 u32 nig_mask = 0;
2768 u32 reg_addr;
2559 2769
2560 if (bp->attn_state & asserted) 2770 if (bp->attn_state & asserted)
2561 BNX2X_ERR("IGU ERROR\n"); 2771 BNX2X_ERR("IGU ERROR\n");
@@ -2630,9 +2840,15 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2630 2840
2631 } /* if hardwired */ 2841 } /* if hardwired */
2632 2842
2633 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n", 2843 if (bp->common.int_block == INT_BLOCK_HC)
2634 asserted, hc_addr); 2844 reg_addr = (HC_REG_COMMAND_REG + port*32 +
2635 REG_WR(bp, hc_addr, asserted); 2845 COMMAND_REG_ATTN_BITS_SET);
2846 else
2847 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
2848
2849 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
2850 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
2851 REG_WR(bp, reg_addr, asserted);
2636 2852
2637 /* now set back the mask */ 2853 /* now set back the mask */
2638 if (asserted & ATTN_NIG_FOR_FUNC) { 2854 if (asserted & ATTN_NIG_FOR_FUNC) {
@@ -2753,6 +2969,10 @@ static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2753 /* RQ_USDMDP_FIFO_OVERFLOW */ 2969 /* RQ_USDMDP_FIFO_OVERFLOW */
2754 if (val & 0x18000) 2970 if (val & 0x18000)
2755 BNX2X_ERR("FATAL error from PXP\n"); 2971 BNX2X_ERR("FATAL error from PXP\n");
2972 if (CHIP_IS_E2(bp)) {
2973 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
2974 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
2975 }
2756 } 2976 }
2757 2977
2758 if (attn & HW_INTERRUT_ASSERT_SET_2) { 2978 if (attn & HW_INTERRUT_ASSERT_SET_2) {
@@ -2783,9 +3003,10 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2783 int func = BP_FUNC(bp); 3003 int func = BP_FUNC(bp);
2784 3004
2785 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 3005 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2786 bp->mf_config = 3006 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
2787 MF_CFG_RD(bp, func_mf_config[func].config); 3007 func_mf_config[BP_ABS_FUNC(bp)].config);
2788 val = SHMEM_RD(bp, func_mb[func].drv_status); 3008 val = SHMEM_RD(bp,
3009 func_mb[BP_FW_MB_IDX(bp)].drv_status);
2789 if (val & DRV_STATUS_DCC_EVENT_MASK) 3010 if (val & DRV_STATUS_DCC_EVENT_MASK)
2790 bnx2x_dcc_event(bp, 3011 bnx2x_dcc_event(bp,
2791 (val & DRV_STATUS_DCC_EVENT_MASK)); 3012 (val & DRV_STATUS_DCC_EVENT_MASK));
@@ -2815,13 +3036,13 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2815 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { 3036 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2816 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn); 3037 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2817 if (attn & BNX2X_GRC_TIMEOUT) { 3038 if (attn & BNX2X_GRC_TIMEOUT) {
2818 val = CHIP_IS_E1H(bp) ? 3039 val = CHIP_IS_E1(bp) ? 0 :
2819 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0; 3040 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
2820 BNX2X_ERR("GRC time-out 0x%08x\n", val); 3041 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2821 } 3042 }
2822 if (attn & BNX2X_GRC_RSV) { 3043 if (attn & BNX2X_GRC_RSV) {
2823 val = CHIP_IS_E1H(bp) ? 3044 val = CHIP_IS_E1(bp) ? 0 :
2824 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0; 3045 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
2825 BNX2X_ERR("GRC reserved 0x%08x\n", val); 3046 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2826 } 3047 }
2827 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); 3048 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
@@ -3126,6 +3347,74 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3126 attn.sig[3]); 3347 attn.sig[3]);
3127} 3348}
3128 3349
3350
3351static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
3352{
3353 u32 val;
3354 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
3355
3356 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
3357 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
3358 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
3359 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3360 "ADDRESS_ERROR\n");
3361 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
3362 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3363 "INCORRECT_RCV_BEHAVIOR\n");
3364 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
3365 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3366 "WAS_ERROR_ATTN\n");
3367 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
3368 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3369 "VF_LENGTH_VIOLATION_ATTN\n");
3370 if (val &
3371 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
3372 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3373 "VF_GRC_SPACE_VIOLATION_ATTN\n");
3374 if (val &
3375 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
3376 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3377 "VF_MSIX_BAR_VIOLATION_ATTN\n");
3378 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
3379 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3380 "TCPL_ERROR_ATTN\n");
3381 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
3382 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3383 "TCPL_IN_TWO_RCBS_ATTN\n");
3384 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
3385 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3386 "CSSNOOP_FIFO_OVERFLOW\n");
3387 }
3388 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
3389 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
3390 BNX2X_ERR("ATC hw attention 0x%x\n", val);
3391 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
3392 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
3393 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
3394 BNX2X_ERR("ATC_ATC_INT_STS_REG"
3395 "_ATC_TCPL_TO_NOT_PEND\n");
3396 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
3397 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3398 "ATC_GPA_MULTIPLE_HITS\n");
3399 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
3400 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3401 "ATC_RCPL_TO_EMPTY_CNT\n");
3402 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
3403 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
3404 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
3405 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3406 "ATC_IREQ_LESS_THAN_STU\n");
3407 }
3408
3409 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3410 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
3411 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
3412 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3413 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
3414 }
3415
3416}
3417
3129static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) 3418static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3130{ 3419{
3131 struct attn_route attn, *group_mask; 3420 struct attn_route attn, *group_mask;
@@ -3156,17 +3445,28 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3156 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); 3445 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3157 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); 3446 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3158 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); 3447 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3159 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n", 3448 if (CHIP_IS_E2(bp))
3160 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]); 3449 attn.sig[4] =
3450 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
3451 else
3452 attn.sig[4] = 0;
3453
3454 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
3455 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
3161 3456
3162 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 3457 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3163 if (deasserted & (1 << index)) { 3458 if (deasserted & (1 << index)) {
3164 group_mask = &bp->attn_group[index]; 3459 group_mask = &bp->attn_group[index];
3165 3460
3166 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n", 3461 DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
3167 index, group_mask->sig[0], group_mask->sig[1], 3462 "%08x %08x %08x\n",
3168 group_mask->sig[2], group_mask->sig[3]); 3463 index,
3464 group_mask->sig[0], group_mask->sig[1],
3465 group_mask->sig[2], group_mask->sig[3],
3466 group_mask->sig[4]);
3169 3467
3468 bnx2x_attn_int_deasserted4(bp,
3469 attn.sig[4] & group_mask->sig[4]);
3170 bnx2x_attn_int_deasserted3(bp, 3470 bnx2x_attn_int_deasserted3(bp,
3171 attn.sig[3] & group_mask->sig[3]); 3471 attn.sig[3] & group_mask->sig[3]);
3172 bnx2x_attn_int_deasserted1(bp, 3472 bnx2x_attn_int_deasserted1(bp,
@@ -3180,11 +3480,15 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3180 3480
3181 bnx2x_release_alr(bp); 3481 bnx2x_release_alr(bp);
3182 3482
3183 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR); 3483 if (bp->common.int_block == INT_BLOCK_HC)
3484 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3485 COMMAND_REG_ATTN_BITS_CLR);
3486 else
3487 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
3184 3488
3185 val = ~deasserted; 3489 val = ~deasserted;
3186 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n", 3490 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
3187 val, reg_addr); 3491 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
3188 REG_WR(bp, reg_addr, val); 3492 REG_WR(bp, reg_addr, val);
3189 3493
3190 if (~bp->attn_state & deasserted) 3494 if (~bp->attn_state & deasserted)
@@ -3471,7 +3775,7 @@ static void bnx2x_timer(unsigned long data)
3471 } 3775 }
3472 3776
3473 if (!BP_NOMCP(bp)) { 3777 if (!BP_NOMCP(bp)) {
3474 int func = BP_FUNC(bp); 3778 int mb_idx = BP_FW_MB_IDX(bp);
3475 u32 drv_pulse; 3779 u32 drv_pulse;
3476 u32 mcp_pulse; 3780 u32 mcp_pulse;
3477 3781
@@ -3479,9 +3783,9 @@ static void bnx2x_timer(unsigned long data)
3479 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; 3783 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3480 /* TBD - add SYSTEM_TIME */ 3784 /* TBD - add SYSTEM_TIME */
3481 drv_pulse = bp->fw_drv_pulse_wr_seq; 3785 drv_pulse = bp->fw_drv_pulse_wr_seq;
3482 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse); 3786 SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
3483 3787
3484 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) & 3788 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
3485 MCP_PULSE_SEQ_MASK); 3789 MCP_PULSE_SEQ_MASK);
3486 /* The delta between driver pulse and mcp response 3790 /* The delta between driver pulse and mcp response
3487 * should be 1 (before mcp response) or 0 (after mcp response) 3791 * should be 1 (before mcp response) or 0 (after mcp response)
@@ -3539,17 +3843,26 @@ static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
3539{ 3843{
3540 u32 *sb_data_p; 3844 u32 *sb_data_p;
3541 u32 data_size = 0; 3845 u32 data_size = 0;
3846 struct hc_status_block_data_e2 sb_data_e2;
3542 struct hc_status_block_data_e1x sb_data_e1x; 3847 struct hc_status_block_data_e1x sb_data_e1x;
3543 3848
3544 /* disable the function first */ 3849 /* disable the function first */
3545 memset(&sb_data_e1x, 0, 3850 if (CHIP_IS_E2(bp)) {
3546 sizeof(struct hc_status_block_data_e1x)); 3851 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3547 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED; 3852 sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3548 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED; 3853 sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3549 sb_data_e1x.common.p_func.vf_valid = false; 3854 sb_data_e2.common.p_func.vf_valid = false;
3550 sb_data_p = (u32 *)&sb_data_e1x; 3855 sb_data_p = (u32 *)&sb_data_e2;
3551 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); 3856 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3552 3857 } else {
3858 memset(&sb_data_e1x, 0,
3859 sizeof(struct hc_status_block_data_e1x));
3860 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3861 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3862 sb_data_e1x.common.p_func.vf_valid = false;
3863 sb_data_p = (u32 *)&sb_data_e1x;
3864 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3865 }
3553 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size); 3866 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3554 3867
3555 bnx2x_fill(bp, BAR_CSTRORM_INTMEM + 3868 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
@@ -3610,30 +3923,48 @@ void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
3610{ 3923{
3611 int igu_seg_id; 3924 int igu_seg_id;
3612 3925
3926 struct hc_status_block_data_e2 sb_data_e2;
3613 struct hc_status_block_data_e1x sb_data_e1x; 3927 struct hc_status_block_data_e1x sb_data_e1x;
3614 struct hc_status_block_sm *hc_sm_p; 3928 struct hc_status_block_sm *hc_sm_p;
3615 struct hc_index_data *hc_index_p; 3929 struct hc_index_data *hc_index_p;
3616 int data_size; 3930 int data_size;
3617 u32 *sb_data_p; 3931 u32 *sb_data_p;
3618 3932
3619 igu_seg_id = HC_SEG_ACCESS_NORM; 3933 if (CHIP_INT_MODE_IS_BC(bp))
3934 igu_seg_id = HC_SEG_ACCESS_NORM;
3935 else
3936 igu_seg_id = IGU_SEG_ACCESS_NORM;
3620 3937
3621 bnx2x_zero_fp_sb(bp, fw_sb_id); 3938 bnx2x_zero_fp_sb(bp, fw_sb_id);
3622 3939
3623 memset(&sb_data_e1x, 0, 3940 if (CHIP_IS_E2(bp)) {
3624 sizeof(struct hc_status_block_data_e1x)); 3941 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3625 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp); 3942 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
3626 sb_data_e1x.common.p_func.vf_id = 0xff; 3943 sb_data_e2.common.p_func.vf_id = vfid;
3627 sb_data_e1x.common.p_func.vf_valid = false; 3944 sb_data_e2.common.p_func.vf_valid = vf_valid;
3628 sb_data_e1x.common.p_func.vnic_id = BP_E1HVN(bp); 3945 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
3629 sb_data_e1x.common.same_igu_sb_1b = true; 3946 sb_data_e2.common.same_igu_sb_1b = true;
3630 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping); 3947 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
3631 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping); 3948 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
3632 hc_sm_p = sb_data_e1x.common.state_machine; 3949 hc_sm_p = sb_data_e2.common.state_machine;
3633 hc_index_p = sb_data_e1x.index_data; 3950 hc_index_p = sb_data_e2.index_data;
3634 sb_data_p = (u32 *)&sb_data_e1x; 3951 sb_data_p = (u32 *)&sb_data_e2;
3635 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); 3952 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3636 3953 } else {
3954 memset(&sb_data_e1x, 0,
3955 sizeof(struct hc_status_block_data_e1x));
3956 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
3957 sb_data_e1x.common.p_func.vf_id = 0xff;
3958 sb_data_e1x.common.p_func.vf_valid = false;
3959 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
3960 sb_data_e1x.common.same_igu_sb_1b = true;
3961 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
3962 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
3963 hc_sm_p = sb_data_e1x.common.state_machine;
3964 hc_index_p = sb_data_e1x.index_data;
3965 sb_data_p = (u32 *)&sb_data_e1x;
3966 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3967 }
3637 3968
3638 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], 3969 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
3639 igu_sb_id, igu_seg_id); 3970 igu_sb_id, igu_seg_id);
@@ -3666,6 +3997,7 @@ static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
3666 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX, 3997 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
3667 false, tx_usec); 3998 false, tx_usec);
3668} 3999}
4000
3669static void bnx2x_init_def_sb(struct bnx2x *bp) 4001static void bnx2x_init_def_sb(struct bnx2x *bp)
3670{ 4002{
3671 struct host_sp_status_block *def_sb = bp->def_status_blk; 4003 struct host_sp_status_block *def_sb = bp->def_status_blk;
@@ -3680,8 +4012,13 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
3680 struct hc_sp_status_block_data sp_sb_data; 4012 struct hc_sp_status_block_data sp_sb_data;
3681 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); 4013 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
3682 4014
3683 igu_sp_sb_index = DEF_SB_IGU_ID; 4015 if (CHIP_INT_MODE_IS_BC(bp)) {
3684 igu_seg_id = HC_SEG_ACCESS_DEF; 4016 igu_sp_sb_index = DEF_SB_IGU_ID;
4017 igu_seg_id = HC_SEG_ACCESS_DEF;
4018 } else {
4019 igu_sp_sb_index = bp->igu_dsb_id;
4020 igu_seg_id = IGU_SEG_ACCESS_DEF;
4021 }
3685 4022
3686 /* ATTN */ 4023 /* ATTN */
3687 section = ((u64)mapping) + offsetof(struct host_sp_status_block, 4024 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
@@ -3698,12 +4035,29 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
3698 for (sindex = 0; sindex < 4; sindex++) 4035 for (sindex = 0; sindex < 4; sindex++)
3699 bp->attn_group[index].sig[sindex] = 4036 bp->attn_group[index].sig[sindex] =
3700 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index); 4037 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
4038
4039 if (CHIP_IS_E2(bp))
4040 /*
4041 * enable5 is separate from the rest of the registers,
4042 * and therefore the address skip is 4
4043 * and not 16 between the different groups
4044 */
4045 bp->attn_group[index].sig[4] = REG_RD(bp,
4046 reg_offset + 0x10 + 0x4*index);
4047 else
4048 bp->attn_group[index].sig[4] = 0;
3701 } 4049 }
3702 4050
3703 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L : 4051 if (bp->common.int_block == INT_BLOCK_HC) {
3704 HC_REG_ATTN_MSG0_ADDR_L); 4052 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
3705 REG_WR(bp, reg_offset, U64_LO(section)); 4053 HC_REG_ATTN_MSG0_ADDR_L);
3706 REG_WR(bp, reg_offset + 4, U64_HI(section)); 4054
4055 REG_WR(bp, reg_offset, U64_LO(section));
4056 REG_WR(bp, reg_offset + 4, U64_HI(section));
4057 } else if (CHIP_IS_E2(bp)) {
4058 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
4059 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
4060 }
3707 4061
3708 section = ((u64)mapping) + offsetof(struct host_sp_status_block, 4062 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
3709 sp_sb); 4063 sp_sb);
@@ -3715,7 +4069,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
3715 sp_sb_data.igu_sb_id = igu_sp_sb_index; 4069 sp_sb_data.igu_sb_id = igu_sp_sb_index;
3716 sp_sb_data.igu_seg_id = igu_seg_id; 4070 sp_sb_data.igu_seg_id = igu_seg_id;
3717 sp_sb_data.p_func.pf_id = func; 4071 sp_sb_data.p_func.pf_id = func;
3718 sp_sb_data.p_func.vnic_id = BP_E1HVN(bp); 4072 sp_sb_data.p_func.vnic_id = BP_VN(bp);
3719 sp_sb_data.p_func.vf_id = 0xff; 4073 sp_sb_data.p_func.vf_id = 0xff;
3720 4074
3721 bnx2x_wr_sp_sb_data(bp, &sp_sb_data); 4075 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
@@ -3870,6 +4224,11 @@ static void bnx2x_init_internal_common(struct bnx2x *bp)
3870 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) 4224 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
3871 REG_WR(bp, BAR_USTRORM_INTMEM + 4225 REG_WR(bp, BAR_USTRORM_INTMEM +
3872 USTORM_AGG_DATA_OFFSET + i * 4, 0); 4226 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4227 if (CHIP_IS_E2(bp)) {
4228 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
4229 CHIP_INT_MODE_IS_BC(bp) ?
4230 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
4231 }
3873} 4232}
3874 4233
3875static void bnx2x_init_internal_port(struct bnx2x *bp) 4234static void bnx2x_init_internal_port(struct bnx2x *bp)
@@ -3881,6 +4240,7 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
3881{ 4240{
3882 switch (load_code) { 4241 switch (load_code) {
3883 case FW_MSG_CODE_DRV_LOAD_COMMON: 4242 case FW_MSG_CODE_DRV_LOAD_COMMON:
4243 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
3884 bnx2x_init_internal_common(bp); 4244 bnx2x_init_internal_common(bp);
3885 /* no break */ 4245 /* no break */
3886 4246
@@ -3911,9 +4271,11 @@ static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
3911 fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE; 4271 fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
3912 /* qZone id equals to FW (per path) client id */ 4272 /* qZone id equals to FW (per path) client id */
3913 fp->cl_qzone_id = fp->cl_id + 4273 fp->cl_qzone_id = fp->cl_id +
3914 BP_PORT(bp)*(ETH_MAX_RX_CLIENTS_E1H); 4274 BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
4275 ETH_MAX_RX_CLIENTS_E1H);
3915 /* init shortcut */ 4276 /* init shortcut */
3916 fp->ustorm_rx_prods_offset = 4277 fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
4278 USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
3917 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id); 4279 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
3918 /* Setup SB indices */ 4280 /* Setup SB indices */
3919 fp->rx_cons_sb = BNX2X_RX_SB_INDEX; 4281 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
@@ -4248,9 +4610,19 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
4248static void enable_blocks_attention(struct bnx2x *bp) 4610static void enable_blocks_attention(struct bnx2x *bp)
4249{ 4611{
4250 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); 4612 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4251 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0); 4613 if (CHIP_IS_E2(bp))
4614 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
4615 else
4616 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
4252 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); 4617 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4253 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); 4618 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4619 /*
4620 * mask read-length-error interrupts in the BRB for the parser
4621 * (parsing unit and 'checksum and crc' unit);
4622 * these errors are legal (the PU reads a fixed length and the CAC can
4623 * cause a read-length error on truncated packets)
4624 */
4625 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
4254 REG_WR(bp, QM_REG_QM_INT_MASK, 0); 4626 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
4255 REG_WR(bp, TM_REG_TM_INT_MASK, 0); 4627 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
4256 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0); 4628 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
@@ -4271,6 +4643,13 @@ static void enable_blocks_attention(struct bnx2x *bp)
4271/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */ 4643/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
4272 if (CHIP_REV_IS_FPGA(bp)) 4644 if (CHIP_REV_IS_FPGA(bp))
4273 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000); 4645 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
4646 else if (CHIP_IS_E2(bp))
4647 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
4648 (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
4649 | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
4650 | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
4651 | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
4652 | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
4274 else 4653 else
4275 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000); 4654 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
4276 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0); 4655 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
@@ -4288,11 +4667,11 @@ static const struct {
4288 u32 addr; 4667 u32 addr;
4289 u32 mask; 4668 u32 mask;
4290} bnx2x_parity_mask[] = { 4669} bnx2x_parity_mask[] = {
4291 {PXP_REG_PXP_PRTY_MASK, 0xffffffff}, 4670 {PXP_REG_PXP_PRTY_MASK, 0x3ffffff},
4292 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff}, 4671 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
4293 {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff}, 4672 {PXP2_REG_PXP2_PRTY_MASK_1, 0x7f},
4294 {HC_REG_HC_PRTY_MASK, 0xffffffff}, 4673 {HC_REG_HC_PRTY_MASK, 0x7},
4295 {MISC_REG_MISC_PRTY_MASK, 0xffffffff}, 4674 {MISC_REG_MISC_PRTY_MASK, 0x1},
4296 {QM_REG_QM_PRTY_MASK, 0x0}, 4675 {QM_REG_QM_PRTY_MASK, 0x0},
4297 {DORQ_REG_DORQ_PRTY_MASK, 0x0}, 4676 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
4298 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0}, 4677 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
@@ -4407,23 +4786,97 @@ static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
4407 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val); 4786 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
4408} 4787}
4409 4788
4789static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4790{
4791 u32 offset = 0;
4792
4793 if (CHIP_IS_E1(bp))
4794 return;
4795 if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4796 return;
4797
4798 switch (BP_ABS_FUNC(bp)) {
4799 case 0:
4800 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4801 break;
4802 case 1:
4803 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4804 break;
4805 case 2:
4806 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4807 break;
4808 case 3:
4809 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4810 break;
4811 case 4:
4812 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4813 break;
4814 case 5:
4815 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4816 break;
4817 case 6:
4818 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4819 break;
4820 case 7:
4821 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4822 break;
4823 default:
4824 return;
4825 }
4826
4827 REG_WR(bp, offset, pretend_func_num);
4828 REG_RD(bp, offset);
4829 DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
4830}
4831
4832static void bnx2x_pf_disable(struct bnx2x *bp)
4833{
4834 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
4835 val &= ~IGU_PF_CONF_FUNC_EN;
4836
4837 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
4838 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
4839 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
4840}
4841
4410static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) 4842static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
4411{ 4843{
4412 u32 val, i; 4844 u32 val, i;
4413 4845
4414 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp)); 4846 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
4415 4847
4416 bnx2x_reset_common(bp); 4848 bnx2x_reset_common(bp);
4417 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); 4849 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4418 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc); 4850 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
4419 4851
4420 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE); 4852 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
4421 if (CHIP_IS_E1H(bp)) 4853 if (!CHIP_IS_E1(bp))
4422 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp)); 4854 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
4423 4855
4424 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100); 4856 if (CHIP_IS_E2(bp)) {
4425 msleep(30); 4857 u8 fid;
4426 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0); 4858
4859 /**
4860 * 4-port mode or 2-port mode we need to turn of master-enable
4861 * for everyone, after that, turn it back on for self.
4862 * so, we disregard multi-function or not, and always disable
4863 * for all functions on the given path, this means 0,2,4,6 for
4864 * path 0 and 1,3,5,7 for path 1
4865 */
4866 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
4867 if (fid == BP_ABS_FUNC(bp)) {
4868 REG_WR(bp,
4869 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
4870 1);
4871 continue;
4872 }
4873
4874 bnx2x_pretend_func(bp, fid);
4875 /* clear pf enable */
4876 bnx2x_pf_disable(bp);
4877 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4878 }
4879 }
4427 4880
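The loop above walks every physical function on the current path (even function numbers on path 0, odd on path 1), disables master-enable on each, and re-enables it only for itself. A minimal sketch of which absolute functions get visited, assuming E2_FUNC_MAX is 4 as the 0,2,4,6 / 1,3,5,7 comment implies:

#include <stdio.h>

#define E2_FUNC_MAX 4	/* assumed: four PFs per path */

int main(void)
{
	int path, fid;

	for (path = 0; path < 2; path++) {
		printf("path %d disables master-enable on functions:", path);
		for (fid = path; fid < E2_FUNC_MAX * 2; fid += 2)
			printf(" %d", fid);
		printf("\n");
	}
	return 0;
}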
4428 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE); 4881 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
4429 if (CHIP_IS_E1(bp)) { 4882 if (CHIP_IS_E1(bp)) {
@@ -4471,9 +4924,65 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
4471 return -EBUSY; 4924 return -EBUSY;
4472 } 4925 }
4473 4926
4927 /* Timers bug workaround E2 only. We need to set the entire ILT to
4928 * have entries with value "0" and valid bit on.
4929 * This needs to be done by the first PF that is loaded in a path
4930 * (i.e. common phase)
4931 */
4932 if (CHIP_IS_E2(bp)) {
4933 struct ilt_client_info ilt_cli;
4934 struct bnx2x_ilt ilt;
4935 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
4936 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
4937
4938 /* initialize dummy TM client */
4939 ilt_cli.start = 0;
4940 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
4941 ilt_cli.client_num = ILT_CLIENT_TM;
4942
4943 /* Step 1: set zeroes to all ILT page entries with the valid bit on.
4944 * Step 2: set the timers' first/last ILT entry to point
4945 * to the entire range, to prevent an ILT range error for the 3rd/4th
4946 * vnic (this code assumes the vnic exists)
4947 *
4948 * both steps are performed by the call to bnx2x_ilt_client_init_op()
4949 * with the dummy TM client
4950 *
4951 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
4952 * and its counterpart are split registers
4953 */
4954 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
4955 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
4956 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4957
4958 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
4959 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
4960 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
4961 }
4962
4963
4474 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0); 4964 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
4475 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0); 4965 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
4476 4966
4967 if (CHIP_IS_E2(bp)) {
4968 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
4969 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
4970 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
4971
4972 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
4973
4974 /* let the HW do its magic ... */
4975 do {
4976 msleep(200);
4977 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
4978 } while (factor-- && (val != 1));
4979
4980 if (val != 1) {
4981 BNX2X_ERR("ATC_INIT failed\n");
4982 return -EBUSY;
4983 }
4984 }
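The ATC wait above is the usual bounded-poll pattern: the retry budget ("factor") is inflated for emulation and FPGA, where the block runs far slower than real silicon. A toy sketch of the same loop shape, with a stand-in read instead of REG_RD:

#include <stdio.h>

/* pretend the init-done flag comes up on the third poll */
static int read_init_done(void)
{
	static int polls;

	return ++polls >= 3;
}

int main(void)
{
	int factor = 10;	/* example budget; the driver uses 1000/400/0 */
	int val;

	do {
		/* msleep(200) would go here in the driver */
		val = read_init_done();
	} while (factor-- && val != 1);

	printf(val == 1 ? "init done\n" : "init timed out\n");
	return 0;
}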
4985
4477 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE); 4986 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
4478 4987
4479 /* clean the DMAE memory */ 4988 /* clean the DMAE memory */
@@ -4492,6 +5001,8 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
4492 5001
4493 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE); 5002 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
4494 5003
5004 if (CHIP_MODE_IS_4_PORT(bp))
5005 bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
4495 /* QM queues pointers table */ 5006 /* QM queues pointers table */
4496 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET); 5007 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
4497 5008
@@ -4512,14 +5023,26 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
4512 } 5023 }
4513 5024
4514 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE); 5025 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5026 if (CHIP_MODE_IS_4_PORT(bp)) {
5027 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
5028 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
5029 }
5030
4515 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE); 5031 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4516 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); 5032 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4517#ifndef BCM_CNIC 5033#ifndef BCM_CNIC
4518 /* set NIC mode */ 5034 /* set NIC mode */
4519 REG_WR(bp, PRS_REG_NIC_MODE, 1); 5035 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4520#endif 5036#endif
4521 if (CHIP_IS_E1H(bp)) 5037 if (!CHIP_IS_E1(bp))
4522 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF(bp)); 5038 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF(bp));
5039 if (CHIP_IS_E2(bp)) {
5040 /* Bit-map indicating which L2 hdrs may appear after the
5041 basic Ethernet header */
5042 int has_ovlan = IS_MF(bp);
5043 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5044 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5045 }
4523 5046
4524 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE); 5047 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
4525 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE); 5048 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
@@ -4536,6 +5059,9 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
4536 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE); 5059 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
4537 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE); 5060 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
4538 5061
5062 if (CHIP_MODE_IS_4_PORT(bp))
5063 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
5064
4539 /* sync semi rtc */ 5065 /* sync semi rtc */
4540 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 5066 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4541 0x80000000); 5067 0x80000000);
@@ -4546,6 +5072,12 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
4546 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE); 5072 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
4547 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE); 5073 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
4548 5074
5075 if (CHIP_IS_E2(bp)) {
5076 int has_ovlan = IS_MF(bp);
5077 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5078 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5079 }
5080
4549 REG_WR(bp, SRC_REG_SOFT_RST, 1); 5081 REG_WR(bp, SRC_REG_SOFT_RST, 1);
4550 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) 5082 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
4551 REG_WR(bp, i, random32()); 5083 REG_WR(bp, i, random32());
@@ -4583,6 +5115,11 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
4583 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000); 5115 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
4584 5116
4585 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE); 5117 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5118
5119 if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
5120 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
5121
5122 bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
4586 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE); 5123 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
4587 5124
4588 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE); 5125 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
@@ -4590,16 +5127,35 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
4590 REG_WR(bp, 0x2814, 0xffffffff); 5127 REG_WR(bp, 0x2814, 0xffffffff);
4591 REG_WR(bp, 0x3820, 0xffffffff); 5128 REG_WR(bp, 0x3820, 0xffffffff);
4592 5129
5130 if (CHIP_IS_E2(bp)) {
5131 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
5132 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
5133 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
5134 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
5135 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
5136 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
5137 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
5138 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
5139 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
5140 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
5141 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
5142 }
5143
4593 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE); 5144 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
4594 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE); 5145 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
4595 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE); 5146 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
4596 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE); 5147 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
4597 5148
4598 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE); 5149 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
4599 if (CHIP_IS_E1H(bp)) { 5150 if (!CHIP_IS_E1(bp)) {
4600 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp)); 5151 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
4601 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF(bp)); 5152 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF(bp));
4602 } 5153 }
5154 if (CHIP_IS_E2(bp)) {
5155 /* Bit-map indicating which L2 hdrs may appear after the
5156 basic Ethernet header */
5157 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF(bp) ? 7 : 6));
5158 }
4603 5159
4604 if (CHIP_REV_IS_SLOW(bp)) 5160 if (CHIP_REV_IS_SLOW(bp))
4605 msleep(200); 5161 msleep(200);
@@ -4622,15 +5178,17 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
4622 } 5178 }
4623 REG_WR(bp, CFC_REG_DEBUG0, 0); 5179 REG_WR(bp, CFC_REG_DEBUG0, 0);
4624 5180
4625 /* read NIG statistic 5181 if (CHIP_IS_E1(bp)) {
4626 to see if this is our first up since powerup */ 5182 /* read NIG statistic
4627 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); 5183 to see if this is our first up since powerup */
4628 val = *bnx2x_sp(bp, wb_data[0]); 5184 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5185 val = *bnx2x_sp(bp, wb_data[0]);
4629 5186
4630 /* do internal memory self test */ 5187 /* do internal memory self test */
4631 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) { 5188 if ((val == 0) && bnx2x_int_mem_test(bp)) {
4632 BNX2X_ERR("internal mem self test failed\n"); 5189 BNX2X_ERR("internal mem self test failed\n");
4633 return -EBUSY; 5190 return -EBUSY;
5191 }
4634 } 5192 }
4635 5193
4636 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp, 5194 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
@@ -4647,10 +5205,23 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
4647 enable_blocks_parity(bp); 5205 enable_blocks_parity(bp);
4648 5206
4649 if (!BP_NOMCP(bp)) { 5207 if (!BP_NOMCP(bp)) {
4650 bnx2x_acquire_phy_lock(bp); 5208 /* In E2 2-PORT mode, same ext phy is used for the two paths */
4651 bnx2x_common_init_phy(bp, bp->common.shmem_base, 5209 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
4652 bp->common.shmem2_base); 5210 CHIP_IS_E1x(bp)) {
4653 bnx2x_release_phy_lock(bp); 5211 u32 shmem_base[2], shmem2_base[2];
5212 shmem_base[0] = bp->common.shmem_base;
5213 shmem2_base[0] = bp->common.shmem2_base;
5214 if (CHIP_IS_E2(bp)) {
5215 shmem_base[1] =
5216 SHMEM2_RD(bp, other_shmem_base_addr);
5217 shmem2_base[1] =
5218 SHMEM2_RD(bp, other_shmem2_base_addr);
5219 }
5220 bnx2x_acquire_phy_lock(bp);
5221 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
5222 bp->common.chip_id);
5223 bnx2x_release_phy_lock(bp);
5224 }
4654 } else 5225 } else
4655 BNX2X_ERR("Bootcode is missing - can not initialize link\n"); 5226 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
4656 5227
@@ -4671,6 +5242,14 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
4671 bnx2x_init_block(bp, PXP_BLOCK, init_stage); 5242 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
4672 bnx2x_init_block(bp, PXP2_BLOCK, init_stage); 5243 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
4673 5244
5245 /* Timers bug workaround: the common phase disables the pf_master bit
5246 * in PGLUE, so we need to enable it here before any DMAE access is
5247 * attempted. Therefore we manually add the enable-master in the
5248 * port phase (it also happens in the function phase)
5249 */
5250 if (CHIP_IS_E2(bp))
5251 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5252
4674 bnx2x_init_block(bp, TCM_BLOCK, init_stage); 5253 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
4675 bnx2x_init_block(bp, UCM_BLOCK, init_stage); 5254 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
4676 bnx2x_init_block(bp, CCM_BLOCK, init_stage); 5255 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
@@ -4687,29 +5266,41 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
4687 5266
4688 bnx2x_init_block(bp, DQ_BLOCK, init_stage); 5267 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
4689 5268
4690 bnx2x_init_block(bp, BRB1_BLOCK, init_stage); 5269 if (CHIP_MODE_IS_4_PORT(bp))
4691 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) { 5270 bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);
4692 /* no pause for emulation and FPGA */ 5271
4693 low = 0; 5272 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
4694 high = 513; 5273 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
4695 } else { 5274 if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
4696 if (IS_MF(bp)) 5275 /* no pause for emulation and FPGA */
4697 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246); 5276 low = 0;
4698 else if (bp->dev->mtu > 4096) { 5277 high = 513;
4699 if (bp->flags & ONE_PORT_FLAG) 5278 } else {
4700 low = 160; 5279 if (IS_MF(bp))
4701 else { 5280 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
4702 val = bp->dev->mtu; 5281 else if (bp->dev->mtu > 4096) {
4703 /* (24*1024 + val*4)/256 */ 5282 if (bp->flags & ONE_PORT_FLAG)
4704 low = 96 + (val/64) + ((val % 64) ? 1 : 0); 5283 low = 160;
4705 } 5284 else {
4706 } else 5285 val = bp->dev->mtu;
4707 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160); 5286 /* (24*1024 + val*4)/256 */
4708 high = low + 56; /* 14*1024/256 */ 5287 low = 96 + (val/64) +
5288 ((val % 64) ? 1 : 0);
5289 }
5290 } else
5291 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5292 high = low + 56; /* 14*1024/256 */
5293 }
5294 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5295 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
4709 } 5296 }
4710 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
4711 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
4712 5297
5298 if (CHIP_MODE_IS_4_PORT(bp)) {
5299 REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
5300 REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
5301 REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
5302 BRB1_REG_MAC_GUARANTIED_0), 40);
5303 }
4713 5304
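The BRB pause thresholds above are derived from the MTU: the in-line formula (24*1024 + val*4)/256, rounded up, is what the integer expression 96 + val/64 + (val % 64 ? 1 : 0) computes, and "high" is always 56 lines (14*1024/256) above "low". A quick standalone check with an example jumbo MTU:

#include <stdio.h>

int main(void)
{
	unsigned int mtu = 9000;	/* example jumbo MTU */
	unsigned int low = 96 + mtu / 64 + (mtu % 64 ? 1 : 0);
	unsigned int check = (24 * 1024 + mtu * 4 + 255) / 256;	/* ceiling */
	unsigned int high = low + 56;	/* 14*1024/256 */

	printf("mtu %u: low %u (formula gives %u), high %u\n",
	       mtu, low, check, high);
	return 0;
}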
4714 bnx2x_init_block(bp, PRS_BLOCK, init_stage); 5305 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
4715 5306
@@ -4722,24 +5313,28 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
4722 bnx2x_init_block(bp, USEM_BLOCK, init_stage); 5313 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
4723 bnx2x_init_block(bp, CSEM_BLOCK, init_stage); 5314 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
4724 bnx2x_init_block(bp, XSEM_BLOCK, init_stage); 5315 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
5316 if (CHIP_MODE_IS_4_PORT(bp))
5317 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);
4725 5318
4726 bnx2x_init_block(bp, UPB_BLOCK, init_stage); 5319 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
4727 bnx2x_init_block(bp, XPB_BLOCK, init_stage); 5320 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
4728 5321
4729 bnx2x_init_block(bp, PBF_BLOCK, init_stage); 5322 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
4730 5323
4731 /* configure PBF to work without PAUSE mtu 9000 */ 5324 if (!CHIP_IS_E2(bp)) {
4732 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); 5325 /* configure PBF to work without PAUSE mtu 9000 */
5326 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
4733 5327
4734 /* update threshold */ 5328 /* update threshold */
4735 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16)); 5329 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
4736 /* update init credit */ 5330 /* update init credit */
4737 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); 5331 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
4738 5332
4739 /* probe changes */ 5333 /* probe changes */
4740 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1); 5334 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
4741 msleep(5); 5335 udelay(50);
4742 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0); 5336 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5337 }
4743 5338
4744#ifdef BCM_CNIC 5339#ifdef BCM_CNIC
4745 bnx2x_init_block(bp, SRCH_BLOCK, init_stage); 5340 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
@@ -4753,6 +5348,8 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
4753 } 5348 }
4754 bnx2x_init_block(bp, HC_BLOCK, init_stage); 5349 bnx2x_init_block(bp, HC_BLOCK, init_stage);
4755 5350
5351 bnx2x_init_block(bp, IGU_BLOCK, init_stage);
5352
4756 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage); 5353 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
4757 /* init aeu_mask_attn_func_0/1: 5354 /* init aeu_mask_attn_func_0/1:
4758 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use 5355 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
@@ -4771,11 +5368,25 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
4771 5368
4772 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); 5369 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
4773 5370
4774 if (CHIP_IS_E1H(bp)) { 5371 if (!CHIP_IS_E1(bp)) {
4775 /* 0x2 disable mf_ov, 0x1 enable */ 5372 /* 0x2 disable mf_ov, 0x1 enable */
4776 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, 5373 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
4777 (IS_MF(bp) ? 0x1 : 0x2)); 5374 (IS_MF(bp) ? 0x1 : 0x2));
4778 5375
5376 if (CHIP_IS_E2(bp)) {
5377 val = 0;
5378 switch (bp->mf_mode) {
5379 case MULTI_FUNCTION_SD:
5380 val = 1;
5381 break;
5382 case MULTI_FUNCTION_SI:
5383 val = 2;
5384 break;
5385 }
5386
5387 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
5388 NIG_REG_LLH0_CLS_TYPE), val);
5389 }
4779 { 5390 {
4780 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0); 5391 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
4781 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0); 5392 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
@@ -4805,14 +5416,26 @@ static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
4805{ 5416{
4806 int reg; 5417 int reg;
4807 5418
4808 if (CHIP_IS_E1H(bp)) 5419 if (CHIP_IS_E1(bp))
4809 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
4810 else /* E1 */
4811 reg = PXP2_REG_RQ_ONCHIP_AT + index*8; 5420 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5421 else
5422 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
4812 5423
4813 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr)); 5424 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
4814} 5425}
4815 5426
5427static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
5428{
5429 bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
5430}
5431
5432static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
5433{
5434 u32 i, base = FUNC_ILT_BASE(func);
5435 for (i = base; i < base + ILT_PER_FUNC; i++)
5436 bnx2x_ilt_wr(bp, i, 0);
5437}
5438
4816static int bnx2x_init_hw_func(struct bnx2x *bp) 5439static int bnx2x_init_hw_func(struct bnx2x *bp)
4817{ 5440{
4818 int port = BP_PORT(bp); 5441 int port = BP_PORT(bp);
@@ -4825,10 +5448,12 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
4825 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func); 5448 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
4826 5449
4827 /* set MSI reconfigure capability */ 5450 /* set MSI reconfigure capability */
4828 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0); 5451 if (bp->common.int_block == INT_BLOCK_HC) {
4829 val = REG_RD(bp, addr); 5452 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
4830 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; 5453 val = REG_RD(bp, addr);
4831 REG_WR(bp, addr, val); 5454 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5455 REG_WR(bp, addr, val);
5456 }
4832 5457
4833 ilt = BP_ILT(bp); 5458 ilt = BP_ILT(bp);
4834 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; 5459 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
@@ -4854,10 +5479,38 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
4854 REG_WR(bp, PRS_REG_NIC_MODE, 1); 5479 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4855#endif /* BCM_CNIC */ 5480#endif /* BCM_CNIC */
4856 5481
5482 if (CHIP_IS_E2(bp)) {
5483 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
5484
5485 /* Turn on a single ISR mode in IGU if driver is going to use
5486 * INT#x or MSI
5487 */
5488 if (!(bp->flags & USING_MSIX_FLAG))
5489 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
5490 /*
5491 * Timers workaround bug: function init part.
5492 * Need to wait 20msec after initializing ILT,
5493 * needed to make sure there are no requests in
5494 * one of the PXP internal queues with "old" ILT addresses
5495 */
5496 msleep(20);
5497 /*
5497 * Master enable - needed because WB DMAE writes are performed
5498 * before this register is re-initialized as part of the regular
5499 * function init
5501 */
5502 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5503 /* Enable the function in IGU */
5504 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
5505 }
5506
4857 bp->dmae_ready = 1; 5507 bp->dmae_ready = 1;
4858 5508
4859 bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func); 5509 bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
4860 5510
5511 if (CHIP_IS_E2(bp))
5512 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
5513
4861 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func); 5514 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
4862 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func); 5515 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
4863 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func); 5516 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
@@ -4868,7 +5521,24 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
4868 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func); 5521 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
4869 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func); 5522 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
4870 5523
5524 if (CHIP_IS_E2(bp)) {
5525 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
5526 BP_PATH(bp));
5527 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
5528 BP_PATH(bp));
5529 }
5530
5531 if (CHIP_MODE_IS_4_PORT(bp))
5532 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);
5533
5534 if (CHIP_IS_E2(bp))
5535 REG_WR(bp, QM_REG_PF_EN, 1);
5536
4871 bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func); 5537 bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
5538
5539 if (CHIP_MODE_IS_4_PORT(bp))
5540 bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);
5541
4872 bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func); 5542 bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
4873 bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func); 5543 bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
4874 bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func); 5544 bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
@@ -4880,10 +5550,16 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
4880 bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func); 5550 bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
4881 bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func); 5551 bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
4882 bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func); 5552 bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
5553 if (CHIP_IS_E2(bp))
5554 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
5555
4883 bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func); 5556 bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
4884 5557
4885 bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func); 5558 bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
4886 5559
5560 if (CHIP_IS_E2(bp))
5561 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
5562
4887 if (IS_MF(bp)) { 5563 if (IS_MF(bp)) {
4888 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); 5564 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
4889 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov); 5565 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
@@ -4892,13 +5568,117 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
4892 bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func); 5568 bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
4893 5569
4894 /* HC init per function */ 5570 /* HC init per function */
4895 if (CHIP_IS_E1H(bp)) { 5571 if (bp->common.int_block == INT_BLOCK_HC) {
5572 if (CHIP_IS_E1H(bp)) {
5573 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5574
5575 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5576 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5577 }
5578 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
5579
5580 } else {
5581 int num_segs, sb_idx, prod_offset;
5582
4896 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 5583 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4897 5584
4898 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); 5585 if (CHIP_IS_E2(bp)) {
4899 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); 5586 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
5587 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
5588 }
5589
5590 bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);
5591
5592 if (CHIP_IS_E2(bp)) {
5593 int dsb_idx = 0;
5594 /**
5595 * Producer memory:
5596 * E2 mode: addresses 0-135 map to the mapping memory;
5597 * 136 - PF0 default prod; 137 - PF1 default prod;
5598 * 138 - PF2 default prod; 139 - PF3 default prod;
5599 * 140 - PF0 attn prod; 141 - PF1 attn prod;
5600 * 142 - PF2 attn prod; 143 - PF3 attn prod;
5601 * 144-147 reserved.
5602 *
5603 * E1.5 mode - in backward-compatible mode:
5604 * for a non-default SB, each even line in the memory
5605 * holds the U producer and each odd line holds
5606 * the C producer. The first 128 producers are for
5607 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
5608 * producers are for the DSB for each PF.
5609 * Each PF has five segments: (the order inside each
5610 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
5611 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
5612 * 144-147 attn prods;
5613 */
5614 /* non-default-status-blocks */
5615 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5616 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
5617 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
5618 prod_offset = (bp->igu_base_sb + sb_idx) *
5619 num_segs;
5620
5621 for (i = 0; i < num_segs; i++) {
5622 addr = IGU_REG_PROD_CONS_MEMORY +
5623 (prod_offset + i) * 4;
5624 REG_WR(bp, addr, 0);
5625 }
5626 /* send consumer update with value 0 */
5627 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
5628 USTORM_ID, 0, IGU_INT_NOP, 1);
5629 bnx2x_igu_clear_sb(bp,
5630 bp->igu_base_sb + sb_idx);
5631 }
5632
5633 /* default-status-blocks */
5634 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5635 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
5636
5637 if (CHIP_MODE_IS_4_PORT(bp))
5638 dsb_idx = BP_FUNC(bp);
5639 else
5640 dsb_idx = BP_E1HVN(bp);
5641
5642 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
5643 IGU_BC_BASE_DSB_PROD + dsb_idx :
5644 IGU_NORM_BASE_DSB_PROD + dsb_idx);
5645
5646 for (i = 0; i < (num_segs * E1HVN_MAX);
5647 i += E1HVN_MAX) {
5648 addr = IGU_REG_PROD_CONS_MEMORY +
5649 (prod_offset + i)*4;
5650 REG_WR(bp, addr, 0);
5651 }
5652 /* send consumer update with 0 */
5653 if (CHIP_INT_MODE_IS_BC(bp)) {
5654 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5655 USTORM_ID, 0, IGU_INT_NOP, 1);
5656 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5657 CSTORM_ID, 0, IGU_INT_NOP, 1);
5658 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5659 XSTORM_ID, 0, IGU_INT_NOP, 1);
5660 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5661 TSTORM_ID, 0, IGU_INT_NOP, 1);
5662 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5663 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5664 } else {
5665 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5666 USTORM_ID, 0, IGU_INT_NOP, 1);
5667 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5668 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5669 }
5670 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
5671
5672 /* !!! these should become driver const once
5673 rf-tool supports split-68 const */
5674 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
5675 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
5676 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
5677 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
5678 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
5679 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
5680 }
4900 } 5681 }
4901 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
4902 5682
4903 /* Reset PCIE errors for debug */ 5683 /* Reset PCIE errors for debug */
4904 REG_WR(bp, 0x2114, 0xffffffff); 5684 REG_WR(bp, 0x2114, 0xffffffff);
@@ -4920,7 +5700,7 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
4920 int rc = 0; 5700 int rc = 0;
4921 5701
4922 DP(BNX2X_MSG_MCP, "function %d load_code %x\n", 5702 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
4923 BP_FUNC(bp), load_code); 5703 BP_ABS_FUNC(bp), load_code);
4924 5704
4925 bp->dmae_ready = 0; 5705 bp->dmae_ready = 0;
4926 mutex_init(&bp->dmae_mutex); 5706 mutex_init(&bp->dmae_mutex);
@@ -4930,6 +5710,7 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
4930 5710
4931 switch (load_code) { 5711 switch (load_code) {
4932 case FW_MSG_CODE_DRV_LOAD_COMMON: 5712 case FW_MSG_CODE_DRV_LOAD_COMMON:
5713 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
4933 rc = bnx2x_init_hw_common(bp, load_code); 5714 rc = bnx2x_init_hw_common(bp, load_code);
4934 if (rc) 5715 if (rc)
4935 goto init_hw_err; 5716 goto init_hw_err;
@@ -4953,10 +5734,10 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
4953 } 5734 }
4954 5735
4955 if (!BP_NOMCP(bp)) { 5736 if (!BP_NOMCP(bp)) {
4956 int func = BP_FUNC(bp); 5737 int mb_idx = BP_FW_MB_IDX(bp);
4957 5738
4958 bp->fw_drv_pulse_wr_seq = 5739 bp->fw_drv_pulse_wr_seq =
4959 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) & 5740 (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
4960 DRV_PULSE_SEQ_MASK); 5741 DRV_PULSE_SEQ_MASK);
4961 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); 5742 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
4962 } 5743 }
@@ -4993,9 +5774,14 @@ void bnx2x_free_mem(struct bnx2x *bp)
4993 /* Common */ 5774 /* Common */
4994 for_each_queue(bp, i) { 5775 for_each_queue(bp, i) {
4995 /* status blocks */ 5776 /* status blocks */
4996 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb), 5777 if (CHIP_IS_E2(bp))
4997 bnx2x_fp(bp, i, status_blk_mapping), 5778 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
4998 sizeof(struct host_hc_status_block_e1x)); 5779 bnx2x_fp(bp, i, status_blk_mapping),
5780 sizeof(struct host_hc_status_block_e2));
5781 else
5782 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
5783 bnx2x_fp(bp, i, status_blk_mapping),
5784 sizeof(struct host_hc_status_block_e1x));
4999 } 5785 }
5000 /* Rx */ 5786 /* Rx */
5001 for_each_queue(bp, i) { 5787 for_each_queue(bp, i) {
@@ -5041,9 +5827,12 @@ void bnx2x_free_mem(struct bnx2x *bp)
5041 5827
5042 BNX2X_FREE(bp->ilt->lines); 5828 BNX2X_FREE(bp->ilt->lines);
5043#ifdef BCM_CNIC 5829#ifdef BCM_CNIC
5044 5830 if (CHIP_IS_E2(bp))
5045 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping, 5831 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
5046 sizeof(struct host_hc_status_block_e1x)); 5832 sizeof(struct host_hc_status_block_e2));
5833 else
5834 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
5835 sizeof(struct host_hc_status_block_e1x));
5047 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ); 5836 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
5048#endif 5837#endif
5049 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE); 5838 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
@@ -5055,6 +5844,22 @@ void bnx2x_free_mem(struct bnx2x *bp)
5055#undef BNX2X_KFREE 5844#undef BNX2X_KFREE
5056} 5845}
5057 5846
5847static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
5848{
5849 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
5850 if (CHIP_IS_E2(bp)) {
5851 bnx2x_fp(bp, index, sb_index_values) =
5852 (__le16 *)status_blk.e2_sb->sb.index_values;
5853 bnx2x_fp(bp, index, sb_running_index) =
5854 (__le16 *)status_blk.e2_sb->sb.running_index;
5855 } else {
5856 bnx2x_fp(bp, index, sb_index_values) =
5857 (__le16 *)status_blk.e1x_sb->sb.index_values;
5858 bnx2x_fp(bp, index, sb_running_index) =
5859 (__le16 *)status_blk.e1x_sb->sb.running_index;
5860 }
5861}
5862
5058int bnx2x_alloc_mem(struct bnx2x *bp) 5863int bnx2x_alloc_mem(struct bnx2x *bp)
5059{ 5864{
5060 5865
@@ -5074,25 +5879,23 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
5074 } while (0) 5879 } while (0)
5075 5880
5076 int i; 5881 int i;
5077 void *p;
5078 5882
5079 /* fastpath */ 5883 /* fastpath */
5080 /* Common */ 5884 /* Common */
5081 for_each_queue(bp, i) { 5885 for_each_queue(bp, i) {
5886 union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
5082 bnx2x_fp(bp, i, bp) = bp; 5887 bnx2x_fp(bp, i, bp) = bp;
5083
5084 /* status blocks */ 5888 /* status blocks */
5085 BNX2X_PCI_ALLOC(p, 5889 if (CHIP_IS_E2(bp))
5890 BNX2X_PCI_ALLOC(sb->e2_sb,
5891 &bnx2x_fp(bp, i, status_blk_mapping),
5892 sizeof(struct host_hc_status_block_e2));
5893 else
5894 BNX2X_PCI_ALLOC(sb->e1x_sb,
5086 &bnx2x_fp(bp, i, status_blk_mapping), 5895 &bnx2x_fp(bp, i, status_blk_mapping),
5087 sizeof(struct host_hc_status_block_e1x)); 5896 sizeof(struct host_hc_status_block_e1x));
5088 5897
5089 bnx2x_fp(bp, i, status_blk.e1x_sb) = 5898 set_sb_shortcuts(bp, i);
5090 (struct host_hc_status_block_e1x *)p;
5091
5092 bnx2x_fp(bp, i, sb_index_values) = (__le16 *)
5093 (bnx2x_fp(bp, i, status_blk.e1x_sb)->sb.index_values);
5094 bnx2x_fp(bp, i, sb_running_index) = (__le16 *)
5095 (bnx2x_fp(bp, i, status_blk.e1x_sb)->sb.running_index);
5096 } 5899 }
5097 /* Rx */ 5900 /* Rx */
5098 for_each_queue(bp, i) { 5901 for_each_queue(bp, i) {
@@ -5129,8 +5932,12 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
5129 /* end of fastpath */ 5932 /* end of fastpath */
5130 5933
5131#ifdef BCM_CNIC 5934#ifdef BCM_CNIC
5132 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping, 5935 if (CHIP_IS_E2(bp))
5133 sizeof(struct host_hc_status_block_e1x)); 5936 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
5937 sizeof(struct host_hc_status_block_e2));
5938 else
5939 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
5940 sizeof(struct host_hc_status_block_e1x));
5134 5941
5135 /* allocate searcher T2 table */ 5942 /* allocate searcher T2 table */
5136 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ); 5943 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
@@ -5210,11 +6017,6 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
5210 bp->set_mac_pending = 1; 6017 bp->set_mac_pending = 1;
5211 smp_wmb(); 6018 smp_wmb();
5212 6019
5213 config->hdr.length = 1 + (is_bcast ? 1 : 0);
5214 config->hdr.offset = cam_offset;
5215 config->hdr.client_id = 0xff;
5216 config->hdr.reserved1 = 0;
5217
5218 config->hdr.length = 1; 6020 config->hdr.length = 1;
5219 config->hdr.offset = cam_offset; 6021 config->hdr.offset = cam_offset;
5220 config->hdr.client_id = 0xff; 6022 config->hdr.client_id = 0xff;
@@ -5312,7 +6114,12 @@ int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
5312 6114
5313u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset) 6115u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
5314{ 6116{
5315 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp); 6117 if (CHIP_IS_E1H(bp))
6118 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6119 else if (CHIP_MODE_IS_4_PORT(bp))
6120 return BP_FUNC(bp) * 32 + rel_offset;
6121 else
6122 return BP_VN(bp) * 32 + rel_offset;
5316} 6123}
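bnx2x_e1h_cam_offset() now picks one of three CAM layouts: the E1H per-function layout, a per-function stride of 32 in 4-port E2, and a per-vnic stride of 32 in 2-port E2. A small sketch of the three formulas with example inputs (E1H_FUNC_MAX taken as 8 here):

#include <stdio.h>

#define E1H_FUNC_MAX 8	/* assumed: eight functions on E1H */

int main(void)
{
	int rel_offset = 1, func = 3, vn = 1;	/* example values */

	printf("E1H       : %d\n", E1H_FUNC_MAX * rel_offset + func);
	printf("E2, 4-port: %d\n", func * 32 + rel_offset);
	printf("E2, 2-port: %d\n", vn * 32 + rel_offset);
	return 0;
}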
5317 6124
5318void bnx2x_set_eth_mac(struct bnx2x *bp, int set) 6125void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
@@ -5804,9 +6611,11 @@ static void bnx2x_reset_func(struct bnx2x *bp)
5804{ 6611{
5805 int port = BP_PORT(bp); 6612 int port = BP_PORT(bp);
5806 int func = BP_FUNC(bp); 6613 int func = BP_FUNC(bp);
5807 int base, i; 6614 int i;
5808 int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) + 6615 int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
5809 offsetof(struct hc_status_block_data_e1x, common); 6616 (CHIP_IS_E2(bp) ?
6617 offsetof(struct hc_status_block_data_e2, common) :
6618 offsetof(struct hc_status_block_data_e1x, common));
5810 int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func); 6619 int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
5811 int pfid_offset = offsetof(struct pci_entity, pf_id); 6620 int pfid_offset = offsetof(struct pci_entity, pf_id);
5812 6621
@@ -5839,8 +6648,13 @@ static void bnx2x_reset_func(struct bnx2x *bp)
5839 0); 6648 0);
5840 6649
5841 /* Configure IGU */ 6650 /* Configure IGU */
5842 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); 6651 if (bp->common.int_block == INT_BLOCK_HC) {
5843 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); 6652 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6653 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6654 } else {
6655 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
6656 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
6657 }
5844 6658
5845#ifdef BCM_CNIC 6659#ifdef BCM_CNIC
5846 /* Disable Timer scan */ 6660 /* Disable Timer scan */
@@ -5856,9 +6670,25 @@ static void bnx2x_reset_func(struct bnx2x *bp)
5856 } 6670 }
5857#endif 6671#endif
5858 /* Clear ILT */ 6672 /* Clear ILT */
5859 base = FUNC_ILT_BASE(func); 6673 bnx2x_clear_func_ilt(bp, func);
5860 for (i = base; i < base + ILT_PER_FUNC; i++) 6674
5861 bnx2x_ilt_wr(bp, i, 0); 6675 /* Timers workaround bug for E2: if this is vnic-3,
6676 * we need to set the entire ilt range for this timers.
6677 */
6678 if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
6679 struct ilt_client_info ilt_cli;
6680 /* use dummy TM client */
6681 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
6682 ilt_cli.start = 0;
6683 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
6684 ilt_cli.client_num = ILT_CLIENT_TM;
6685
6686 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
6687 }
6688
6689 /* this assumes that reset_port() is called before reset_func() */
6690 if (CHIP_IS_E2(bp))
6691 bnx2x_pf_disable(bp);
5862 6692
5863 bp->dmae_ready = 0; 6693 bp->dmae_ready = 0;
5864} 6694}
@@ -5892,7 +6722,7 @@ static void bnx2x_reset_port(struct bnx2x *bp)
5892static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code) 6722static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
5893{ 6723{
5894 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n", 6724 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
5895 BP_FUNC(bp), reset_code); 6725 BP_ABS_FUNC(bp), reset_code);
5896 6726
5897 switch (reset_code) { 6727 switch (reset_code) {
5898 case FW_MSG_CODE_DRV_UNLOAD_COMMON: 6728 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
@@ -6024,15 +6854,20 @@ unload_error:
6024 if (!BP_NOMCP(bp)) 6854 if (!BP_NOMCP(bp))
6025 reset_code = bnx2x_fw_command(bp, reset_code, 0); 6855 reset_code = bnx2x_fw_command(bp, reset_code, 0);
6026 else { 6856 else {
6027 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n", 6857 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
6028 load_count[0], load_count[1], load_count[2]); 6858 "%d, %d, %d\n", BP_PATH(bp),
6029 load_count[0]--; 6859 load_count[BP_PATH(bp)][0],
6030 load_count[1 + port]--; 6860 load_count[BP_PATH(bp)][1],
6031 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n", 6861 load_count[BP_PATH(bp)][2]);
6032 load_count[0], load_count[1], load_count[2]); 6862 load_count[BP_PATH(bp)][0]--;
6033 if (load_count[0] == 0) 6863 load_count[BP_PATH(bp)][1 + port]--;
6864 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
6865 "%d, %d, %d\n", BP_PATH(bp),
6866 load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
6867 load_count[BP_PATH(bp)][2]);
6868 if (load_count[BP_PATH(bp)][0] == 0)
6034 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; 6869 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6035 else if (load_count[1 + port] == 0) 6870 else if (load_count[BP_PATH(bp)][1 + port] == 0)
6036 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT; 6871 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6037 else 6872 else
6038 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION; 6873 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
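With no MCP, the unload code is now chosen from per-path counters: index 0 counts common loads and indexes 1 and 2 count per-port loads, and whichever counter drains to zero decides between COMMON, PORT and FUNCTION unload. A userspace sketch of that bookkeeping (the FW_MSG_CODE_* values are replaced by strings):

#include <stdio.h>

/* per-path load counters: [path][0] = common, [path][1 + port] = port */
static int load_count[2][3];

static const char *unload_code(int path, int port)
{
	load_count[path][0]--;
	load_count[path][1 + port]--;
	if (load_count[path][0] == 0)
		return "UNLOAD_COMMON";
	else if (load_count[path][1 + port] == 0)
		return "UNLOAD_PORT";
	else
		return "UNLOAD_FUNCTION";
}

int main(void)
{
	/* pretend two functions were loaded on path 0, both on port 0 */
	load_count[0][0] = 2;
	load_count[0][1] = 2;

	printf("first unload : %s\n", unload_code(0, 0));	/* FUNCTION */
	printf("second unload: %s\n", unload_code(0, 0));	/* COMMON */
	return 0;
}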
@@ -6531,39 +7366,23 @@ reset_task_exit:
6531 * Init service functions 7366 * Init service functions
6532 */ 7367 */
6533 7368
6534static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func) 7369u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
6535{ 7370{
6536 switch (func) { 7371 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
6537 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0; 7372 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
6538 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1; 7373 return base + (BP_ABS_FUNC(bp)) * stride;
6539 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
6540 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
6541 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
6542 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
6543 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
6544 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
6545 default:
6546 BNX2X_ERR("Unsupported function index: %d\n", func);
6547 return (u32)(-1);
6548 }
6549} 7374}
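The new bnx2x_get_pretend_reg() replaces the eight-way switch with base + func * stride, which is valid only because the F0..F7 pretend registers are evenly spaced. A standalone check of that equivalence over a hypothetical, evenly spaced address table (the addresses below are made up):

#include <stdio.h>
#include <assert.h>

/* hypothetical, evenly spaced register addresses purely for illustration */
static const unsigned int pretend_func_reg[8] = {
	0x1000, 0x1004, 0x1008, 0x100c, 0x1010, 0x1014, 0x1018, 0x101c
};

static unsigned int get_pretend_reg(int abs_func)
{
	unsigned int base = pretend_func_reg[0];
	unsigned int stride = pretend_func_reg[1] - base;

	return base + abs_func * stride;
}

int main(void)
{
	int func;

	for (func = 0; func < 8; func++)
		assert(get_pretend_reg(func) == pretend_func_reg[func]);
	printf("base + func * stride matches the per-function table\n");
	return 0;
}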
6550 7375
6551static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func) 7376static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
6552{ 7377{
6553 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val; 7378 u32 reg = bnx2x_get_pretend_reg(bp);
6554 7379
6555 /* Flush all outstanding writes */ 7380 /* Flush all outstanding writes */
6556 mmiowb(); 7381 mmiowb();
6557 7382
6558 /* Pretend to be function 0 */ 7383 /* Pretend to be function 0 */
6559 REG_WR(bp, reg, 0); 7384 REG_WR(bp, reg, 0);
6560 /* Flush the GRC transaction (in the chip) */ 7385 REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
6561 new_val = REG_RD(bp, reg);
6562 if (new_val != 0) {
6563 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
6564 new_val);
6565 BUG();
6566 }
6567 7386
6568 /* From now we are in the "like-E1" mode */ 7387 /* From now we are in the "like-E1" mode */
6569 bnx2x_int_disable(bp); 7388 bnx2x_int_disable(bp);
@@ -6571,22 +7390,17 @@ static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
6571 /* Flush all outstanding writes */ 7390 /* Flush all outstanding writes */
6572 mmiowb(); 7391 mmiowb();
6573 7392
6574 /* Restore the original funtion settings */ 7393 /* Restore the original function */
6575 REG_WR(bp, reg, orig_func); 7394 REG_WR(bp, reg, BP_ABS_FUNC(bp));
6576 new_val = REG_RD(bp, reg); 7395 REG_RD(bp, reg);
6577 if (new_val != orig_func) {
6578 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
6579 orig_func, new_val);
6580 BUG();
6581 }
6582} 7396}
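The rewritten helper above drops the BUG() checks and simply reads the pretend register back to flush and order the GRC write before and after the "like-E1" window. A toy model of that write / read-back / restore sequence against a fake register file:

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_regs[16];

static void reg_wr(unsigned int reg, uint32_t val) { fake_regs[reg] = val; }
static uint32_t reg_rd(unsigned int reg)           { return fake_regs[reg]; }

int main(void)
{
	unsigned int pretend_reg = 5;	/* arbitrary slot in the fake file */
	uint32_t abs_func = 3;		/* example: we are function 3 */

	reg_wr(pretend_reg, 0);		/* pretend to be function 0 */
	(void)reg_rd(pretend_reg);	/* read back to flush/order the write */
	printf("working while pretending to be function %u\n",
	       (unsigned)reg_rd(pretend_reg));

	reg_wr(pretend_reg, abs_func);	/* restore the original function */
	(void)reg_rd(pretend_reg);
	return 0;
}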
6583 7397
6584static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func) 7398static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
6585{ 7399{
6586 if (CHIP_IS_E1H(bp)) 7400 if (CHIP_IS_E1(bp))
6587 bnx2x_undi_int_disable_e1h(bp, func);
6588 else
6589 bnx2x_int_disable(bp); 7401 bnx2x_int_disable(bp);
7402 else
7403 bnx2x_undi_int_disable_e1h(bp);
6590} 7404}
6591 7405
6592static void __devinit bnx2x_undi_unload(struct bnx2x *bp) 7406static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
@@ -6603,8 +7417,8 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6603 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); 7417 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6604 if (val == 0x7) { 7418 if (val == 0x7) {
6605 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 7419 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6606 /* save our func */ 7420 /* save our pf_num */
6607 int func = BP_FUNC(bp); 7421 int orig_pf_num = bp->pf_num;
6608 u32 swap_en; 7422 u32 swap_en;
6609 u32 swap_val; 7423 u32 swap_val;
6610 7424
@@ -6614,9 +7428,9 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6614 BNX2X_DEV_INFO("UNDI is active! reset device\n"); 7428 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6615 7429
6616 /* try unload UNDI on port 0 */ 7430 /* try unload UNDI on port 0 */
6617 bp->func = 0; 7431 bp->pf_num = 0;
6618 bp->fw_seq = 7432 bp->fw_seq =
6619 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) & 7433 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
6620 DRV_MSG_SEQ_NUMBER_MASK); 7434 DRV_MSG_SEQ_NUMBER_MASK);
6621 reset_code = bnx2x_fw_command(bp, reset_code, 0); 7435 reset_code = bnx2x_fw_command(bp, reset_code, 0);
6622 7436
@@ -6628,9 +7442,9 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6628 DRV_MSG_CODE_UNLOAD_DONE, 0); 7442 DRV_MSG_CODE_UNLOAD_DONE, 0);
6629 7443
6630 /* unload UNDI on port 1 */ 7444 /* unload UNDI on port 1 */
6631 bp->func = 1; 7445 bp->pf_num = 1;
6632 bp->fw_seq = 7446 bp->fw_seq =
6633 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) & 7447 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
6634 DRV_MSG_SEQ_NUMBER_MASK); 7448 DRV_MSG_SEQ_NUMBER_MASK);
6635 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 7449 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6636 7450
@@ -6640,7 +7454,7 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6640 /* now it's safe to release the lock */ 7454 /* now it's safe to release the lock */
6641 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); 7455 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6642 7456
6643 bnx2x_undi_int_disable(bp, func); 7457 bnx2x_undi_int_disable(bp);
6644 7458
6645 /* close input traffic and wait for it */ 7459 /* close input traffic and wait for it */
6646 /* Do not rcv packets to BRB */ 7460 /* Do not rcv packets to BRB */
@@ -6679,9 +7493,9 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6679 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); 7493 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
6680 7494
6681 /* restore our func and fw_seq */ 7495 /* restore our func and fw_seq */
6682 bp->func = func; 7496 bp->pf_num = orig_pf_num;
6683 bp->fw_seq = 7497 bp->fw_seq =
6684 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) & 7498 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
6685 DRV_MSG_SEQ_NUMBER_MASK); 7499 DRV_MSG_SEQ_NUMBER_MASK);
6686 7500
6687 } else 7501 } else
@@ -6705,20 +7519,42 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6705 val = REG_RD(bp, MISC_REG_BOND_ID); 7519 val = REG_RD(bp, MISC_REG_BOND_ID);
6706 id |= (val & 0xf); 7520 id |= (val & 0xf);
6707 bp->common.chip_id = id; 7521 bp->common.chip_id = id;
6708 bp->link_params.chip_id = bp->common.chip_id;
6709 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6710
6711 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
6712 7522
6713 /* Set doorbell size */ 7523 /* Set doorbell size */
6714 bp->db_size = (1 << BNX2X_DB_SHIFT); 7524 bp->db_size = (1 << BNX2X_DB_SHIFT);
6715 7525
7526 if (CHIP_IS_E2(bp)) {
7527 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
7528 if ((val & 1) == 0)
7529 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
7530 else
7531 val = (val >> 1) & 1;
7532 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
7533 "2_PORT_MODE");
7534 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
7535 CHIP_2_PORT_MODE;
7536
7537 if (CHIP_MODE_IS_4_PORT(bp))
7538 bp->pfid = (bp->pf_num >> 1); /* 0..3 */
7539 else
7540 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
7541 } else {
7542 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
7543 bp->pfid = bp->pf_num; /* 0..7 */
7544 }
7545
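The port-mode branch above maps the PCI function number to a PF id: pf_num >> 1 in 4-port mode (0..3), pf_num & 0x6 in 2-port mode (0, 2, 4, 6), and the identity on E1x. A one-screen table of the mapping:

#include <stdio.h>

int main(void)
{
	int pf_num;

	for (pf_num = 0; pf_num < 8; pf_num++)
		printf("pf_num %d -> 4-port pfid %d, 2-port pfid %d, E1x pfid %d\n",
		       pf_num, pf_num >> 1, pf_num & 0x6, pf_num);
	return 0;
}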
6716 /* 7546 /*
6717 * set base FW non-default (fast path) status block id, this value is 7547 * set base FW non-default (fast path) status block id, this value is
6718 * used to initialize the fw_sb_id saved on the fp/queue structure to 7548 * used to initialize the fw_sb_id saved on the fp/queue structure to
6719 * determine the id used by the FW. 7549 * determine the id used by the FW.
6720 */ 7550 */
6721 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x; 7551 if (CHIP_IS_E1x(bp))
7552 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
7553 else /* E2 */
7554 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
7555
7556 bp->link_params.chip_id = bp->common.chip_id;
7557 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6722 7558
6723 val = (REG_RD(bp, 0x2874) & 0x55); 7559 val = (REG_RD(bp, 0x2874) & 0x55);
6724 if ((bp->common.chip_id & 0x1) || 7560 if ((bp->common.chip_id & 0x1) ||
@@ -6734,15 +7570,15 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6734 bp->common.flash_size, bp->common.flash_size); 7570 bp->common.flash_size, bp->common.flash_size);
6735 7571
6736 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); 7572 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6737 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0); 7573 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
7574 MISC_REG_GENERIC_CR_1 :
7575 MISC_REG_GENERIC_CR_0));
6738 bp->link_params.shmem_base = bp->common.shmem_base; 7576 bp->link_params.shmem_base = bp->common.shmem_base;
6739 bp->link_params.shmem2_base = bp->common.shmem2_base; 7577 bp->link_params.shmem2_base = bp->common.shmem2_base;
6740 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n", 7578 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
6741 bp->common.shmem_base, bp->common.shmem2_base); 7579 bp->common.shmem_base, bp->common.shmem2_base);
6742 7580
6743 if (!bp->common.shmem_base || 7581 if (!bp->common.shmem_base) {
6744 (bp->common.shmem_base < 0xA0000) ||
6745 (bp->common.shmem_base >= 0xC0000)) {
6746 BNX2X_DEV_INFO("MCP not active\n"); 7582 BNX2X_DEV_INFO("MCP not active\n");
6747 bp->flags |= NO_MCP_FLAG; 7583 bp->flags |= NO_MCP_FLAG;
6748 return; 7584 return;
@@ -6751,7 +7587,7 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6751 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); 7587 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6752 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) 7588 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6753 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) 7589 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6754 BNX2X_ERROR("BAD MCP validity signature\n"); 7590 BNX2X_ERR("BAD MCP validity signature\n");
6755 7591
6756 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); 7592 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6757 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config); 7593 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
@@ -6775,8 +7611,8 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6775 if (val < BNX2X_BC_VER) { 7611 if (val < BNX2X_BC_VER) {
6776 /* for now only warn 7612 /* for now only warn
6777 * later we might need to enforce this */ 7613 * later we might need to enforce this */
6778 BNX2X_ERROR("This driver needs bc_ver %X but found %X, " 7614 BNX2X_ERR("This driver needs bc_ver %X but found %X, "
6779 "please upgrade BC\n", BNX2X_BC_VER, val); 7615 "please upgrade BC\n", BNX2X_BC_VER, val);
6780 } 7616 }
6781 bp->link_params.feature_config_flags |= 7617 bp->link_params.feature_config_flags |=
6782 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ? 7618 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
@@ -6804,6 +7640,57 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6804 val, val2, val3, val4); 7640 val, val2, val3, val4);
6805} 7641}
6806 7642
7643#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
7644#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
7645
7646static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
7647{
7648 int pfid = BP_FUNC(bp);
7649 int vn = BP_E1HVN(bp);
7650 int igu_sb_id;
7651 u32 val;
7652 u8 fid;
7653
7654 bp->igu_base_sb = 0xff;
7655 bp->igu_sb_cnt = 0;
7656 if (CHIP_INT_MODE_IS_BC(bp)) {
7657 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
7658 bp->l2_cid_count);
7659
7660 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
7661 FP_SB_MAX_E1x;
7662
7663 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
7664 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
7665
7666 return;
7667 }
7668
7669 /* IGU in normal mode - read CAM */
7670 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
7671 igu_sb_id++) {
7672 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
7673 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
7674 continue;
7675 fid = IGU_FID(val);
7676 if ((fid & IGU_FID_ENCODE_IS_PF)) {
7677 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
7678 continue;
7679 if (IGU_VEC(val) == 0)
7680 /* default status block */
7681 bp->igu_dsb_id = igu_sb_id;
7682 else {
7683 if (bp->igu_base_sb == 0xff)
7684 bp->igu_base_sb = igu_sb_id;
7685 bp->igu_sb_cnt++;
7686 }
7687 }
7688 }
7689 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, bp->l2_cid_count);
7690 if (bp->igu_sb_cnt == 0)
7691 BNX2X_ERR("CAM configuration error\n");
7692}
7693
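For reference, the CAM walk in bnx2x_get_igu_cam_info() above boils down to: skip invalid or non-PF entries, skip entries owned by another PF, treat vector 0 as the default status block, and count the remaining vectors as fast-path status blocks. The standalone sketch below mirrors that control flow; the bit layout and the in-memory CAM contents are invented placeholders, not the real IGU_REG_MAPPING_MEMORY encoding.

/* Illustrative sketch only: field layout and CAM contents are invented,
 * not the real IGU register encoding used by the driver. */
#include <stdio.h>
#include <stdint.h>

#define CAM_SIZE        8
#define CAM_VALID       (1u << 15)            /* placeholder valid bit    */
#define CAM_IS_PF       (1u << 14)            /* placeholder PF/VF flag   */
#define CAM_FID(v)      (((v) >> 8) & 0x3fu)  /* placeholder FID field    */
#define CAM_VEC(v)      ((v) & 0xffu)         /* placeholder vector field */

int main(void)
{
	/* fake CAM: two entries for PF 1 (vector 0 = default SB), rest unused */
	uint32_t cam[CAM_SIZE] = {
		CAM_VALID | CAM_IS_PF | (1 << 8) | 0,   /* PF 1, vector 0 */
		CAM_VALID | CAM_IS_PF | (1 << 8) | 1,   /* PF 1, vector 1 */
		CAM_VALID | CAM_IS_PF | (2 << 8) | 0,   /* PF 2, ignored  */
	};
	int pfid = 1, dsb_id = -1, base_sb = -1, sb_cnt = 0;

	for (int i = 0; i < CAM_SIZE; i++) {
		uint32_t val = cam[i];

		if (!(val & CAM_VALID) || !(val & CAM_IS_PF))
			continue;
		if (CAM_FID(val) != (uint32_t)pfid)
			continue;
		if (CAM_VEC(val) == 0) {
			dsb_id = i;             /* default status block */
		} else {
			if (base_sb < 0)
				base_sb = i;    /* first fast-path SB   */
			sb_cnt++;
		}
	}
	printf("dsb_id=%d base_sb=%d sb_cnt=%d\n", dsb_id, base_sb, sb_cnt);
	return 0;
}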
6807static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp, 7694static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6808 u32 switch_cfg) 7695 u32 switch_cfg)
6809{ 7696{
@@ -7178,26 +8065,49 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7178 8065
7179static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) 8066static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7180{ 8067{
7181 int func = BP_FUNC(bp); 8068 int func = BP_ABS_FUNC(bp);
8069 int vn;
7182 u32 val, val2; 8070 u32 val, val2;
7183 int rc = 0; 8071 int rc = 0;
7184 8072
7185 bnx2x_get_common_hwinfo(bp); 8073 bnx2x_get_common_hwinfo(bp);
7186 8074
7187 bp->common.int_block = INT_BLOCK_HC; 8075 if (CHIP_IS_E1x(bp)) {
8076 bp->common.int_block = INT_BLOCK_HC;
8077
8078 bp->igu_dsb_id = DEF_SB_IGU_ID;
8079 bp->igu_base_sb = 0;
8080 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count);
8081 } else {
8082 bp->common.int_block = INT_BLOCK_IGU;
8083 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8084 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
8085 DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
8086 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
8087 } else
8088 DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
7188 8089
7189 bp->igu_dsb_id = DEF_SB_IGU_ID; 8090 bnx2x_get_igu_cam_info(bp);
7190 bp->igu_base_sb = 0; 8091
7191 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count); 8092 }
8093 DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
8094 bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
8095
8096 /*
8097 * Initialize MF configuration
8098 */
7192 8099
7193 bp->mf_ov = 0; 8100 bp->mf_ov = 0;
7194 bp->mf_mode = 0; 8101 bp->mf_mode = 0;
7195 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) { 8102 vn = BP_E1HVN(bp);
7196 8103 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
7197 bp->common.mf_cfg_base = bp->common.shmem_base + 8104 if (SHMEM2_HAS(bp, mf_cfg_addr))
8105 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
8106 else
8107 bp->common.mf_cfg_base = bp->common.shmem_base +
7198 offsetof(struct shmem_region, func_mb) + 8108 offsetof(struct shmem_region, func_mb) +
7199 E1H_FUNC_MAX * sizeof(struct drv_func_mb); 8109 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
7200 bp->mf_config = 8110 bp->mf_config[vn] =
7201 MF_CFG_RD(bp, func_mf_config[func].config); 8111 MF_CFG_RD(bp, func_mf_config[func].config);
7202 8112
7203 val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) & 8113 val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) &
@@ -7213,16 +8123,16 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7213 FUNC_MF_CFG_E1HOV_TAG_MASK); 8123 FUNC_MF_CFG_E1HOV_TAG_MASK);
7214 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 8124 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7215 bp->mf_ov = val; 8125 bp->mf_ov = val;
7216 BNX2X_DEV_INFO("E1HOV for func %d is %d " 8126 BNX2X_DEV_INFO("MF OV for func %d is %d "
7217 "(0x%04x)\n", 8127 "(0x%04x)\n",
7218 func, bp->mf_ov, bp->mf_ov); 8128 func, bp->mf_ov, bp->mf_ov);
7219 } else { 8129 } else {
7220 BNX2X_ERROR("No valid E1HOV for func %d," 8130 BNX2X_ERROR("No valid MF OV for func %d,"
7221 " aborting\n", func); 8131 " aborting\n", func);
7222 rc = -EPERM; 8132 rc = -EPERM;
7223 } 8133 }
7224 } else { 8134 } else {
7225 if (BP_E1HVN(bp)) { 8135 if (BP_VN(bp)) {
7226 BNX2X_ERROR("VN %d in single function mode," 8136 BNX2X_ERROR("VN %d in single function mode,"
7227 " aborting\n", BP_E1HVN(bp)); 8137 " aborting\n", BP_E1HVN(bp));
7228 rc = -EPERM; 8138 rc = -EPERM;
@@ -7230,15 +8140,25 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7230 } 8140 }
7231 } 8141 }
7232 8142
7233 /* adjust igu_sb_cnt to MF */ 8143 /* adjust igu_sb_cnt to MF for E1x */
7234 if (IS_MF(bp)) 8144 if (CHIP_IS_E1x(bp) && IS_MF(bp))
7235 bp->igu_sb_cnt /= E1HVN_MAX; 8145 bp->igu_sb_cnt /= E1HVN_MAX;
7236 8146
8147 /*
 8148	 * adjust E2 sb count: to be removed when the FW supports
 8149	 * more than 16 L2 clients
8150 */
8151#define MAX_L2_CLIENTS 16
8152 if (CHIP_IS_E2(bp))
8153 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8154 MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
8155
7237 if (!BP_NOMCP(bp)) { 8156 if (!BP_NOMCP(bp)) {
7238 bnx2x_get_port_hwinfo(bp); 8157 bnx2x_get_port_hwinfo(bp);
7239 8158
7240 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) & 8159 bp->fw_seq =
7241 DRV_MSG_SEQ_NUMBER_MASK); 8160 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
8161 DRV_MSG_SEQ_NUMBER_MASK);
7242 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); 8162 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7243 } 8163 }
7244 8164
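A worked example of the status-block count adjustments in this hunk may help: on E1x in multi-function mode the per-port status blocks are split between the virtual networks, while on E2 the count is clamped to the temporary 16-L2-client firmware limit noted in the comment above. A minimal sketch, assuming the customary four VNs per port and reducing the chip/MF checks to plain flags:

/* Illustrative sketch only: chip/MF state is reduced to booleans; the
 * 16-client limit mirrors the MAX_L2_CLIENTS define in the hunk above. */
#include <stdio.h>

#define E1HVN_MAX       4     /* assumed: four virtual networks per port */
#define MAX_L2_CLIENTS  16

static int adjust_sb_cnt(int sb_cnt, int is_e1x, int is_mf)
{
	if (is_e1x && is_mf)
		sb_cnt /= E1HVN_MAX;          /* E1x MF: split SBs between VNs */

	if (!is_e1x) {                        /* E2: FW limit of 16 L2 clients */
		int limit = MAX_L2_CLIENTS / (is_mf ? 4 : 1);

		if (sb_cnt > limit)
			sb_cnt = limit;
	}
	return sb_cnt;
}

int main(void)
{
	printf("E1x MF: %d\n", adjust_sb_cnt(16, 1, 1));  /* -> 4  */
	printf("E2  SF: %d\n", adjust_sb_cnt(32, 0, 0));  /* -> 16 */
	printf("E2  MF: %d\n", adjust_sb_cnt(32, 0, 1));  /* -> 4  */
	return 0;
}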
@@ -7338,7 +8258,7 @@ out_not_found:
7338 8258
7339static int __devinit bnx2x_init_bp(struct bnx2x *bp) 8259static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7340{ 8260{
7341 int func = BP_FUNC(bp); 8261 int func;
7342 int timer_interval; 8262 int timer_interval;
7343 int rc; 8263 int rc;
7344 8264
@@ -7362,6 +8282,9 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7362 rc = bnx2x_alloc_mem_bp(bp); 8282 rc = bnx2x_alloc_mem_bp(bp);
7363 8283
7364 bnx2x_read_fwinfo(bp); 8284 bnx2x_read_fwinfo(bp);
8285
8286 func = BP_FUNC(bp);
8287
7365 /* need to reset chip if undi was active */ 8288 /* need to reset chip if undi was active */
7366 if (!BP_NOMCP(bp)) 8289 if (!BP_NOMCP(bp))
7367 bnx2x_undi_unload(bp); 8290 bnx2x_undi_unload(bp);
@@ -7650,7 +8573,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
7650 bp->dev = dev; 8573 bp->dev = dev;
7651 bp->pdev = pdev; 8574 bp->pdev = pdev;
7652 bp->flags = 0; 8575 bp->flags = 0;
7653 bp->func = PCI_FUNC(pdev->devfn); 8576 bp->pf_num = PCI_FUNC(pdev->devfn);
7654 8577
7655 rc = pci_enable_device(pdev); 8578 rc = pci_enable_device(pdev);
7656 if (rc) { 8579 if (rc) {
@@ -7964,6 +8887,8 @@ int bnx2x_init_firmware(struct bnx2x *bp)
7964 fw_file_name = FW_FILE_NAME_E1; 8887 fw_file_name = FW_FILE_NAME_E1;
7965 else if (CHIP_IS_E1H(bp)) 8888 else if (CHIP_IS_E1H(bp))
7966 fw_file_name = FW_FILE_NAME_E1H; 8889 fw_file_name = FW_FILE_NAME_E1H;
8890 else if (CHIP_IS_E2(bp))
8891 fw_file_name = FW_FILE_NAME_E2;
7967 else { 8892 else {
7968 BNX2X_ERR("Unsupported chip revision\n"); 8893 BNX2X_ERR("Unsupported chip revision\n");
7969 return -EINVAL; 8894 return -EINVAL;
@@ -8047,8 +8972,25 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
8047 int pcie_width, pcie_speed; 8972 int pcie_width, pcie_speed;
8048 int rc, cid_count; 8973 int rc, cid_count;
8049 8974
8050 cid_count = FP_SB_MAX_E1x + CNIC_CONTEXT_USE; 8975 switch (ent->driver_data) {
8976 case BCM57710:
8977 case BCM57711:
8978 case BCM57711E:
8979 cid_count = FP_SB_MAX_E1x;
8980 break;
8981
8982 case BCM57712:
8983 case BCM57712E:
8984 cid_count = FP_SB_MAX_E2;
8985 break;
8051 8986
8987 default:
8988 pr_err("Unknown board_type (%ld), aborting\n",
8989 ent->driver_data);
 8990		return -ENODEV;
8991 }
8992
8993 cid_count += CNIC_CONTEXT_USE;
8052 /* dev zeroed in init_etherdev */ 8994 /* dev zeroed in init_etherdev */
8053 dev = alloc_etherdev_mq(sizeof(*bp), cid_count); 8995 dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
8054 if (!dev) { 8996 if (!dev) {
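The probe path above now derives the CID count from the PCI table's driver_data instead of always assuming the E1x maximum, then adds one context for CNIC before sizing the net device. A hedged sketch of that selection, with invented FP_SB_MAX_* and CNIC_CONTEXT_USE values standing in for the driver's real constants:

/* Illustrative sketch only: the FP_SB_MAX_* and CNIC_CONTEXT_USE numbers
 * are placeholders, not the driver's real values. */
#include <stdio.h>
#include <errno.h>

enum board { BCM57710, BCM57711, BCM57711E, BCM57712, BCM57712E };

#define FP_SB_MAX_E1x     16   /* placeholder */
#define FP_SB_MAX_E2      16   /* placeholder */
#define CNIC_CONTEXT_USE   1   /* placeholder */

/* returns the CID count for a known board, or -ENODEV like the probe path */
static int board_cid_count(long driver_data)
{
	int cid_count;

	switch (driver_data) {
	case BCM57710:
	case BCM57711:
	case BCM57711E:
		cid_count = FP_SB_MAX_E1x;
		break;
	case BCM57712:
	case BCM57712E:
		cid_count = FP_SB_MAX_E2;
		break;
	default:
		return -ENODEV;                 /* unknown board_type */
	}
	return cid_count + CNIC_CONTEXT_USE;    /* one extra context for CNIC */
}

int main(void)
{
	printf("57712 cid_count: %d\n", board_cid_count(BCM57712));
	printf("bogus cid_count: %d\n", board_cid_count(42));
	return 0;
}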
@@ -8086,7 +9028,10 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
8086 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx," 9028 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
8087 " IRQ %d, ", board_info[ent->driver_data].name, 9029 " IRQ %d, ", board_info[ent->driver_data].name,
8088 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), 9030 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
8089 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz", 9031 pcie_width,
9032 ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
9033 (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
9034 "5GHz (Gen2)" : "2.5GHz",
8090 dev->base_addr, bp->pdev->irq); 9035 dev->base_addr, bp->pdev->irq);
8091 pr_cont("node addr %pM\n", dev->dev_addr); 9036 pr_cont("node addr %pM\n", dev->dev_addr);
8092 9037
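The Gen2 test in the banner above is chip-dependent because the raw link-speed value read on E2 uses a different encoding than on E1x. A small sketch of just that ternary, with the encodings taken from the hunk and chip detection reduced to a plain flag rather than CHIP_IS_E2():

/* Illustrative sketch only: reproduces the Gen1/Gen2 string selection
 * from the hunk above. */
#include <stdio.h>

static const char *pcie_speed_str(int is_e2, int pcie_speed)
{
	/* E1x reports Gen2 as 2, E2 reports Gen2 as 1 */
	if ((!is_e2 && pcie_speed == 2) || (is_e2 && pcie_speed == 1))
		return "5GHz (Gen2)";
	return "2.5GHz";
}

int main(void)
{
	printf("E1x speed 2 -> %s\n", pcie_speed_str(0, 2));
	printf("E2  speed 1 -> %s\n", pcie_speed_str(1, 1));
	printf("E2  speed 2 -> %s\n", pcie_speed_str(1, 2));
	return 0;
}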
@@ -8199,8 +9144,9 @@ static void bnx2x_eeh_recover(struct bnx2x *bp)
8199 BNX2X_ERR("BAD MCP validity signature\n"); 9144 BNX2X_ERR("BAD MCP validity signature\n");
8200 9145
8201 if (!BP_NOMCP(bp)) { 9146 if (!BP_NOMCP(bp)) {
8202 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header) 9147 bp->fw_seq =
8203 & DRV_MSG_SEQ_NUMBER_MASK); 9148 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
9149 DRV_MSG_SEQ_NUMBER_MASK);
8204 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); 9150 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8205 } 9151 }
8206} 9152}
@@ -8283,7 +9229,8 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
8283 struct bnx2x *bp = netdev_priv(dev); 9229 struct bnx2x *bp = netdev_priv(dev);
8284 9230
8285 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { 9231 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
8286 printk(KERN_ERR "Handling parity error recovery. Try again later\n"); 9232 printk(KERN_ERR "Handling parity error recovery. "
9233 "Try again later\n");
8287 return; 9234 return;
8288 } 9235 }
8289 9236
@@ -8560,7 +9507,11 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
8560 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX; 9507 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
8561 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX; 9508 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
8562 } 9509 }
8563 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb; 9510 if (CHIP_IS_E2(bp))
9511 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
9512 else
9513 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
9514
8564 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp); 9515 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
8565 cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp); 9516 cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
8566 cp->irq_arr[1].status_blk = bp->def_status_blk; 9517 cp->irq_arr[1].status_blk = bp->def_status_blk;
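The CNIC IRQ setup now hands CNIC a pointer to whichever status-block layout the chip actually uses (e2_sb vs e1x_sb). The sketch below mirrors that selection with dummy structures; the real host status-block layouts come from the firmware headers and are not reproduced here.

/* Illustrative sketch only: dummy status-block layouts; the union mirrors
 * the shape of bp->cnic_sb but the struct contents are invented. */
#include <stdio.h>

struct sb_e1x { unsigned short index[8];  };   /* placeholder layout */
struct sb_e2  { unsigned short index[16]; };   /* placeholder layout */

union cnic_sb {
	struct sb_e1x *e1x_sb;
	struct sb_e2  *e2_sb;
};

int main(void)
{
	struct sb_e2 sb = { {0} };
	union cnic_sb cnic = { .e2_sb = &sb };
	int is_e2 = 1;
	void *status_blk;

	/* pick the union member that matches the chip, as the hunk does */
	if (is_e2)
		status_blk = (void *)cnic.e2_sb;
	else
		status_blk = (void *)cnic.e1x_sb;

	printf("status_blk=%p\n", status_blk);
	return 0;
}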