aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDmitry Kravkov <dmitry@broadcom.com>2010-10-05 23:28:26 -0400
committerDavid S. Miller <davem@davemloft.net>2010-10-06 17:10:37 -0400
commitf2e0899f0f275cc3f5e9c9726178d7d0ac19b2db (patch)
tree436144046a751427bdd2e3fd284688582d2efe61
parent8fe23fbd94af5a4c117fd0eb2f1c3f492f79efe8 (diff)
bnx2x: Add 57712 support
57712 HW supported with same set of features as for 57710/57711 Signed-off-by: Dmitry Kravkov <dmitry@broadcom.com> Signed-off-by: Eilon Greenstein <eilong@broadcom.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/bnx2x/bnx2x.h124
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.c275
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.h137
-rw-r--r--drivers/net/bnx2x/bnx2x_dump.h35
-rw-r--r--drivers/net/bnx2x/bnx2x_ethtool.c123
-rw-r--r--drivers/net/bnx2x/bnx2x_hsi.h187
-rw-r--r--drivers/net/bnx2x/bnx2x_init.h3
-rw-r--r--drivers/net/bnx2x/bnx2x_init_ops.h28
-rw-r--r--drivers/net/bnx2x/bnx2x_link.c379
-rw-r--r--drivers/net/bnx2x/bnx2x_link.h6
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c1777
-rw-r--r--drivers/net/bnx2x/bnx2x_reg.h879
-rw-r--r--drivers/net/bnx2x/bnx2x_stats.c262
-rw-r--r--firmware/Makefile3
14 files changed, 3428 insertions, 790 deletions
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 09fb7ff811d8..6f8e2666f05f 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -180,10 +180,16 @@ void bnx2x_panic_dump(struct bnx2x *bp);
180#define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val) 180#define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val)
181#define MF_CFG_ADDR(bp, field) (bp->common.mf_cfg_base + \ 181#define MF_CFG_ADDR(bp, field) (bp->common.mf_cfg_base + \
182 offsetof(struct mf_cfg, field)) 182 offsetof(struct mf_cfg, field))
183#define MF2_CFG_ADDR(bp, field) (bp->common.mf2_cfg_base + \
184 offsetof(struct mf2_cfg, field))
183 185
184#define MF_CFG_RD(bp, field) REG_RD(bp, MF_CFG_ADDR(bp, field)) 186#define MF_CFG_RD(bp, field) REG_RD(bp, MF_CFG_ADDR(bp, field))
185#define MF_CFG_WR(bp, field, val) REG_WR(bp,\ 187#define MF_CFG_WR(bp, field, val) REG_WR(bp,\
186 MF_CFG_ADDR(bp, field), (val)) 188 MF_CFG_ADDR(bp, field), (val))
189#define MF2_CFG_RD(bp, field) REG_RD(bp, MF2_CFG_ADDR(bp, field))
190#define SHMEM2_HAS(bp, field) ((bp)->common.shmem2_base && \
191 (SHMEM2_RD((bp), size) > \
192 offsetof(struct shmem2_region, field)))
187 193
188#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg) 194#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg)
189#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val) 195#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val)
@@ -296,6 +302,8 @@ union db_prod {
296union host_hc_status_block { 302union host_hc_status_block {
297 /* pointer to fp status block e1x */ 303 /* pointer to fp status block e1x */
298 struct host_hc_status_block_e1x *e1x_sb; 304 struct host_hc_status_block_e1x *e1x_sb;
305 /* pointer to fp status block e2 */
306 struct host_hc_status_block_e2 *e2_sb;
299}; 307};
300 308
301struct bnx2x_fastpath { 309struct bnx2x_fastpath {
@@ -564,12 +572,19 @@ struct bnx2x_common {
564#define CHIP_NUM_57710 0x164e 572#define CHIP_NUM_57710 0x164e
565#define CHIP_NUM_57711 0x164f 573#define CHIP_NUM_57711 0x164f
566#define CHIP_NUM_57711E 0x1650 574#define CHIP_NUM_57711E 0x1650
575#define CHIP_NUM_57712 0x1662
576#define CHIP_NUM_57712E 0x1663
567#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710) 577#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710)
568#define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711) 578#define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711)
569#define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E) 579#define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E)
580#define CHIP_IS_57712(bp) (CHIP_NUM(bp) == CHIP_NUM_57712)
581#define CHIP_IS_57712E(bp) (CHIP_NUM(bp) == CHIP_NUM_57712E)
570#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \ 582#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \
571 CHIP_IS_57711E(bp)) 583 CHIP_IS_57711E(bp))
572#define IS_E1H_OFFSET CHIP_IS_E1H(bp) 584#define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \
585 CHIP_IS_57712E(bp))
586#define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp)))
587#define IS_E1H_OFFSET (CHIP_IS_E1H(bp) || CHIP_IS_E2(bp))
573 588
574#define CHIP_REV(bp) (bp->common.chip_id & 0x0000f000) 589#define CHIP_REV(bp) (bp->common.chip_id & 0x0000f000)
575#define CHIP_REV_Ax 0x00000000 590#define CHIP_REV_Ax 0x00000000
@@ -596,6 +611,7 @@ struct bnx2x_common {
596 u32 shmem_base; 611 u32 shmem_base;
597 u32 shmem2_base; 612 u32 shmem2_base;
598 u32 mf_cfg_base; 613 u32 mf_cfg_base;
614 u32 mf2_cfg_base;
599 615
600 u32 hw_config; 616 u32 hw_config;
601 617
@@ -603,10 +619,25 @@ struct bnx2x_common {
603 619
604 u8 int_block; 620 u8 int_block;
605#define INT_BLOCK_HC 0 621#define INT_BLOCK_HC 0
622#define INT_BLOCK_IGU 1
623#define INT_BLOCK_MODE_NORMAL 0
624#define INT_BLOCK_MODE_BW_COMP 2
625#define CHIP_INT_MODE_IS_NBC(bp) \
626 (CHIP_IS_E2(bp) && \
627 !((bp)->common.int_block & INT_BLOCK_MODE_BW_COMP))
628#define CHIP_INT_MODE_IS_BC(bp) (!CHIP_INT_MODE_IS_NBC(bp))
629
606 u8 chip_port_mode; 630 u8 chip_port_mode;
631#define CHIP_4_PORT_MODE 0x0
632#define CHIP_2_PORT_MODE 0x1
607#define CHIP_PORT_MODE_NONE 0x2 633#define CHIP_PORT_MODE_NONE 0x2
634#define CHIP_MODE(bp) (bp->common.chip_port_mode)
635#define CHIP_MODE_IS_4_PORT(bp) (CHIP_MODE(bp) == CHIP_4_PORT_MODE)
608}; 636};
609 637
638/* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */
639#define BNX2X_IGU_STAS_MSG_VF_CNT 64
640#define BNX2X_IGU_STAS_MSG_PF_CNT 4
610 641
611/* end of common */ 642/* end of common */
612 643
@@ -670,7 +701,7 @@ enum {
670 */ 701 */
671 702
672#define FP_SB_MAX_E1x 16 /* fast-path interrupt contexts E1x */ 703#define FP_SB_MAX_E1x 16 /* fast-path interrupt contexts E1x */
673#define MAX_CONTEXT FP_SB_MAX_E1x 704#define FP_SB_MAX_E2 16 /* fast-path interrupt contexts E2 */
674 705
675/* 706/*
676 * cid_cnt parameter below refers to the value returned by 707
@@ -754,7 +785,7 @@ struct bnx2x_slowpath {
754#define MAX_DYNAMIC_ATTN_GRPS 8 785#define MAX_DYNAMIC_ATTN_GRPS 8
755 786
756struct attn_route { 787struct attn_route {
757 u32 sig[4]; 788 u32 sig[5];
758}; 789};
759 790
760struct iro { 791struct iro {
@@ -896,13 +927,20 @@ struct bnx2x {
896#define HW_VLAN_RX_FLAG 0x800 927#define HW_VLAN_RX_FLAG 0x800
897#define MF_FUNC_DIS 0x1000 928#define MF_FUNC_DIS 0x1000
898 929
899 int func; 930 int pf_num; /* absolute PF number */
931 int pfid; /* per-path PF number */
900 int base_fw_ndsb; 932 int base_fw_ndsb;
901 933#define BP_PATH(bp) (!CHIP_IS_E2(bp) ? \
902#define BP_PORT(bp) (bp->func % PORT_MAX) 934 0 : (bp->pf_num & 1))
903#define BP_FUNC(bp) (bp->func) 935#define BP_PORT(bp) (bp->pfid & 1)
904#define BP_E1HVN(bp) (bp->func >> 1) 936#define BP_FUNC(bp) (bp->pfid)
937#define BP_ABS_FUNC(bp) (bp->pf_num)
938#define BP_E1HVN(bp) (bp->pfid >> 1)
939#define BP_VN(bp) (CHIP_MODE_IS_4_PORT(bp) ? \
940 0 : BP_E1HVN(bp))
905#define BP_L_ID(bp) (BP_E1HVN(bp) << 2) 941#define BP_L_ID(bp) (BP_E1HVN(bp) << 2)
942#define BP_FW_MB_IDX(bp) (BP_PORT(bp) +\
943 BP_VN(bp) * (CHIP_IS_E1x(bp) ? 2 : 1))
906 944
907#ifdef BCM_CNIC 945#ifdef BCM_CNIC
908#define BCM_CNIC_CID_START 16 946#define BCM_CNIC_CID_START 16
@@ -932,7 +970,8 @@ struct bnx2x {
932 struct cmng_struct_per_port cmng; 970 struct cmng_struct_per_port cmng;
933 u32 vn_weight_sum; 971 u32 vn_weight_sum;
934 972
935 u32 mf_config; 973 u32 mf_config[E1HVN_MAX];
974 u32 mf2_config[E2_FUNC_MAX];
936 u16 mf_ov; 975 u16 mf_ov;
937 u8 mf_mode; 976 u8 mf_mode;
938#define IS_MF(bp) (bp->mf_mode != 0) 977#define IS_MF(bp) (bp->mf_mode != 0)
@@ -1127,11 +1166,11 @@ struct bnx2x {
1127#define RSS_IPV6_CAP 0x0004 1166#define RSS_IPV6_CAP 0x0004
1128#define RSS_IPV6_TCP_CAP 0x0008 1167#define RSS_IPV6_TCP_CAP 0x0008
1129 1168
1130#define BNX2X_MAX_QUEUES(bp) (IS_MF(bp) ? (MAX_CONTEXT/E1HVN_MAX) \
1131 : MAX_CONTEXT)
1132#define BNX2X_NUM_QUEUES(bp) (bp->num_queues) 1169#define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
1133#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1) 1170#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1)
1134 1171
1172#define BNX2X_MAX_QUEUES(bp) (bp->igu_sb_cnt - CNIC_CONTEXT_USE)
1173#define is_eth_multi(bp) (BNX2X_NUM_ETH_QUEUES(bp) > 1)
1135 1174
1136#define RSS_IPV4_CAP_MASK \ 1175#define RSS_IPV4_CAP_MASK \
1137 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY 1176 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY
@@ -1342,14 +1381,40 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1342 1381
1343 1382
1344/* DMAE command defines */ 1383/* DMAE command defines */
1345#define DMAE_CMD_SRC_PCI 0 1384#define DMAE_TIMEOUT -1
1346#define DMAE_CMD_SRC_GRC DMAE_COMMAND_SRC 1385#define DMAE_PCI_ERROR -2 /* E2 and onward */
1386#define DMAE_NOT_RDY -3
1387#define DMAE_PCI_ERR_FLAG 0x80000000
1388
1389#define DMAE_SRC_PCI 0
1390#define DMAE_SRC_GRC 1
1391
1392#define DMAE_DST_NONE 0
1393#define DMAE_DST_PCI 1
1394#define DMAE_DST_GRC 2
1395
1396#define DMAE_COMP_PCI 0
1397#define DMAE_COMP_GRC 1
1398
1399/* E2 and onward - PCI error handling in the completion */
1400
1401#define DMAE_COMP_REGULAR 0
1402#define DMAE_COM_SET_ERR 1
1347 1403
1348#define DMAE_CMD_DST_PCI (1 << DMAE_COMMAND_DST_SHIFT) 1404#define DMAE_CMD_SRC_PCI (DMAE_SRC_PCI << \
1349#define DMAE_CMD_DST_GRC (2 << DMAE_COMMAND_DST_SHIFT) 1405 DMAE_COMMAND_SRC_SHIFT)
1406#define DMAE_CMD_SRC_GRC (DMAE_SRC_GRC << \
1407 DMAE_COMMAND_SRC_SHIFT)
1350 1408
1351#define DMAE_CMD_C_DST_PCI 0 1409#define DMAE_CMD_DST_PCI (DMAE_DST_PCI << \
1352#define DMAE_CMD_C_DST_GRC (1 << DMAE_COMMAND_C_DST_SHIFT) 1410 DMAE_COMMAND_DST_SHIFT)
1411#define DMAE_CMD_DST_GRC (DMAE_DST_GRC << \
1412 DMAE_COMMAND_DST_SHIFT)
1413
1414#define DMAE_CMD_C_DST_PCI (DMAE_COMP_PCI << \
1415 DMAE_COMMAND_C_DST_SHIFT)
1416#define DMAE_CMD_C_DST_GRC (DMAE_COMP_GRC << \
1417 DMAE_COMMAND_C_DST_SHIFT)
1353 1418
1354#define DMAE_CMD_C_ENABLE DMAE_COMMAND_C_TYPE_ENABLE 1419#define DMAE_CMD_C_ENABLE DMAE_COMMAND_C_TYPE_ENABLE
1355 1420
@@ -1365,10 +1430,20 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1365#define DMAE_CMD_DST_RESET DMAE_COMMAND_DST_RESET 1430#define DMAE_CMD_DST_RESET DMAE_COMMAND_DST_RESET
1366#define DMAE_CMD_E1HVN_SHIFT DMAE_COMMAND_E1HVN_SHIFT 1431#define DMAE_CMD_E1HVN_SHIFT DMAE_COMMAND_E1HVN_SHIFT
1367 1432
1433#define DMAE_SRC_PF 0
1434#define DMAE_SRC_VF 1
1435
1436#define DMAE_DST_PF 0
1437#define DMAE_DST_VF 1
1438
1439#define DMAE_C_SRC 0
1440#define DMAE_C_DST 1
1441
1368#define DMAE_LEN32_RD_MAX 0x80 1442#define DMAE_LEN32_RD_MAX 0x80
1369#define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000) 1443#define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000)
1370 1444
1371#define DMAE_COMP_VAL 0xe0d0d0ae 1445#define DMAE_COMP_VAL 0x60d0d0ae /* E2 and on - upper bit
1446 indicates error */
1372 1447
1373#define MAX_DMAE_C_PER_PORT 8 1448#define MAX_DMAE_C_PER_PORT 8
1374#define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ 1449#define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
@@ -1534,6 +1609,9 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1534#define GET_FLAG(value, mask) \ 1609#define GET_FLAG(value, mask) \
1535 (((value) &= (mask)) >> (mask##_SHIFT)) 1610 (((value) &= (mask)) >> (mask##_SHIFT))
1536 1611
1612#define GET_FIELD(value, fname) \
1613 (((value) & (fname##_MASK)) >> (fname##_SHIFT))
1614
1537#define CAM_IS_INVALID(x) \ 1615#define CAM_IS_INVALID(x) \
1538 (GET_FLAG(x.flags, \ 1616 (GET_FLAG(x.flags, \
1539 MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \ 1617 MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \
@@ -1553,6 +1631,9 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1553#define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0 1631#define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0
1554#endif 1632#endif
1555 1633
1634#ifndef ETH_MAX_RX_CLIENTS_E2
1635#define ETH_MAX_RX_CLIENTS_E2 ETH_MAX_RX_CLIENTS_E1H
1636#endif
1556#define BNX2X_VPD_LEN 128 1637#define BNX2X_VPD_LEN 128
1557#define VENDOR_ID_LEN 4 1638#define VENDOR_ID_LEN 4
1558 1639
@@ -1570,13 +1651,18 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1570#define BNX2X_EXTERN extern 1651#define BNX2X_EXTERN extern
1571#endif 1652#endif
1572 1653
1573BNX2X_EXTERN int load_count[3]; /* 0-common, 1-port0, 2-port1 */ 1654BNX2X_EXTERN int load_count[2][3]; /* per path: 0-common, 1-port0, 2-port1 */
1574 1655
1575/* MISC_REG_RESET_REG - this is here for the hsi to work don't touch */ 1656/* MISC_REG_RESET_REG - this is here for the hsi to work don't touch */
1576 1657
1577extern void bnx2x_set_ethtool_ops(struct net_device *netdev); 1658extern void bnx2x_set_ethtool_ops(struct net_device *netdev);
1578 1659
1579void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx); 1660void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
1661u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type);
1662u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode);
1663u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
1664 bool with_comp, u8 comp_type);
1665
1580 1666
1581#define WAIT_RAMROD_POLL 0x01 1667#define WAIT_RAMROD_POLL 0x01
1582#define WAIT_RAMROD_COMMON 0x02 1668#define WAIT_RAMROD_COMMON 0x02
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index ae05987e647e..cffa778ec5bf 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -18,7 +18,7 @@
18 18
19#include <linux/etherdevice.h> 19#include <linux/etherdevice.h>
20#include <linux/ip.h> 20#include <linux/ip.h>
21#include <linux/ipv6.h> 21#include <net/ipv6.h>
22#include <net/ip6_checksum.h> 22#include <net/ip6_checksum.h>
23#include <linux/firmware.h> 23#include <linux/firmware.h>
24#include "bnx2x_cmn.h" 24#include "bnx2x_cmn.h"
@@ -118,16 +118,10 @@ int bnx2x_tx_int(struct bnx2x_fastpath *fp)
118 118
119 pkt_cons = TX_BD(sw_cons); 119 pkt_cons = TX_BD(sw_cons);
120 120
121 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */ 121 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
122 " pkt_cons %u\n",
123 fp->index, hw_cons, sw_cons, pkt_cons);
122 124
123 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
124 hw_cons, sw_cons, pkt_cons);
125
126/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
127 rmb();
128 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
129 }
130*/
131 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons); 125 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
132 sw_cons++; 126 sw_cons++;
133 } 127 }
@@ -749,8 +743,9 @@ void bnx2x_link_report(struct bnx2x *bp)
749 u16 vn_max_rate; 743 u16 vn_max_rate;
750 744
751 vn_max_rate = 745 vn_max_rate =
752 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >> 746 ((bp->mf_config[BP_VN(bp)] &
753 FUNC_MF_CFG_MAX_BW_SHIFT) * 100; 747 FUNC_MF_CFG_MAX_BW_MASK) >>
748 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
754 if (vn_max_rate < line_speed) 749 if (vn_max_rate < line_speed)
755 line_speed = vn_max_rate; 750 line_speed = vn_max_rate;
756 } 751 }
@@ -912,14 +907,15 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
912 if (j != 0) 907 if (j != 0)
913 continue; 908 continue;
914 909
915 REG_WR(bp, BAR_USTRORM_INTMEM + 910 if (!CHIP_IS_E2(bp)) {
916 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func), 911 REG_WR(bp, BAR_USTRORM_INTMEM +
917 U64_LO(fp->rx_comp_mapping)); 912 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
918 REG_WR(bp, BAR_USTRORM_INTMEM + 913 U64_LO(fp->rx_comp_mapping));
919 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4, 914 REG_WR(bp, BAR_USTRORM_INTMEM +
920 U64_HI(fp->rx_comp_mapping)); 915 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
916 U64_HI(fp->rx_comp_mapping));
917 }
921 } 918 }
922
923} 919}
924static void bnx2x_free_tx_skbs(struct bnx2x *bp) 920static void bnx2x_free_tx_skbs(struct bnx2x *bp)
925{ 921{
@@ -1308,23 +1304,27 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1308 } 1304 }
1309 1305
1310 } else { 1306 } else {
1307 int path = BP_PATH(bp);
1311 int port = BP_PORT(bp); 1308 int port = BP_PORT(bp);
1312 1309
1313 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n", 1310 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
1314 load_count[0], load_count[1], load_count[2]); 1311 path, load_count[path][0], load_count[path][1],
1315 load_count[0]++; 1312 load_count[path][2]);
1316 load_count[1 + port]++; 1313 load_count[path][0]++;
1317 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n", 1314 load_count[path][1 + port]++;
1318 load_count[0], load_count[1], load_count[2]); 1315 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
1319 if (load_count[0] == 1) 1316 path, load_count[path][0], load_count[path][1],
1317 load_count[path][2]);
1318 if (load_count[path][0] == 1)
1320 load_code = FW_MSG_CODE_DRV_LOAD_COMMON; 1319 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
1321 else if (load_count[1 + port] == 1) 1320 else if (load_count[path][1 + port] == 1)
1322 load_code = FW_MSG_CODE_DRV_LOAD_PORT; 1321 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1323 else 1322 else
1324 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION; 1323 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1325 } 1324 }
1326 1325
1327 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || 1326 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1327 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
1328 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) 1328 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1329 bp->port.pmf = 1; 1329 bp->port.pmf = 1;
1330 else 1330 else
@@ -1349,7 +1349,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1349 /* Setup NIC internals and enable interrupts */ 1349 /* Setup NIC internals and enable interrupts */
1350 bnx2x_nic_init(bp, load_code); 1350 bnx2x_nic_init(bp, load_code);
1351 1351
1352 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) && 1352 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1353 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
1353 (bp->common.shmem2_base)) 1354 (bp->common.shmem2_base))
1354 SHMEM2_WR(bp, dcc_support, 1355 SHMEM2_WR(bp, dcc_support,
1355 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV | 1356 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
@@ -1389,11 +1390,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1389#endif 1390#endif
1390 } 1391 }
1391 1392
1392 if (CHIP_IS_E1H(bp)) 1393 if (!CHIP_IS_E1(bp) &&
1393 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) { 1394 (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
1394 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n"); 1395 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
1395 bp->flags |= MF_FUNC_DIS; 1396 bp->flags |= MF_FUNC_DIS;
1396 } 1397 }
1397 1398
1398#ifdef BCM_CNIC 1399#ifdef BCM_CNIC
1399 /* Enable Timer scan */ 1400 /* Enable Timer scan */
@@ -1527,8 +1528,10 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1527 bp->rx_mode = BNX2X_RX_MODE_NONE; 1528 bp->rx_mode = BNX2X_RX_MODE_NONE;
1528 bnx2x_set_storm_rx_mode(bp); 1529 bnx2x_set_storm_rx_mode(bp);
1529 1530
1531 /* Stop Tx */
1532 bnx2x_tx_disable(bp);
1530 del_timer_sync(&bp->timer); 1533 del_timer_sync(&bp->timer);
1531 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb, 1534 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
1532 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq)); 1535 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
1533 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 1536 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1534 1537
@@ -1855,6 +1858,120 @@ exit_lbl:
1855} 1858}
1856#endif 1859#endif
1857 1860
1861static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb,
1862 struct eth_tx_parse_bd_e2 *pbd,
1863 u32 xmit_type)
1864{
1865 pbd->parsing_data |= cpu_to_le16(skb_shinfo(skb)->gso_size) <<
1866 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT;
1867 if ((xmit_type & XMIT_GSO_V6) &&
1868 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
1869 pbd->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
1870}
1871
1872/**
1873 * Update PBD in GSO case.
1874 *
1875 * @param skb
1876 * @param tx_start_bd
1877 * @param pbd
1878 * @param xmit_type
1879 */
1880static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
1881 struct eth_tx_parse_bd_e1x *pbd,
1882 u32 xmit_type)
1883{
1884 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1885 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
1886 pbd->tcp_flags = pbd_tcp_flags(skb);
1887
1888 if (xmit_type & XMIT_GSO_V4) {
1889 pbd->ip_id = swab16(ip_hdr(skb)->id);
1890 pbd->tcp_pseudo_csum =
1891 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1892 ip_hdr(skb)->daddr,
1893 0, IPPROTO_TCP, 0));
1894
1895 } else
1896 pbd->tcp_pseudo_csum =
1897 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1898 &ipv6_hdr(skb)->daddr,
1899 0, IPPROTO_TCP, 0));
1900
1901 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
1902}
1903/**
1904 *
1905 * @param skb
1906 * @param tx_start_bd
1907 * @param pbd_e2
1908 * @param xmit_type
1909 *
1910 * @return header len
1911 */
1912static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
1913 struct eth_tx_parse_bd_e2 *pbd,
1914 u32 xmit_type)
1915{
1916 pbd->parsing_data |= cpu_to_le16(tcp_hdrlen(skb)/4) <<
1917 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT;
1918
1919 pbd->parsing_data |= cpu_to_le16(((unsigned char *)tcp_hdr(skb) -
1920 skb->data) / 2) <<
1921 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT;
1922
1923 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
1924}
1925
1926/**
1927 *
1928 * @param skb
1929 * @param tx_start_bd
1930 * @param pbd
1931 * @param xmit_type
1932 *
1933 * @return Header length
1934 */
1935static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
1936 struct eth_tx_parse_bd_e1x *pbd,
1937 u32 xmit_type)
1938{
1939 u8 hlen = (skb_network_header(skb) - skb->data) / 2;
1940
1941 /* for now NS flag is not used in Linux */
1942 pbd->global_data =
1943 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
1944 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
1945
1946 pbd->ip_hlen_w = (skb_transport_header(skb) -
1947 skb_network_header(skb)) / 2;
1948
1949 hlen += pbd->ip_hlen_w + tcp_hdrlen(skb) / 2;
1950
1951 pbd->total_hlen_w = cpu_to_le16(hlen);
1952 hlen = hlen*2;
1953
1954 if (xmit_type & XMIT_CSUM_TCP) {
1955 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
1956
1957 } else {
1958 s8 fix = SKB_CS_OFF(skb); /* signed! */
1959
1960 DP(NETIF_MSG_TX_QUEUED,
1961 "hlen %d fix %d csum before fix %x\n",
1962 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
1963
1964 /* HW bug: fixup the CSUM */
1965 pbd->tcp_pseudo_csum =
1966 bnx2x_csum_fix(skb_transport_header(skb),
1967 SKB_CS(skb), fix);
1968
1969 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
1970 pbd->tcp_pseudo_csum);
1971 }
1972
1973 return hlen;
1974}
1858/* called with netif_tx_lock 1975/* called with netif_tx_lock
1859 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call 1976 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
1860 * netif_wake_queue() 1977 * netif_wake_queue()
@@ -1868,6 +1985,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1868 struct eth_tx_start_bd *tx_start_bd; 1985 struct eth_tx_start_bd *tx_start_bd;
1869 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; 1986 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
1870 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; 1987 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
1988 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
1871 u16 pkt_prod, bd_prod; 1989 u16 pkt_prod, bd_prod;
1872 int nbd, fp_index; 1990 int nbd, fp_index;
1873 dma_addr_t mapping; 1991 dma_addr_t mapping;
@@ -1895,9 +2013,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1895 return NETDEV_TX_BUSY; 2013 return NETDEV_TX_BUSY;
1896 } 2014 }
1897 2015
1898 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)" 2016 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
1899 " gso type %x xmit_type %x\n", 2017 "protocol(%x,%x) gso type %x xmit_type %x\n",
1900 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, 2018 fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
1901 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type); 2019 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
1902 2020
1903 eth = (struct ethhdr *)skb->data; 2021 eth = (struct ethhdr *)skb->data;
@@ -1988,44 +2106,21 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1988 tx_start_bd->bd_flags.as_bitfield |= 2106 tx_start_bd->bd_flags.as_bitfield |=
1989 ETH_TX_BD_FLAGS_IS_UDP; 2107 ETH_TX_BD_FLAGS_IS_UDP;
1990 } 2108 }
1991 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
1992 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
1993 /* Set PBD in checksum offload case */
1994 if (xmit_type & XMIT_CSUM) {
1995 hlen = (skb_network_header(skb) - skb->data) / 2;
1996 2109
1997 /* for now NS flag is not used in Linux */ 2110 if (CHIP_IS_E2(bp)) {
1998 pbd_e1x->global_data = 2111 pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
1999 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << 2112 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2000 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT)); 2113 /* Set PBD in checksum offload case */
2001 2114 if (xmit_type & XMIT_CSUM)
2002 pbd_e1x->ip_hlen_w = (skb_transport_header(skb) - 2115 hlen = bnx2x_set_pbd_csum_e2(bp,
2003 skb_network_header(skb)) / 2; 2116 skb, pbd_e2, xmit_type);
2004 2117 } else {
2005 hlen += pbd_e1x->ip_hlen_w + tcp_hdrlen(skb) / 2; 2118 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2006 2119 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2007 pbd_e1x->total_hlen_w = cpu_to_le16(hlen); 2120 /* Set PBD in checksum offload case */
2008 hlen = hlen*2; 2121 if (xmit_type & XMIT_CSUM)
2009 2122 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
2010 if (xmit_type & XMIT_CSUM_TCP) {
2011 pbd_e1x->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2012
2013 } else {
2014 s8 fix = SKB_CS_OFF(skb); /* signed! */
2015
2016 DP(NETIF_MSG_TX_QUEUED,
2017 "hlen %d fix %d csum before fix %x\n",
2018 le16_to_cpu(pbd_e1x->total_hlen_w),
2019 fix, SKB_CS(skb));
2020
2021 /* HW bug: fixup the CSUM */
2022 pbd_e1x->tcp_pseudo_csum =
2023 bnx2x_csum_fix(skb_transport_header(skb),
2024 SKB_CS(skb), fix);
2025 2123
2026 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2027 pbd_e1x->tcp_pseudo_csum);
2028 }
2029 } 2124 }
2030 2125
2031 mapping = dma_map_single(&bp->pdev->dev, skb->data, 2126 mapping = dma_map_single(&bp->pdev->dev, skb->data,
@@ -2057,26 +2152,10 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2057 if (unlikely(skb_headlen(skb) > hlen)) 2152 if (unlikely(skb_headlen(skb) > hlen))
2058 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd, 2153 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2059 hlen, bd_prod, ++nbd); 2154 hlen, bd_prod, ++nbd);
2060 2155 if (CHIP_IS_E2(bp))
2061 pbd_e1x->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); 2156 bnx2x_set_pbd_gso_e2(skb, pbd_e2, xmit_type);
2062 pbd_e1x->tcp_send_seq = swab32(tcp_hdr(skb)->seq); 2157 else
2063 pbd_e1x->tcp_flags = pbd_tcp_flags(skb); 2158 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
2064
2065 if (xmit_type & XMIT_GSO_V4) {
2066 pbd_e1x->ip_id = swab16(ip_hdr(skb)->id);
2067 pbd_e1x->tcp_pseudo_csum =
2068 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2069 ip_hdr(skb)->daddr,
2070 0, IPPROTO_TCP, 0));
2071
2072 } else
2073 pbd_e1x->tcp_pseudo_csum =
2074 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2075 &ipv6_hdr(skb)->daddr,
2076 0, IPPROTO_TCP, 0));
2077
2078 pbd_e1x->global_data |=
2079 ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2080 } 2159 }
2081 tx_data_bd = (struct eth_tx_bd *)tx_start_bd; 2160 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2082 2161
@@ -2124,7 +2203,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2124 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags, 2203 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2125 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq, 2204 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2126 le16_to_cpu(pbd_e1x->total_hlen_w)); 2205 le16_to_cpu(pbd_e1x->total_hlen_w));
2127 2206 if (pbd_e2)
2207 DP(NETIF_MSG_TX_QUEUED,
2208 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2209 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2210 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2211 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2212 pbd_e2->parsing_data);
2128 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); 2213 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2129 2214
2130 /* 2215 /*
@@ -2327,6 +2412,8 @@ int bnx2x_resume(struct pci_dev *pdev)
2327 bnx2x_set_power_state(bp, PCI_D0); 2412 bnx2x_set_power_state(bp, PCI_D0);
2328 netif_device_attach(dev); 2413 netif_device_attach(dev);
2329 2414
2415 /* Since the chip was reset, clear the FW sequence number */
2416 bp->fw_seq = 0;
2330 rc = bnx2x_nic_load(bp, LOAD_OPEN); 2417 rc = bnx2x_nic_load(bp, LOAD_OPEN);
2331 2418
2332 rtnl_unlock(); 2419 rtnl_unlock();
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 2fb9045833e1..41d0a177db7f 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -366,10 +366,77 @@ static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
366 fp->index, bd_prod, rx_comp_prod, rx_sge_prod); 366 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
367} 367}
368 368
369static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
370 u8 segment, u16 index, u8 op,
371 u8 update, u32 igu_addr)
372{
373 struct igu_regular cmd_data = {0};
374
375 cmd_data.sb_id_and_flags =
376 ((index << IGU_REGULAR_SB_INDEX_SHIFT) |
377 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
378 (update << IGU_REGULAR_BUPDATE_SHIFT) |
379 (op << IGU_REGULAR_ENABLE_INT_SHIFT));
369 380
381 DP(NETIF_MSG_HW, "write 0x%08x to IGU addr 0x%x\n",
382 cmd_data.sb_id_and_flags, igu_addr);
383 REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);
384
385 /* Make sure that ACK is written */
386 mmiowb();
387 barrier();
388}
370 389
371static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id, 390static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp,
372 u8 storm, u16 index, u8 op, u8 update) 391 u8 idu_sb_id, bool is_Pf)
392{
393 u32 data, ctl, cnt = 100;
394 u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
395 u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
396 u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
397 u32 sb_bit = 1 << (idu_sb_id%32);
398 u32 func_encode = BP_FUNC(bp) |
399 ((is_Pf == true ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT);
400 u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
401
402 /* Not supported in BC mode */
403 if (CHIP_INT_MODE_IS_BC(bp))
404 return;
405
406 data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
407 << IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
408 IGU_REGULAR_CLEANUP_SET |
409 IGU_REGULAR_BCLEANUP;
410
411 ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
412 func_encode << IGU_CTRL_REG_FID_SHIFT |
413 IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
414
415 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
416 data, igu_addr_data);
417 REG_WR(bp, igu_addr_data, data);
418 mmiowb();
419 barrier();
420 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
421 ctl, igu_addr_ctl);
422 REG_WR(bp, igu_addr_ctl, ctl);
423 mmiowb();
424 barrier();
425
426 /* wait for clean up to finish */
427 while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
428 msleep(20);
429
430
431 if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
432 DP(NETIF_MSG_HW, "Unable to finish IGU cleanup: "
433 "idu_sb_id %d offset %d bit %d (cnt %d)\n",
434 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
435 }
436}
437
438static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
439 u8 storm, u16 index, u8 op, u8 update)
373{ 440{
374 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + 441 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
375 COMMAND_REG_INT_ACK); 442 COMMAND_REG_INT_ACK);
@@ -390,7 +457,37 @@ static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
390 mmiowb(); 457 mmiowb();
391 barrier(); 458 barrier();
392} 459}
393static inline u16 bnx2x_ack_int(struct bnx2x *bp) 460
461static inline void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
462 u16 index, u8 op, u8 update)
463{
464 u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
465
466 bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
467 igu_addr);
468}
469
470static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
471 u16 index, u8 op, u8 update)
472{
473 if (bp->common.int_block == INT_BLOCK_HC)
474 bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update);
475 else {
476 u8 segment;
477
478 if (CHIP_INT_MODE_IS_BC(bp))
479 segment = storm;
480 else if (igu_sb_id != bp->igu_dsb_id)
481 segment = IGU_SEG_ACCESS_DEF;
482 else if (storm == ATTENTION_ID)
483 segment = IGU_SEG_ACCESS_ATTN;
484 else
485 segment = IGU_SEG_ACCESS_DEF;
486 bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update);
487 }
488}
489
490static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
394{ 491{
395 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + 492 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
396 COMMAND_REG_SIMD_MASK); 493 COMMAND_REG_SIMD_MASK);
@@ -399,13 +496,34 @@ static inline u16 bnx2x_ack_int(struct bnx2x *bp)
399 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n", 496 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
400 result, hc_addr); 497 result, hc_addr);
401 498
499 barrier();
500 return result;
501}
502
503static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
504{
505 u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
506 u32 result = REG_RD(bp, igu_addr);
507
508 DP(NETIF_MSG_HW, "read 0x%08x from IGU addr 0x%x\n",
509 result, igu_addr);
510
511 barrier();
402 return result; 512 return result;
403} 513}
404 514
515static inline u16 bnx2x_ack_int(struct bnx2x *bp)
516{
517 barrier();
518 if (bp->common.int_block == INT_BLOCK_HC)
519 return bnx2x_hc_ack_int(bp);
520 else
521 return bnx2x_igu_ack_int(bp);
522}
523
405/* 524/*
406 * fast path service functions 525 * fast path service functions
407 */ 526 */
408
409static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp) 527static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
410{ 528{
411 /* Tell compiler that consumer and producer can change */ 529 /* Tell compiler that consumer and producer can change */
@@ -456,6 +574,17 @@ static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
456 rx_cons_sb++; 574 rx_cons_sb++;
457 return (fp->rx_comp_cons != rx_cons_sb); 575 return (fp->rx_comp_cons != rx_cons_sb);
458} 576}
577/**
578 * disables tx from stack point of view
579 *
580 * @param bp
581 */
582static inline void bnx2x_tx_disable(struct bnx2x *bp)
583{
584 netif_tx_disable(bp->dev);
585 netif_carrier_off(bp->dev);
586}
587
459static inline void bnx2x_free_rx_sge(struct bnx2x *bp, 588static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
460 struct bnx2x_fastpath *fp, u16 index) 589 struct bnx2x_fastpath *fp, u16 index)
461{ 590{
diff --git a/drivers/net/bnx2x/bnx2x_dump.h b/drivers/net/bnx2x/bnx2x_dump.h
index 3bb9a91bb3f7..dc18c25ca9e5 100644
--- a/drivers/net/bnx2x/bnx2x_dump.h
+++ b/drivers/net/bnx2x/bnx2x_dump.h
@@ -31,14 +31,24 @@ struct dump_sign {
31 31
32#define RI_E1 0x1 32#define RI_E1 0x1
33#define RI_E1H 0x2 33#define RI_E1H 0x2
34#define RI_E2 0x4
34#define RI_ONLINE 0x100 35#define RI_ONLINE 0x100
35 36#define RI_PATH0_DUMP 0x200
37#define RI_PATH1_DUMP 0x400
36#define RI_E1_OFFLINE (RI_E1) 38#define RI_E1_OFFLINE (RI_E1)
37#define RI_E1_ONLINE (RI_E1 | RI_ONLINE) 39#define RI_E1_ONLINE (RI_E1 | RI_ONLINE)
38#define RI_E1H_OFFLINE (RI_E1H) 40#define RI_E1H_OFFLINE (RI_E1H)
39#define RI_E1H_ONLINE (RI_E1H | RI_ONLINE) 41#define RI_E1H_ONLINE (RI_E1H | RI_ONLINE)
40#define RI_ALL_OFFLINE (RI_E1 | RI_E1H) 42#define RI_E2_OFFLINE (RI_E2)
41#define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_ONLINE) 43#define RI_E2_ONLINE (RI_E2 | RI_ONLINE)
44#define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H)
45#define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE)
46#define RI_E1HE2_OFFLINE (RI_E2 | RI_E1H)
47#define RI_E1HE2_ONLINE (RI_E2 | RI_E1H | RI_ONLINE)
48#define RI_E1E2_OFFLINE (RI_E2 | RI_E1)
49#define RI_E1E2_ONLINE (RI_E2 | RI_E1 | RI_ONLINE)
50#define RI_ALL_OFFLINE (RI_E1 | RI_E1H | RI_E2)
51#define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE)
42 52
43#define MAX_TIMER_PENDING 200 53#define MAX_TIMER_PENDING 200
44#define TIMER_SCAN_DONT_CARE 0xFF 54#define TIMER_SCAN_DONT_CARE 0xFF
@@ -513,6 +523,12 @@ static const struct wreg_addr wreg_addrs_e1h[WREGS_COUNT_E1H] = {
513 { 0x1b0c00, 256, 2, read_reg_e1h_0, RI_E1H_OFFLINE } 523 { 0x1b0c00, 256, 2, read_reg_e1h_0, RI_E1H_OFFLINE }
514}; 524};
515 525
526#define WREGS_COUNT_E2 1
527static const u32 read_reg_e2_0[] = { 0x1b1040, 0x1b1000 };
528
529static const struct wreg_addr wreg_addrs_e2[WREGS_COUNT_E2] = {
530 { 0x1b0c00, 128, 2, read_reg_e2_0, RI_E2_OFFLINE }
531};
516 532
517static const struct dump_sign dump_sign_all = { 0x49aa93ee, 0x40835, 0x22 }; 533static const struct dump_sign dump_sign_all = { 0x49aa93ee, 0x40835, 0x22 };
518 534
@@ -531,4 +547,17 @@ static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] =
531 { 0x1640d0, 0x1640d4 }; 547 { 0x1640d0, 0x1640d4 };
532 548
533 549
550#define PAGE_MODE_VALUES_E2 2
551
552#define PAGE_READ_REGS_E2 1
553
554#define PAGE_WRITE_REGS_E2 1
555
556static const u32 page_vals_e2[PAGE_MODE_VALUES_E2] = { 0, 128 };
557
558static const u32 page_write_regs_e2[PAGE_WRITE_REGS_E2] = { 328476 };
559
560static const struct reg_addr page_read_regs_e2[PAGE_READ_REGS_E2] = {
561 { 0x58000, 4608, RI_E2_ONLINE } };
562
534#endif /* BNX2X_DUMP_H */ 563#endif /* BNX2X_DUMP_H */
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index fa8f9526f93c..8fb00276dc41 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -41,19 +41,19 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
41 (bp->link_vars.link_up)) { 41 (bp->link_vars.link_up)) {
42 cmd->speed = bp->link_vars.line_speed; 42 cmd->speed = bp->link_vars.line_speed;
43 cmd->duplex = bp->link_vars.duplex; 43 cmd->duplex = bp->link_vars.duplex;
44 if (IS_MF(bp)) {
45 u16 vn_max_rate;
46
47 vn_max_rate =
48 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
49 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
50 if (vn_max_rate < cmd->speed)
51 cmd->speed = vn_max_rate;
52 }
53 } else { 44 } else {
45
54 cmd->speed = bp->link_params.req_line_speed[cfg_idx]; 46 cmd->speed = bp->link_params.req_line_speed[cfg_idx];
55 cmd->duplex = bp->link_params.req_duplex[cfg_idx]; 47 cmd->duplex = bp->link_params.req_duplex[cfg_idx];
56 } 48 }
49 if (IS_MF(bp)) {
50 u16 vn_max_rate = ((bp->mf_config[BP_VN(bp)] &
51 FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT) *
52 100;
53
54 if (vn_max_rate < cmd->speed)
55 cmd->speed = vn_max_rate;
56 }
57 57
58 if (bp->port.supported[cfg_idx] & SUPPORTED_TP) 58 if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
59 cmd->port = PORT_TP; 59 cmd->port = PORT_TP;
@@ -298,6 +298,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
298 298
299#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE) 299#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
300#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE) 300#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
301#define IS_E2_ONLINE(info) (((info) & RI_E2_ONLINE) == RI_E2_ONLINE)
301 302
302static int bnx2x_get_regs_len(struct net_device *dev) 303static int bnx2x_get_regs_len(struct net_device *dev)
303{ 304{
@@ -315,7 +316,7 @@ static int bnx2x_get_regs_len(struct net_device *dev)
315 regdump_len += wreg_addrs_e1[i].size * 316 regdump_len += wreg_addrs_e1[i].size *
316 (1 + wreg_addrs_e1[i].read_regs_count); 317 (1 + wreg_addrs_e1[i].read_regs_count);
317 318
318 } else { /* E1H */ 319 } else if (CHIP_IS_E1H(bp)) {
319 for (i = 0; i < REGS_COUNT; i++) 320 for (i = 0; i < REGS_COUNT; i++)
320 if (IS_E1H_ONLINE(reg_addrs[i].info)) 321 if (IS_E1H_ONLINE(reg_addrs[i].info))
321 regdump_len += reg_addrs[i].size; 322 regdump_len += reg_addrs[i].size;
@@ -324,6 +325,15 @@ static int bnx2x_get_regs_len(struct net_device *dev)
324 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info)) 325 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
325 regdump_len += wreg_addrs_e1h[i].size * 326 regdump_len += wreg_addrs_e1h[i].size *
326 (1 + wreg_addrs_e1h[i].read_regs_count); 327 (1 + wreg_addrs_e1h[i].read_regs_count);
328 } else if (CHIP_IS_E2(bp)) {
329 for (i = 0; i < REGS_COUNT; i++)
330 if (IS_E2_ONLINE(reg_addrs[i].info))
331 regdump_len += reg_addrs[i].size;
332
333 for (i = 0; i < WREGS_COUNT_E2; i++)
334 if (IS_E2_ONLINE(wreg_addrs_e2[i].info))
335 regdump_len += wreg_addrs_e2[i].size *
336 (1 + wreg_addrs_e2[i].read_regs_count);
327 } 337 }
328 regdump_len *= 4; 338 regdump_len *= 4;
329 regdump_len += sizeof(struct dump_hdr); 339 regdump_len += sizeof(struct dump_hdr);
@@ -331,6 +341,23 @@ static int bnx2x_get_regs_len(struct net_device *dev)
331 return regdump_len; 341 return regdump_len;
332} 342}
333 343
344static inline void bnx2x_read_pages_regs_e2(struct bnx2x *bp, u32 *p)
345{
346 u32 i, j, k, n;
347
348 for (i = 0; i < PAGE_MODE_VALUES_E2; i++) {
349 for (j = 0; j < PAGE_WRITE_REGS_E2; j++) {
350 REG_WR(bp, page_write_regs_e2[j], page_vals_e2[i]);
351 for (k = 0; k < PAGE_READ_REGS_E2; k++)
352 if (IS_E2_ONLINE(page_read_regs_e2[k].info))
353 for (n = 0; n <
354 page_read_regs_e2[k].size; n++)
355 *p++ = REG_RD(bp,
356 page_read_regs_e2[k].addr + n*4);
357 }
358 }
359}
360
334static void bnx2x_get_regs(struct net_device *dev, 361static void bnx2x_get_regs(struct net_device *dev,
335 struct ethtool_regs *regs, void *_p) 362 struct ethtool_regs *regs, void *_p)
336{ 363{
@@ -350,7 +377,14 @@ static void bnx2x_get_regs(struct net_device *dev,
350 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR); 377 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
351 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR); 378 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
352 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR); 379 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
353 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE; 380
381 if (CHIP_IS_E1(bp))
382 dump_hdr.info = RI_E1_ONLINE;
383 else if (CHIP_IS_E1H(bp))
384 dump_hdr.info = RI_E1H_ONLINE;
385 else if (CHIP_IS_E2(bp))
386 dump_hdr.info = RI_E2_ONLINE |
387 (BP_PATH(bp) ? RI_PATH1_DUMP : RI_PATH0_DUMP);
354 388
355 memcpy(p, &dump_hdr, sizeof(struct dump_hdr)); 389 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
356 p += dump_hdr.hdr_size + 1; 390 p += dump_hdr.hdr_size + 1;
@@ -362,16 +396,25 @@ static void bnx2x_get_regs(struct net_device *dev,
362 *p++ = REG_RD(bp, 396 *p++ = REG_RD(bp,
363 reg_addrs[i].addr + j*4); 397 reg_addrs[i].addr + j*4);
364 398
365 } else { /* E1H */ 399 } else if (CHIP_IS_E1H(bp)) {
366 for (i = 0; i < REGS_COUNT; i++) 400 for (i = 0; i < REGS_COUNT; i++)
367 if (IS_E1H_ONLINE(reg_addrs[i].info)) 401 if (IS_E1H_ONLINE(reg_addrs[i].info))
368 for (j = 0; j < reg_addrs[i].size; j++) 402 for (j = 0; j < reg_addrs[i].size; j++)
369 *p++ = REG_RD(bp, 403 *p++ = REG_RD(bp,
370 reg_addrs[i].addr + j*4); 404 reg_addrs[i].addr + j*4);
405
406 } else if (CHIP_IS_E2(bp)) {
407 for (i = 0; i < REGS_COUNT; i++)
408 if (IS_E2_ONLINE(reg_addrs[i].info))
409 for (j = 0; j < reg_addrs[i].size; j++)
410 *p++ = REG_RD(bp,
411 reg_addrs[i].addr + j*4);
412
413 bnx2x_read_pages_regs_e2(bp, p);
371 } 414 }
372} 415}
373 416
374#define PHY_FW_VER_LEN 10 417#define PHY_FW_VER_LEN 20
375 418
376static void bnx2x_get_drvinfo(struct net_device *dev, 419static void bnx2x_get_drvinfo(struct net_device *dev,
377 struct ethtool_drvinfo *info) 420 struct ethtool_drvinfo *info)
@@ -474,7 +517,7 @@ static u32 bnx2x_get_link(struct net_device *dev)
474{ 517{
475 struct bnx2x *bp = netdev_priv(dev); 518 struct bnx2x *bp = netdev_priv(dev);
476 519
477 if (bp->flags & MF_FUNC_DIS) 520 if (bp->flags & MF_FUNC_DIS || (bp->state != BNX2X_STATE_OPEN))
478 return 0; 521 return 0;
479 522
480 return bp->link_vars.link_up; 523 return bp->link_vars.link_up;
@@ -1235,6 +1278,9 @@ static int bnx2x_test_registers(struct bnx2x *bp)
1235 1278
1236 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) { 1279 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
1237 u32 offset, mask, save_val, val; 1280 u32 offset, mask, save_val, val;
1281 if (CHIP_IS_E2(bp) &&
1282 reg_tbl[i].offset0 == HC_REG_AGG_INT_0)
1283 continue;
1238 1284
1239 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1; 1285 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
1240 mask = reg_tbl[i].mask; 1286 mask = reg_tbl[i].mask;
@@ -1286,20 +1332,33 @@ static int bnx2x_test_memory(struct bnx2x *bp)
1286 u32 offset; 1332 u32 offset;
1287 u32 e1_mask; 1333 u32 e1_mask;
1288 u32 e1h_mask; 1334 u32 e1h_mask;
1335 u32 e2_mask;
1289 } prty_tbl[] = { 1336 } prty_tbl[] = {
1290 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 }, 1337 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0, 0 },
1291 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 }, 1338 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2, 0 },
1292 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 }, 1339 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0, 0 },
1293 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 }, 1340 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0, 0 },
1294 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 }, 1341 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0, 0 },
1295 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 }, 1342 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0, 0 },
1296 1343
1297 { NULL, 0xffffffff, 0, 0 } 1344 { NULL, 0xffffffff, 0, 0, 0 }
1298 }; 1345 };
1299 1346
1300 if (!netif_running(bp->dev)) 1347 if (!netif_running(bp->dev))
1301 return rc; 1348 return rc;
1302 1349
1350 /* pre-Check the parity status */
1351 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
1352 val = REG_RD(bp, prty_tbl[i].offset);
1353 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
1354 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask))) ||
1355 (CHIP_IS_E2(bp) && (val & ~(prty_tbl[i].e2_mask)))) {
1356 DP(NETIF_MSG_HW,
1357 "%s is 0x%x\n", prty_tbl[i].name, val);
1358 goto test_mem_exit;
1359 }
1360 }
1361
1303 /* Go through all the memories */ 1362 /* Go through all the memories */
1304 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) 1363 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
1305 for (j = 0; j < mem_tbl[i].size; j++) 1364 for (j = 0; j < mem_tbl[i].size; j++)
@@ -1309,7 +1368,8 @@ static int bnx2x_test_memory(struct bnx2x *bp)
1309 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) { 1368 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
1310 val = REG_RD(bp, prty_tbl[i].offset); 1369 val = REG_RD(bp, prty_tbl[i].offset);
1311 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) || 1370 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
1312 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) { 1371 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask))) ||
1372 (CHIP_IS_E2(bp) && (val & ~(prty_tbl[i].e2_mask)))) {
1313 DP(NETIF_MSG_HW, 1373 DP(NETIF_MSG_HW,
1314 "%s is 0x%x\n", prty_tbl[i].name, val); 1374 "%s is 0x%x\n", prty_tbl[i].name, val);
1315 goto test_mem_exit; 1375 goto test_mem_exit;
@@ -1324,7 +1384,7 @@ test_mem_exit:
1324 1384
1325static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes) 1385static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes)
1326{ 1386{
1327 int cnt = 1000; 1387 int cnt = 1400;
1328 1388
1329 if (link_up) 1389 if (link_up)
1330 while (bnx2x_link_test(bp, is_serdes) && cnt--) 1390 while (bnx2x_link_test(bp, is_serdes) && cnt--)
@@ -1343,7 +1403,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
1343 u16 pkt_prod, bd_prod; 1403 u16 pkt_prod, bd_prod;
1344 struct sw_tx_bd *tx_buf; 1404 struct sw_tx_bd *tx_buf;
1345 struct eth_tx_start_bd *tx_start_bd; 1405 struct eth_tx_start_bd *tx_start_bd;
1346 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; 1406 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
1407 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
1347 dma_addr_t mapping; 1408 dma_addr_t mapping;
1348 union eth_rx_cqe *cqe; 1409 union eth_rx_cqe *cqe;
1349 u8 cqe_fp_flags; 1410 u8 cqe_fp_flags;
@@ -1411,7 +1472,9 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
1411 /* turn on parsing and get a BD */ 1472 /* turn on parsing and get a BD */
1412 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); 1473 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1413 pbd_e1x = &fp_tx->tx_desc_ring[bd_prod].parse_bd_e1x; 1474 pbd_e1x = &fp_tx->tx_desc_ring[bd_prod].parse_bd_e1x;
1475 pbd_e2 = &fp_tx->tx_desc_ring[bd_prod].parse_bd_e2;
1414 1476
1477 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
1415 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); 1478 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
1416 1479
1417 wmb(); 1480 wmb();
@@ -1431,6 +1494,13 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
1431 if (tx_idx != tx_start_idx + num_pkts) 1494 if (tx_idx != tx_start_idx + num_pkts)
1432 goto test_loopback_exit; 1495 goto test_loopback_exit;
1433 1496
1497 /* Unlike HC IGU won't generate an interrupt for status block
1498 * updates that have been performed while interrupts were
1499 * disabled.
1500 */
1501 if (bp->common.int_block == INT_BLOCK_IGU)
1502 bnx2x_tx_int(fp_tx);
1503
1434 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb); 1504 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
1435 if (rx_idx != rx_start_idx + num_pkts) 1505 if (rx_idx != rx_start_idx + num_pkts)
1436 goto test_loopback_exit; 1506 goto test_loopback_exit;
@@ -1573,8 +1643,7 @@ static int bnx2x_test_intr(struct bnx2x *bp)
1573 1643
1574 config->hdr.length = 0; 1644 config->hdr.length = 0;
1575 if (CHIP_IS_E1(bp)) 1645 if (CHIP_IS_E1(bp))
1576 /* use last unicast entries */ 1646 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
1577 config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
1578 else 1647 else
1579 config->hdr.offset = BP_FUNC(bp); 1648 config->hdr.offset = BP_FUNC(bp);
1580 config->hdr.client_id = bp->fp->cl_id; 1649 config->hdr.client_id = bp->fp->cl_id;
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index 596041cbd977..18c8e23a0e82 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -663,6 +663,7 @@ struct shm_dev_info { /* size */
663#define FUNC_7 7 663#define FUNC_7 7
664#define E1_FUNC_MAX 2 664#define E1_FUNC_MAX 2
665#define E1H_FUNC_MAX 8 665#define E1H_FUNC_MAX 8
666#define E2_FUNC_MAX 4 /* per path */
666 667
667#define VN_0 0 668#define VN_0 0
668#define VN_1 1 669#define VN_1 1
@@ -821,6 +822,9 @@ struct drv_func_mb {
821#define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000 822#define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000
822#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000 823#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
823#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000 824#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
825 /* Load common chip is supported from bc 6.0.0 */
826#define REQ_BC_VER_4_DRV_LOAD_COMMON_CHIP 0x00060000
827#define FW_MSG_CODE_DRV_LOAD_COMMON_CHIP 0x10130000
824#define FW_MSG_CODE_DRV_LOAD_REFUSED 0x10200000 828#define FW_MSG_CODE_DRV_LOAD_REFUSED 0x10200000
825#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000 829#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
826#define FW_MSG_CODE_DRV_UNLOAD_COMMON 0x20100000 830#define FW_MSG_CODE_DRV_UNLOAD_COMMON 0x20100000
@@ -1026,7 +1030,17 @@ struct shmem_region { /* SharedMem Offset (size) */
1026 1030
1027}; /* 57710 = 0x6dc | 57711 = 0x7E4 | 57712 = 0x734 */ 1031}; /* 57710 = 0x6dc | 57711 = 0x7E4 | 57712 = 0x734 */
1028 1032
1033struct fw_flr_ack {
1034 u32 pf_ack;
1035 u32 vf_ack[1];
1036 u32 iov_dis_ack;
1037};
1029 1038
1039struct fw_flr_mb {
1040 u32 aggint;
1041 u32 opgen_addr;
1042 struct fw_flr_ack ack;
1043};
1030 1044
1031 1045
1032struct shmem2_region { 1046struct shmem2_region {
@@ -1046,7 +1060,20 @@ struct shmem2_region {
1046 * For backwards compatibility, if the mf_cfg_addr does not exist 1060 * For backwards compatibility, if the mf_cfg_addr does not exist
1047 * (the size filed is smaller than 0xc) the mf_cfg resides at the 1061 * (the size filed is smaller than 0xc) the mf_cfg resides at the
1048 * end of struct shmem_region 1062 * end of struct shmem_region
1063 */
1064 u32 mf_cfg_addr;
1065#define SHMEM_MF_CFG_ADDR_NONE 0x00000000
1066
1067 struct fw_flr_mb flr_mb;
1068 u32 reserved[3];
1069 /*
1070 * The other shmemX_base_addr holds the other path's shmem address
1071 * required for example in case of common phy init, or for path1 to know
1072 * the address of mcp debug trace which is located in offset from shmem
1073 * of path0
1049 */ 1074 */
1075 u32 other_shmem_base_addr;
1076 u32 other_shmem2_base_addr;
1050}; 1077};
1051 1078
1052 1079
@@ -1206,10 +1233,126 @@ struct bmac1_stats {
1206 u32 rx_stat_gripj_hi; 1233 u32 rx_stat_gripj_hi;
1207}; 1234};
1208 1235
1236struct bmac2_stats {
1237 u32 tx_stat_gtpk_lo; /* gtpok */
1238 u32 tx_stat_gtpk_hi; /* gtpok */
1239 u32 tx_stat_gtxpf_lo; /* gtpf */
1240 u32 tx_stat_gtxpf_hi; /* gtpf */
1241 u32 tx_stat_gtpp_lo; /* NEW BMAC2 */
1242 u32 tx_stat_gtpp_hi; /* NEW BMAC2 */
1243 u32 tx_stat_gtfcs_lo;
1244 u32 tx_stat_gtfcs_hi;
1245 u32 tx_stat_gtuca_lo; /* NEW BMAC2 */
1246 u32 tx_stat_gtuca_hi; /* NEW BMAC2 */
1247 u32 tx_stat_gtmca_lo;
1248 u32 tx_stat_gtmca_hi;
1249 u32 tx_stat_gtbca_lo;
1250 u32 tx_stat_gtbca_hi;
1251 u32 tx_stat_gtovr_lo;
1252 u32 tx_stat_gtovr_hi;
1253 u32 tx_stat_gtfrg_lo;
1254 u32 tx_stat_gtfrg_hi;
1255 u32 tx_stat_gtpkt1_lo; /* gtpkt */
1256 u32 tx_stat_gtpkt1_hi; /* gtpkt */
1257 u32 tx_stat_gt64_lo;
1258 u32 tx_stat_gt64_hi;
1259 u32 tx_stat_gt127_lo;
1260 u32 tx_stat_gt127_hi;
1261 u32 tx_stat_gt255_lo;
1262 u32 tx_stat_gt255_hi;
1263 u32 tx_stat_gt511_lo;
1264 u32 tx_stat_gt511_hi;
1265 u32 tx_stat_gt1023_lo;
1266 u32 tx_stat_gt1023_hi;
1267 u32 tx_stat_gt1518_lo;
1268 u32 tx_stat_gt1518_hi;
1269 u32 tx_stat_gt2047_lo;
1270 u32 tx_stat_gt2047_hi;
1271 u32 tx_stat_gt4095_lo;
1272 u32 tx_stat_gt4095_hi;
1273 u32 tx_stat_gt9216_lo;
1274 u32 tx_stat_gt9216_hi;
1275 u32 tx_stat_gt16383_lo;
1276 u32 tx_stat_gt16383_hi;
1277 u32 tx_stat_gtmax_lo;
1278 u32 tx_stat_gtmax_hi;
1279 u32 tx_stat_gtufl_lo;
1280 u32 tx_stat_gtufl_hi;
1281 u32 tx_stat_gterr_lo;
1282 u32 tx_stat_gterr_hi;
1283 u32 tx_stat_gtbyt_lo;
1284 u32 tx_stat_gtbyt_hi;
1285
1286 u32 rx_stat_gr64_lo;
1287 u32 rx_stat_gr64_hi;
1288 u32 rx_stat_gr127_lo;
1289 u32 rx_stat_gr127_hi;
1290 u32 rx_stat_gr255_lo;
1291 u32 rx_stat_gr255_hi;
1292 u32 rx_stat_gr511_lo;
1293 u32 rx_stat_gr511_hi;
1294 u32 rx_stat_gr1023_lo;
1295 u32 rx_stat_gr1023_hi;
1296 u32 rx_stat_gr1518_lo;
1297 u32 rx_stat_gr1518_hi;
1298 u32 rx_stat_gr2047_lo;
1299 u32 rx_stat_gr2047_hi;
1300 u32 rx_stat_gr4095_lo;
1301 u32 rx_stat_gr4095_hi;
1302 u32 rx_stat_gr9216_lo;
1303 u32 rx_stat_gr9216_hi;
1304 u32 rx_stat_gr16383_lo;
1305 u32 rx_stat_gr16383_hi;
1306 u32 rx_stat_grmax_lo;
1307 u32 rx_stat_grmax_hi;
1308 u32 rx_stat_grpkt_lo;
1309 u32 rx_stat_grpkt_hi;
1310 u32 rx_stat_grfcs_lo;
1311 u32 rx_stat_grfcs_hi;
1312 u32 rx_stat_gruca_lo;
1313 u32 rx_stat_gruca_hi;
1314 u32 rx_stat_grmca_lo;
1315 u32 rx_stat_grmca_hi;
1316 u32 rx_stat_grbca_lo;
1317 u32 rx_stat_grbca_hi;
1318 u32 rx_stat_grxpf_lo; /* grpf */
1319 u32 rx_stat_grxpf_hi; /* grpf */
1320 u32 rx_stat_grpp_lo;
1321 u32 rx_stat_grpp_hi;
1322 u32 rx_stat_grxuo_lo; /* gruo */
1323 u32 rx_stat_grxuo_hi; /* gruo */
1324 u32 rx_stat_grjbr_lo;
1325 u32 rx_stat_grjbr_hi;
1326 u32 rx_stat_grovr_lo;
1327 u32 rx_stat_grovr_hi;
1328 u32 rx_stat_grxcf_lo; /* grcf */
1329 u32 rx_stat_grxcf_hi; /* grcf */
1330 u32 rx_stat_grflr_lo;
1331 u32 rx_stat_grflr_hi;
1332 u32 rx_stat_grpok_lo;
1333 u32 rx_stat_grpok_hi;
1334 u32 rx_stat_grmeg_lo;
1335 u32 rx_stat_grmeg_hi;
1336 u32 rx_stat_grmeb_lo;
1337 u32 rx_stat_grmeb_hi;
1338 u32 rx_stat_grbyt_lo;
1339 u32 rx_stat_grbyt_hi;
1340 u32 rx_stat_grund_lo;
1341 u32 rx_stat_grund_hi;
1342 u32 rx_stat_grfrg_lo;
1343 u32 rx_stat_grfrg_hi;
1344 u32 rx_stat_grerb_lo; /* grerrbyt */
1345 u32 rx_stat_grerb_hi; /* grerrbyt */
1346 u32 rx_stat_grfre_lo; /* grfrerr */
1347 u32 rx_stat_grfre_hi; /* grfrerr */
1348 u32 rx_stat_gripj_lo;
1349 u32 rx_stat_gripj_hi;
1350};
1209 1351
1210union mac_stats { 1352union mac_stats {
1211 struct emac_stats emac_stats; 1353 struct emac_stats emac_stats;
1212 struct bmac1_stats bmac1_stats; 1354 struct bmac1_stats bmac1_stats;
1355 struct bmac2_stats bmac2_stats;
1213}; 1356};
1214 1357
1215 1358
@@ -1594,6 +1737,24 @@ union igu_consprod_reg {
1594 1737
1595 1738
1596/* 1739/*
1740 * Control register for the IGU command register
1741 */
1742struct igu_ctrl_reg {
1743 u32 ctrl_data;
1744#define IGU_CTRL_REG_ADDRESS (0xFFF<<0)
1745#define IGU_CTRL_REG_ADDRESS_SHIFT 0
1746#define IGU_CTRL_REG_FID (0x7F<<12)
1747#define IGU_CTRL_REG_FID_SHIFT 12
1748#define IGU_CTRL_REG_RESERVED (0x1<<19)
1749#define IGU_CTRL_REG_RESERVED_SHIFT 19
1750#define IGU_CTRL_REG_TYPE (0x1<<20)
1751#define IGU_CTRL_REG_TYPE_SHIFT 20
1752#define IGU_CTRL_REG_UNUSED (0x7FF<<21)
1753#define IGU_CTRL_REG_UNUSED_SHIFT 21
1754};
1755
1756
1757/*
1597 * Parser parsing flags field 1758 * Parser parsing flags field
1598 */ 1759 */
1599struct parsing_flags { 1760struct parsing_flags {
@@ -1924,6 +2085,27 @@ struct eth_tx_parse_bd_e1x {
1924}; 2085};
1925 2086
1926/* 2087/*
2088 * Tx parsing BD structure for ETH E2
2089 */
2090struct eth_tx_parse_bd_e2 {
2091 __le16 dst_mac_addr_lo;
2092 __le16 dst_mac_addr_mid;
2093 __le16 dst_mac_addr_hi;
2094 __le16 src_mac_addr_lo;
2095 __le16 src_mac_addr_mid;
2096 __le16 src_mac_addr_hi;
2097 __le32 parsing_data;
2098#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x1FFF<<0)
2099#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0
2100#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<13)
2101#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 13
2102#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<17)
2103#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 17
2104#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<31)
2105#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 31
2106};
2107
2108/*
1927 * The last BD in the BD memory will hold a pointer to the next BD memory 2109 * The last BD in the BD memory will hold a pointer to the next BD memory
1928 */ 2110 */
1929struct eth_tx_next_bd { 2111struct eth_tx_next_bd {
@@ -1939,6 +2121,7 @@ union eth_tx_bd_types {
1939 struct eth_tx_start_bd start_bd; 2121 struct eth_tx_start_bd start_bd;
1940 struct eth_tx_bd reg_bd; 2122 struct eth_tx_bd reg_bd;
1941 struct eth_tx_parse_bd_e1x parse_bd_e1x; 2123 struct eth_tx_parse_bd_e1x parse_bd_e1x;
2124 struct eth_tx_parse_bd_e2 parse_bd_e2;
1942 struct eth_tx_next_bd next_bd; 2125 struct eth_tx_next_bd next_bd;
1943}; 2126};
1944 2127
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h
index 5ae22e085518..a9d54874a559 100644
--- a/drivers/net/bnx2x/bnx2x_init.h
+++ b/drivers/net/bnx2x/bnx2x_init.h
@@ -97,6 +97,9 @@
97#define MISC_AEU_BLOCK 35 97#define MISC_AEU_BLOCK 35
98#define PGLUE_B_BLOCK 36 98#define PGLUE_B_BLOCK 36
99#define IGU_BLOCK 37 99#define IGU_BLOCK 37
100#define ATC_BLOCK 38
101#define QM_4PORT_BLOCK 39
102#define XSEM_4PORT_BLOCK 40
100 103
101 104
102/* Returns the index of start or end of a specific block stage in ops array*/ 105/* Returns the index of start or end of a specific block stage in ops array*/
diff --git a/drivers/net/bnx2x/bnx2x_init_ops.h b/drivers/net/bnx2x/bnx2x_init_ops.h
index aae7fea00622..e65de784182c 100644
--- a/drivers/net/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/bnx2x/bnx2x_init_ops.h
@@ -486,18 +486,30 @@ static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order)
486 REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order); 486 REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order);
487 REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order); 487 REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order);
488 488
489 if (r_order == MAX_RD_ORD) 489 if ((CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) && (r_order == MAX_RD_ORD))
490 REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00); 490 REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00);
491 491
492 REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order)); 492 if (CHIP_IS_E2(bp))
493 REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order));
494 else
495 REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));
493 496
494 if (CHIP_IS_E1H(bp)) { 497 if (CHIP_IS_E1H(bp) || CHIP_IS_E2(bp)) {
495 /* MPS w_order optimal TH presently TH 498 /* MPS w_order optimal TH presently TH
496 * 128 0 0 2 499 * 128 0 0 2
497 * 256 1 1 3 500 * 256 1 1 3
498 * >=512 2 2 3 501 * >=512 2 2 3
499 */ 502 */
500 val = ((w_order == 0) ? 2 : 3); 503 /* DMAE is special */
504 if (CHIP_IS_E2(bp)) {
505 /* E2 can use optimal TH */
506 val = w_order;
507 REG_WR(bp, PXP2_REG_WR_DMAE_MPS, val);
508 } else {
509 val = ((w_order == 0) ? 2 : 3);
510 REG_WR(bp, PXP2_REG_WR_DMAE_MPS, 2);
511 }
512
501 REG_WR(bp, PXP2_REG_WR_HC_MPS, val); 513 REG_WR(bp, PXP2_REG_WR_HC_MPS, val);
502 REG_WR(bp, PXP2_REG_WR_USDM_MPS, val); 514 REG_WR(bp, PXP2_REG_WR_USDM_MPS, val);
503 REG_WR(bp, PXP2_REG_WR_CSDM_MPS, val); 515 REG_WR(bp, PXP2_REG_WR_CSDM_MPS, val);
@@ -507,9 +519,15 @@ static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order)
507 REG_WR(bp, PXP2_REG_WR_TM_MPS, val); 519 REG_WR(bp, PXP2_REG_WR_TM_MPS, val);
508 REG_WR(bp, PXP2_REG_WR_SRC_MPS, val); 520 REG_WR(bp, PXP2_REG_WR_SRC_MPS, val);
509 REG_WR(bp, PXP2_REG_WR_DBG_MPS, val); 521 REG_WR(bp, PXP2_REG_WR_DBG_MPS, val);
510 REG_WR(bp, PXP2_REG_WR_DMAE_MPS, 2); /* DMAE is special */
511 REG_WR(bp, PXP2_REG_WR_CDU_MPS, val); 522 REG_WR(bp, PXP2_REG_WR_CDU_MPS, val);
512 } 523 }
524
525 /* Validate number of tags suppoted by device */
526#define PCIE_REG_PCIER_TL_HDR_FC_ST 0x2980
527 val = REG_RD(bp, PCIE_REG_PCIER_TL_HDR_FC_ST);
528 val &= 0xFF;
529 if (val <= 0x20)
530 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x20);
513} 531}
514 532
515/**************************************************************************** 533/****************************************************************************
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 51d468d430ee..3e99bf9c42b9 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -377,9 +377,60 @@ static u8 bnx2x_emac_enable(struct link_params *params,
377 return 0; 377 return 0;
378} 378}
379 379
380static void bnx2x_update_bmac2(struct link_params *params,
381 struct link_vars *vars,
382 u8 is_lb)
383{
384 /*
385 * Set rx control: Strip CRC and enable BigMAC to relay
386 * control packets to the system as well
387 */
388 u32 wb_data[2];
389 struct bnx2x *bp = params->bp;
390 u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
391 NIG_REG_INGRESS_BMAC0_MEM;
392 u32 val = 0x14;
380 393
394 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
395 /* Enable BigMAC to react on received Pause packets */
396 val |= (1<<5);
397 wb_data[0] = val;
398 wb_data[1] = 0;
399 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL,
400 wb_data, 2);
401 udelay(30);
381 402
382static u8 bnx2x_bmac_enable(struct link_params *params, struct link_vars *vars, 403 /* Tx control */
404 val = 0xc0;
405 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
406 val |= 0x800000;
407 wb_data[0] = val;
408 wb_data[1] = 0;
409 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_CONTROL,
410 wb_data, 2);
411
412 val = 0x8000;
413 wb_data[0] = val;
414 wb_data[1] = 0;
415 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL,
416 wb_data, 2);
417
418 /* mac control */
419 val = 0x3; /* Enable RX and TX */
420 if (is_lb) {
421 val |= 0x4; /* Local loopback */
422 DP(NETIF_MSG_LINK, "enable bmac loopback\n");
423 }
424
425 wb_data[0] = val;
426 wb_data[1] = 0;
427 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL,
428 wb_data, 2);
429}
430
431
432static u8 bnx2x_bmac1_enable(struct link_params *params,
433 struct link_vars *vars,
383 u8 is_lb) 434 u8 is_lb)
384{ 435{
385 struct bnx2x *bp = params->bp; 436 struct bnx2x *bp = params->bp;
@@ -389,17 +440,7 @@ static u8 bnx2x_bmac_enable(struct link_params *params, struct link_vars *vars,
389 u32 wb_data[2]; 440 u32 wb_data[2];
390 u32 val; 441 u32 val;
391 442
392 DP(NETIF_MSG_LINK, "Enabling BigMAC\n"); 443 DP(NETIF_MSG_LINK, "Enabling BigMAC1\n");
393 /* reset and unreset the BigMac */
394 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
395 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
396 msleep(1);
397
398 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
399 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
400
401 /* enable access for bmac registers */
402 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
403 444
404 /* XGXS control */ 445 /* XGXS control */
405 wb_data[0] = 0x3c; 446 wb_data[0] = 0x3c;
@@ -479,6 +520,103 @@ static u8 bnx2x_bmac_enable(struct link_params *params, struct link_vars *vars,
479 wb_data, 2); 520 wb_data, 2);
480 } 521 }
481 522
523
524 return 0;
525}
526
527static u8 bnx2x_bmac2_enable(struct link_params *params,
528 struct link_vars *vars,
529 u8 is_lb)
530{
531 struct bnx2x *bp = params->bp;
532 u8 port = params->port;
533 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
534 NIG_REG_INGRESS_BMAC0_MEM;
535 u32 wb_data[2];
536
537 DP(NETIF_MSG_LINK, "Enabling BigMAC2\n");
538
539 wb_data[0] = 0;
540 wb_data[1] = 0;
541 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL,
542 wb_data, 2);
543 udelay(30);
544
545 /* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */
546 wb_data[0] = 0x3c;
547 wb_data[1] = 0;
548 REG_WR_DMAE(bp, bmac_addr +
549 BIGMAC2_REGISTER_BMAC_XGXS_CONTROL,
550 wb_data, 2);
551
552 udelay(30);
553
554 /* tx MAC SA */
555 wb_data[0] = ((params->mac_addr[2] << 24) |
556 (params->mac_addr[3] << 16) |
557 (params->mac_addr[4] << 8) |
558 params->mac_addr[5]);
559 wb_data[1] = ((params->mac_addr[0] << 8) |
560 params->mac_addr[1]);
561 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR,
562 wb_data, 2);
563
564 udelay(30);
565
566 /* Configure SAFC */
567 wb_data[0] = 0x1000200;
568 wb_data[1] = 0;
569 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS,
570 wb_data, 2);
571 udelay(30);
572
573 /* set rx mtu */
574 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
575 wb_data[1] = 0;
576 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE,
577 wb_data, 2);
578 udelay(30);
579
580 /* set tx mtu */
581 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
582 wb_data[1] = 0;
583 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE,
584 wb_data, 2);
585 udelay(30);
586 /* set cnt max size */
587 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2;
588 wb_data[1] = 0;
589 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE,
590 wb_data, 2);
591 udelay(30);
592 bnx2x_update_bmac2(params, vars, is_lb);
593
594 return 0;
595}
596
597u8 bnx2x_bmac_enable(struct link_params *params,
598 struct link_vars *vars,
599 u8 is_lb)
600{
601 u8 rc, port = params->port;
602 struct bnx2x *bp = params->bp;
603 u32 val;
604 /* reset and unreset the BigMac */
605 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
606 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
607 udelay(10);
608
609 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
610 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
611
612 /* enable access for bmac registers */
613 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
614
615 /* Enable BMAC according to BMAC type*/
616 if (CHIP_IS_E2(bp))
617 rc = bnx2x_bmac2_enable(params, vars, is_lb);
618 else
619 rc = bnx2x_bmac1_enable(params, vars, is_lb);
482 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1); 620 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
483 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0); 621 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);
484 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0); 622 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);
@@ -493,7 +631,7 @@ static u8 bnx2x_bmac_enable(struct link_params *params, struct link_vars *vars,
493 REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x1); 631 REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x1);
494 632
495 vars->mac_type = MAC_TYPE_BMAC; 633 vars->mac_type = MAC_TYPE_BMAC;
496 return 0; 634 return rc;
497} 635}
498 636
499 637
@@ -519,13 +657,25 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
519 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) && 657 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) &&
520 nig_bmac_enable) { 658 nig_bmac_enable) {
521 659
522 /* Clear Rx Enable bit in BMAC_CONTROL register */ 660 if (CHIP_IS_E2(bp)) {
523 REG_RD_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, 661 /* Clear Rx Enable bit in BMAC_CONTROL register */
524 wb_data, 2); 662 REG_RD_DMAE(bp, bmac_addr +
525 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE; 663 BIGMAC2_REGISTER_BMAC_CONTROL,
526 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, 664 wb_data, 2);
527 wb_data, 2); 665 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
528 666 REG_WR_DMAE(bp, bmac_addr +
667 BIGMAC2_REGISTER_BMAC_CONTROL,
668 wb_data, 2);
669 } else {
670 /* Clear Rx Enable bit in BMAC_CONTROL register */
671 REG_RD_DMAE(bp, bmac_addr +
672 BIGMAC_REGISTER_BMAC_CONTROL,
673 wb_data, 2);
674 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
675 REG_WR_DMAE(bp, bmac_addr +
676 BIGMAC_REGISTER_BMAC_CONTROL,
677 wb_data, 2);
678 }
529 msleep(1); 679 msleep(1);
530 } 680 }
531} 681}
@@ -821,23 +971,31 @@ u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
821 return -EINVAL; 971 return -EINVAL;
822} 972}
823 973
824static void bnx2x_set_aer_mmd(struct link_params *params, 974static void bnx2x_set_aer_mmd_xgxs(struct link_params *params,
825 struct bnx2x_phy *phy) 975 struct bnx2x_phy *phy)
826{ 976{
827 struct bnx2x *bp = params->bp;
828 u32 ser_lane; 977 u32 ser_lane;
829 u16 offset; 978 u16 offset, aer_val;
830 979 struct bnx2x *bp = params->bp;
831 ser_lane = ((params->lane_config & 980 ser_lane = ((params->lane_config &
832 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> 981 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
833 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); 982 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
834 983
835 offset = (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ? 984 offset = phy->addr + ser_lane;
836 (phy->addr + ser_lane) : 0; 985 if (CHIP_IS_E2(bp))
837 986 aer_val = 0x2800 + offset - 1;
987 else
988 aer_val = 0x3800 + offset;
838 CL45_WR_OVER_CL22(bp, phy, 989 CL45_WR_OVER_CL22(bp, phy,
839 MDIO_REG_BANK_AER_BLOCK, 990 MDIO_REG_BANK_AER_BLOCK,
840 MDIO_AER_BLOCK_AER_REG, 0x3800 + offset); 991 MDIO_AER_BLOCK_AER_REG, aer_val);
992}
993static void bnx2x_set_aer_mmd_serdes(struct bnx2x *bp,
994 struct bnx2x_phy *phy)
995{
996 CL45_WR_OVER_CL22(bp, phy,
997 MDIO_REG_BANK_AER_BLOCK,
998 MDIO_AER_BLOCK_AER_REG, 0x3800);
841} 999}
842 1000
843/******************************************************************/ 1001/******************************************************************/
@@ -2046,12 +2204,12 @@ static u8 bnx2x_init_serdes(struct bnx2x_phy *phy,
2046 u8 rc; 2204 u8 rc;
2047 vars->phy_flags |= PHY_SGMII_FLAG; 2205 vars->phy_flags |= PHY_SGMII_FLAG;
2048 bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); 2206 bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
2049 bnx2x_set_aer_mmd(params, phy); 2207 bnx2x_set_aer_mmd_serdes(params->bp, phy);
2050 rc = bnx2x_reset_unicore(params, phy, 1); 2208 rc = bnx2x_reset_unicore(params, phy, 1);
2051 /* reset the SerDes and wait for reset bit return low */ 2209 /* reset the SerDes and wait for reset bit return low */
2052 if (rc != 0) 2210 if (rc != 0)
2053 return rc; 2211 return rc;
2054 bnx2x_set_aer_mmd(params, phy); 2212 bnx2x_set_aer_mmd_serdes(params->bp, phy);
2055 2213
2056 return rc; 2214 return rc;
2057} 2215}
@@ -2076,7 +2234,7 @@ static u8 bnx2x_init_xgxs(struct bnx2x_phy *phy,
2076 vars->phy_flags &= ~PHY_SGMII_FLAG; 2234 vars->phy_flags &= ~PHY_SGMII_FLAG;
2077 2235
2078 bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); 2236 bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
2079 bnx2x_set_aer_mmd(params, phy); 2237 bnx2x_set_aer_mmd_xgxs(params, phy);
2080 bnx2x_set_master_ln(params, phy); 2238 bnx2x_set_master_ln(params, phy);
2081 2239
2082 rc = bnx2x_reset_unicore(params, phy, 0); 2240 rc = bnx2x_reset_unicore(params, phy, 0);
@@ -2084,7 +2242,7 @@ static u8 bnx2x_init_xgxs(struct bnx2x_phy *phy,
2084 if (rc != 0) 2242 if (rc != 0)
2085 return rc; 2243 return rc;
2086 2244
2087 bnx2x_set_aer_mmd(params, phy); 2245 bnx2x_set_aer_mmd_xgxs(params, phy);
2088 2246
2089 /* setting the masterLn_def again after the reset */ 2247 /* setting the masterLn_def again after the reset */
2090 bnx2x_set_master_ln(params, phy); 2248 bnx2x_set_master_ln(params, phy);
@@ -2358,7 +2516,7 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
2358 0x6041); 2516 0x6041);
2359 msleep(200); 2517 msleep(200);
2360 /* set aer mmd back */ 2518 /* set aer mmd back */
2361 bnx2x_set_aer_mmd(params, phy); 2519 bnx2x_set_aer_mmd_xgxs(params, phy);
2362 2520
2363 /* and md_devad */ 2521 /* and md_devad */
2364 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 2522 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
@@ -2721,7 +2879,10 @@ static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
2721 struct bnx2x *bp = params->bp; 2879 struct bnx2x *bp = params->bp;
2722 u8 gpio_port; 2880 u8 gpio_port;
2723 /* HW reset */ 2881 /* HW reset */
2724 gpio_port = params->port; 2882 if (CHIP_IS_E2(bp))
2883 gpio_port = BP_PATH(bp);
2884 else
2885 gpio_port = params->port;
2725 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 2886 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2726 MISC_REGISTERS_GPIO_OUTPUT_LOW, 2887 MISC_REGISTERS_GPIO_OUTPUT_LOW,
2727 gpio_port); 2888 gpio_port);
@@ -2799,8 +2960,9 @@ static u8 bnx2x_update_link_up(struct link_params *params,
2799 } 2960 }
2800 2961
2801 /* PBF - link up */ 2962 /* PBF - link up */
2802 rc |= bnx2x_pbf_update(params, vars->flow_ctrl, 2963 if (!(CHIP_IS_E2(bp)))
2803 vars->line_speed); 2964 rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
2965 vars->line_speed);
2804 2966
2805 /* disable drain */ 2967 /* disable drain */
2806 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0); 2968 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
@@ -3443,7 +3605,10 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
3443 u8 gpio_port; 3605 u8 gpio_port;
3444 DP(NETIF_MSG_LINK, "Init 8073\n"); 3606 DP(NETIF_MSG_LINK, "Init 8073\n");
3445 3607
3446 gpio_port = params->port; 3608 if (CHIP_IS_E2(bp))
3609 gpio_port = BP_PATH(bp);
3610 else
3611 gpio_port = params->port;
3447 /* Restore normal power mode*/ 3612 /* Restore normal power mode*/
3448 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 3613 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
3449 MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); 3614 MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
@@ -3680,7 +3845,10 @@ static void bnx2x_8073_link_reset(struct bnx2x_phy *phy,
3680{ 3845{
3681 struct bnx2x *bp = params->bp; 3846 struct bnx2x *bp = params->bp;
3682 u8 gpio_port; 3847 u8 gpio_port;
3683 gpio_port = params->port; 3848 if (CHIP_IS_E2(bp))
3849 gpio_port = BP_PATH(bp);
3850 else
3851 gpio_port = params->port;
3684 DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n", 3852 DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n",
3685 gpio_port); 3853 gpio_port);
3686 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 3854 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
@@ -6371,7 +6539,10 @@ static u8 bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
6371 phy->mdio_ctrl = bnx2x_get_emac_base(bp, 6539 phy->mdio_ctrl = bnx2x_get_emac_base(bp,
6372 SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH, 6540 SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH,
6373 port); 6541 port);
6374 phy->def_md_devad = DEFAULT_PHY_DEV_ADDR; 6542 if (CHIP_IS_E2(bp))
6543 phy->def_md_devad = E2_DEFAULT_PHY_DEV_ADDR;
6544 else
6545 phy->def_md_devad = DEFAULT_PHY_DEV_ADDR;
6375 6546
6376 DP(NETIF_MSG_LINK, "Internal phy port=%d, addr=0x%x, mdio_ctl=0x%x\n", 6547 DP(NETIF_MSG_LINK, "Internal phy port=%d, addr=0x%x, mdio_ctl=0x%x\n",
6377 port, phy->addr, phy->mdio_ctrl); 6548 port, phy->addr, phy->mdio_ctrl);
@@ -6742,7 +6913,9 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
6742 } 6913 }
6743 6914
6744 bnx2x_emac_enable(params, vars, 0); 6915 bnx2x_emac_enable(params, vars, 0);
6745 bnx2x_pbf_update(params, vars->flow_ctrl, vars->line_speed); 6916 if (!(CHIP_IS_E2(bp)))
6917 bnx2x_pbf_update(params, vars->flow_ctrl,
6918 vars->line_speed);
6746 /* disable drain */ 6919 /* disable drain */
6747 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); 6920 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
6748 6921
@@ -6932,18 +7105,34 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
6932/****************************************************************************/ 7105/****************************************************************************/
6933/* Common function */ 7106/* Common function */
6934/****************************************************************************/ 7107/****************************************************************************/
6935static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base, u8 phy_index) 7108static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
7109 u32 shmem_base_path[],
7110 u32 shmem2_base_path[], u8 phy_index,
7111 u32 chip_id)
6936{ 7112{
6937 struct bnx2x_phy phy[PORT_MAX]; 7113 struct bnx2x_phy phy[PORT_MAX];
6938 struct bnx2x_phy *phy_blk[PORT_MAX]; 7114 struct bnx2x_phy *phy_blk[PORT_MAX];
6939 u16 val; 7115 u16 val;
6940 s8 port; 7116 s8 port;
7117 s8 port_of_path = 0;
6941 7118
6942 /* PART1 - Reset both phys */ 7119 /* PART1 - Reset both phys */
6943 for (port = PORT_MAX - 1; port >= PORT_0; port--) { 7120 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
7121 u32 shmem_base, shmem2_base;
7122 /* In E2, same phy is using for port0 of the two paths */
7123 if (CHIP_IS_E2(bp)) {
7124 shmem_base = shmem_base_path[port];
7125 shmem2_base = shmem2_base_path[port];
7126 port_of_path = 0;
7127 } else {
7128 shmem_base = shmem_base_path[0];
7129 shmem2_base = shmem2_base_path[0];
7130 port_of_path = port;
7131 }
7132
6944 /* Extract the ext phy address for the port */ 7133 /* Extract the ext phy address for the port */
6945 if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base, 7134 if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
6946 port, &phy[port]) != 7135 port_of_path, &phy[port]) !=
6947 0) { 7136 0) {
6948 DP(NETIF_MSG_LINK, "populate_phy failed\n"); 7137 DP(NETIF_MSG_LINK, "populate_phy failed\n");
6949 return -EINVAL; 7138 return -EINVAL;
@@ -6981,9 +7170,15 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base, u32 shmem
6981 /* PART2 - Download firmware to both phys */ 7170 /* PART2 - Download firmware to both phys */
6982 for (port = PORT_MAX - 1; port >= PORT_0; port--) { 7171 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
6983 u16 fw_ver1; 7172 u16 fw_ver1;
7173 if (CHIP_IS_E2(bp))
7174 port_of_path = 0;
7175 else
7176 port_of_path = port;
6984 7177
7178 DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
7179 phy_blk[port]->addr);
6985 bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port], 7180 bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
6986 port); 7181 port_of_path);
6987 7182
6988 bnx2x_cl45_read(bp, phy_blk[port], 7183 bnx2x_cl45_read(bp, phy_blk[port],
6989 MDIO_PMA_DEVAD, 7184 MDIO_PMA_DEVAD,
@@ -7039,9 +7234,10 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base, u32 shmem
7039 } 7234 }
7040 return 0; 7235 return 0;
7041} 7236}
7042 7237static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp,
7043static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp, u32 shmem_base, 7238 u32 shmem_base_path[],
7044 u32 shmem2_base, u8 phy_index) 7239 u32 shmem2_base_path[], u8 phy_index,
7240 u32 chip_id)
7045{ 7241{
7046 u32 val; 7242 u32 val;
7047 s8 port; 7243 s8 port;
@@ -7056,6 +7252,16 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp, u32 shmem_base,
7056 bnx2x_ext_phy_hw_reset(bp, 1); 7252 bnx2x_ext_phy_hw_reset(bp, 1);
7057 msleep(5); 7253 msleep(5);
7058 for (port = 0; port < PORT_MAX; port++) { 7254 for (port = 0; port < PORT_MAX; port++) {
7255 u32 shmem_base, shmem2_base;
7256
7257 /* In E2, same phy is using for port0 of the two paths */
7258 if (CHIP_IS_E2(bp)) {
7259 shmem_base = shmem_base_path[port];
7260 shmem2_base = shmem2_base_path[port];
7261 } else {
7262 shmem_base = shmem_base_path[0];
7263 shmem2_base = shmem2_base_path[0];
7264 }
7059 /* Extract the ext phy address for the port */ 7265 /* Extract the ext phy address for the port */
7060 if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base, 7266 if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
7061 port, &phy) != 7267 port, &phy) !=
@@ -7077,14 +7283,16 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp, u32 shmem_base,
7077 7283
7078 return 0; 7284 return 0;
7079} 7285}
7080static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base, 7286static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
7081 u32 shmem2_base, u8 phy_index) 7287 u32 shmem_base_path[],
7288 u32 shmem2_base_path[], u8 phy_index,
7289 u32 chip_id)
7082{ 7290{
7083 s8 port; 7291 s8 port;
7084 u32 swap_val, swap_override; 7292 u32 swap_val, swap_override;
7085 struct bnx2x_phy phy[PORT_MAX]; 7293 struct bnx2x_phy phy[PORT_MAX];
7086 struct bnx2x_phy *phy_blk[PORT_MAX]; 7294 struct bnx2x_phy *phy_blk[PORT_MAX];
7087 DP(NETIF_MSG_LINK, "Executing BCM8727 common init\n"); 7295 s8 port_of_path;
7088 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); 7296 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7089 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); 7297 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7090 7298
@@ -7099,19 +7307,33 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base,
7099 7307
7100 /* PART1 - Reset both phys */ 7308 /* PART1 - Reset both phys */
7101 for (port = PORT_MAX - 1; port >= PORT_0; port--) { 7309 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
7310 u32 shmem_base, shmem2_base;
7311
7312 /* In E2, same phy is using for port0 of the two paths */
7313 if (CHIP_IS_E2(bp)) {
7314 shmem_base = shmem_base_path[port];
7315 shmem2_base = shmem2_base_path[port];
7316 port_of_path = 0;
7317 } else {
7318 shmem_base = shmem_base_path[0];
7319 shmem2_base = shmem2_base_path[0];
7320 port_of_path = port;
7321 }
7322
7102 /* Extract the ext phy address for the port */ 7323 /* Extract the ext phy address for the port */
7103 if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base, 7324 if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
7104 port, &phy[port]) != 7325 port_of_path, &phy[port]) !=
7105 0) { 7326 0) {
7106 DP(NETIF_MSG_LINK, "populate phy failed\n"); 7327 DP(NETIF_MSG_LINK, "populate phy failed\n");
7107 return -EINVAL; 7328 return -EINVAL;
7108 } 7329 }
7109 /* disable attentions */ 7330 /* disable attentions */
7110 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 7331 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
7111 (NIG_MASK_XGXS0_LINK_STATUS | 7332 port_of_path*4,
7112 NIG_MASK_XGXS0_LINK10G | 7333 (NIG_MASK_XGXS0_LINK_STATUS |
7113 NIG_MASK_SERDES0_LINK_STATUS | 7334 NIG_MASK_XGXS0_LINK10G |
7114 NIG_MASK_MI_INT)); 7335 NIG_MASK_SERDES0_LINK_STATUS |
7336 NIG_MASK_MI_INT));
7115 7337
7116 7338
7117 /* Reset the phy */ 7339 /* Reset the phy */
@@ -7133,9 +7355,14 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base,
7133 /* PART2 - Download firmware to both phys */ 7355 /* PART2 - Download firmware to both phys */
7134 for (port = PORT_MAX - 1; port >= PORT_0; port--) { 7356 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
7135 u16 fw_ver1; 7357 u16 fw_ver1;
7136 7358 if (CHIP_IS_E2(bp))
7359 port_of_path = 0;
7360 else
7361 port_of_path = port;
7362 DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
7363 phy_blk[port]->addr);
7137 bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port], 7364 bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
7138 port); 7365 port_of_path);
7139 bnx2x_cl45_read(bp, phy_blk[port], 7366 bnx2x_cl45_read(bp, phy_blk[port],
7140 MDIO_PMA_DEVAD, 7367 MDIO_PMA_DEVAD,
7141 MDIO_PMA_REG_ROM_VER1, &fw_ver1); 7368 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
@@ -7151,29 +7378,32 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base,
7151 return 0; 7378 return 0;
7152} 7379}
7153 7380
7154static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base, 7381static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
7155 u32 shmem2_base, u8 phy_index, 7382 u32 shmem2_base_path[], u8 phy_index,
7156 u32 ext_phy_type) 7383 u32 ext_phy_type, u32 chip_id)
7157{ 7384{
7158 u8 rc = 0; 7385 u8 rc = 0;
7159 7386
7160 switch (ext_phy_type) { 7387 switch (ext_phy_type) {
7161 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: 7388 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7162 rc = bnx2x_8073_common_init_phy(bp, shmem_base, 7389 rc = bnx2x_8073_common_init_phy(bp, shmem_base_path,
7163 shmem2_base, phy_index); 7390 shmem2_base_path,
7391 phy_index, chip_id);
7164 break; 7392 break;
7165 7393
7166 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: 7394 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
7167 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC: 7395 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
7168 rc = bnx2x_8727_common_init_phy(bp, shmem_base, 7396 rc = bnx2x_8727_common_init_phy(bp, shmem_base_path,
7169 shmem2_base, phy_index); 7397 shmem2_base_path,
7398 phy_index, chip_id);
7170 break; 7399 break;
7171 7400
7172 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: 7401 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7173 /* GPIO1 affects both ports, so there's need to pull 7402 /* GPIO1 affects both ports, so there's need to pull
7174 it for single port alone */ 7403 it for single port alone */
7175 rc = bnx2x_8726_common_init_phy(bp, shmem_base, 7404 rc = bnx2x_8726_common_init_phy(bp, shmem_base_path,
7176 shmem2_base, phy_index); 7405 shmem2_base_path,
7406 phy_index, chip_id);
7177 break; 7407 break;
7178 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: 7408 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7179 rc = -EINVAL; 7409 rc = -EINVAL;
@@ -7188,8 +7418,8 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base,
7188 return rc; 7418 return rc;
7189} 7419}
7190 7420
7191u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base, 7421u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
7192 u32 shmem2_base) 7422 u32 shmem2_base_path[], u32 chip_id)
7193{ 7423{
7194 u8 rc = 0; 7424 u8 rc = 0;
7195 u8 phy_index; 7425 u8 phy_index;
@@ -7203,12 +7433,13 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base,
7203 for (phy_index = EXT_PHY1; phy_index < MAX_PHYS; 7433 for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
7204 phy_index++) { 7434 phy_index++) {
7205 ext_phy_config = bnx2x_get_ext_phy_config(bp, 7435 ext_phy_config = bnx2x_get_ext_phy_config(bp,
7206 shmem_base, 7436 shmem_base_path[0],
7207 phy_index, 0); 7437 phy_index, 0);
7208 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config); 7438 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
7209 rc |= bnx2x_ext_phy_common_init(bp, shmem_base, 7439 rc |= bnx2x_ext_phy_common_init(bp, shmem_base_path,
7210 shmem2_base, 7440 shmem2_base_path,
7211 phy_index, ext_phy_type); 7441 phy_index, ext_phy_type,
7442 chip_id);
7212 } 7443 }
7213 return rc; 7444 return rc;
7214} 7445}
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
index e98ea3d19471..58a4c7199276 100644
--- a/drivers/net/bnx2x/bnx2x_link.h
+++ b/drivers/net/bnx2x/bnx2x_link.h
@@ -22,7 +22,8 @@
22/***********************************************************/ 22/***********************************************************/
23/* Defines */ 23/* Defines */
24/***********************************************************/ 24/***********************************************************/
25#define DEFAULT_PHY_DEV_ADDR 3 25#define DEFAULT_PHY_DEV_ADDR 3
26#define E2_DEFAULT_PHY_DEV_ADDR 5
26 27
27 28
28 29
@@ -315,7 +316,8 @@ u8 bnx2x_test_link(struct link_params *input, struct link_vars *vars,
315 u8 is_serdes); 316 u8 is_serdes);
316 317
317/* One-time initialization for external phy after power up */ 318/* One-time initialization for external phy after power up */
318u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base); 319u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
320 u32 shmem2_base_path[], u32 chip_id);
319 321
320/* Reset the external PHY using GPIO */ 322/* Reset the external PHY using GPIO */
321void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port); 323void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 2c04b97f85a9..0ac416a14202 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -23,7 +23,6 @@
23#include <linux/errno.h> 23#include <linux/errno.h>
24#include <linux/ioport.h> 24#include <linux/ioport.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h> 26#include <linux/interrupt.h>
28#include <linux/pci.h> 27#include <linux/pci.h>
29#include <linux/init.h> 28#include <linux/init.h>
@@ -68,6 +67,7 @@
68 __stringify(BCM_5710_FW_ENGINEERING_VERSION) 67 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
69#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw" 68#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
70#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw" 69#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
70#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
71 71
72/* Time in jiffies before concluding the transmitter is hung */ 72/* Time in jiffies before concluding the transmitter is hung */
73#define TX_TIMEOUT (5*HZ) 73#define TX_TIMEOUT (5*HZ)
@@ -77,11 +77,13 @@ static char version[] __devinitdata =
77 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 77 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
78 78
79MODULE_AUTHOR("Eliezer Tamir"); 79MODULE_AUTHOR("Eliezer Tamir");
80MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver"); 80MODULE_DESCRIPTION("Broadcom NetXtreme II "
81 "BCM57710/57711/57711E/57712/57712E Driver");
81MODULE_LICENSE("GPL"); 82MODULE_LICENSE("GPL");
82MODULE_VERSION(DRV_MODULE_VERSION); 83MODULE_VERSION(DRV_MODULE_VERSION);
83MODULE_FIRMWARE(FW_FILE_NAME_E1); 84MODULE_FIRMWARE(FW_FILE_NAME_E1);
84MODULE_FIRMWARE(FW_FILE_NAME_E1H); 85MODULE_FIRMWARE(FW_FILE_NAME_E1H);
86MODULE_FIRMWARE(FW_FILE_NAME_E2);
85 87
86static int multi_mode = 1; 88static int multi_mode = 1;
87module_param(multi_mode, int, 0); 89module_param(multi_mode, int, 0);
@@ -124,6 +126,8 @@ enum bnx2x_board_type {
124 BCM57710 = 0, 126 BCM57710 = 0,
125 BCM57711 = 1, 127 BCM57711 = 1,
126 BCM57711E = 2, 128 BCM57711E = 2,
129 BCM57712 = 3,
130 BCM57712E = 4
127}; 131};
128 132
129/* indexed by board_type, above */ 133/* indexed by board_type, above */
@@ -132,14 +136,24 @@ static struct {
132} board_info[] __devinitdata = { 136} board_info[] __devinitdata = {
133 { "Broadcom NetXtreme II BCM57710 XGb" }, 137 { "Broadcom NetXtreme II BCM57710 XGb" },
134 { "Broadcom NetXtreme II BCM57711 XGb" }, 138 { "Broadcom NetXtreme II BCM57711 XGb" },
135 { "Broadcom NetXtreme II BCM57711E XGb" } 139 { "Broadcom NetXtreme II BCM57711E XGb" },
140 { "Broadcom NetXtreme II BCM57712 XGb" },
141 { "Broadcom NetXtreme II BCM57712E XGb" }
136}; 142};
137 143
144#ifndef PCI_DEVICE_ID_NX2_57712
145#define PCI_DEVICE_ID_NX2_57712 0x1662
146#endif
147#ifndef PCI_DEVICE_ID_NX2_57712E
148#define PCI_DEVICE_ID_NX2_57712E 0x1663
149#endif
138 150
139static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = { 151static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
140 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 }, 152 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
141 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 }, 153 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
142 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E }, 154 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
155 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
156 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
143 { 0 } 157 { 0 }
144}; 158};
145 159
@@ -353,7 +367,8 @@ static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
353 u8 ticks) 367 u8 ticks)
354{ 368{
355 369
356 int index_offset = 370 int index_offset = CHIP_IS_E2(bp) ?
371 offsetof(struct hc_status_block_data_e2, index_data) :
357 offsetof(struct hc_status_block_data_e1x, index_data); 372 offsetof(struct hc_status_block_data_e1x, index_data);
358 u32 addr = BAR_CSTRORM_INTMEM + 373 u32 addr = BAR_CSTRORM_INTMEM +
359 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + 374 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
@@ -369,7 +384,8 @@ static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
369 u8 disable) 384 u8 disable)
370{ 385{
371 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); 386 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
372 int index_offset = 387 int index_offset = CHIP_IS_E2(bp) ?
388 offsetof(struct hc_status_block_data_e2, index_data) :
373 offsetof(struct hc_status_block_data_e1x, index_data); 389 offsetof(struct hc_status_block_data_e1x, index_data);
374 u32 addr = BAR_CSTRORM_INTMEM + 390 u32 addr = BAR_CSTRORM_INTMEM +
375 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + 391 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
@@ -408,6 +424,75 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
408 return val; 424 return val;
409} 425}
410 426
427#define DMAE_DP_SRC_GRC "grc src_addr [%08x]"
428#define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]"
429#define DMAE_DP_DST_GRC "grc dst_addr [%08x]"
430#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
431#define DMAE_DP_DST_NONE "dst_addr [none]"
432
433void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
434{
435 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
436
437 switch (dmae->opcode & DMAE_COMMAND_DST) {
438 case DMAE_CMD_DST_PCI:
439 if (src_type == DMAE_CMD_SRC_PCI)
440 DP(msglvl, "DMAE: opcode 0x%08x\n"
441 "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
442 "comp_addr [%x:%08x], comp_val 0x%08x\n",
443 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
444 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
445 dmae->comp_addr_hi, dmae->comp_addr_lo,
446 dmae->comp_val);
447 else
448 DP(msglvl, "DMAE: opcode 0x%08x\n"
449 "src [%08x], len [%d*4], dst [%x:%08x]\n"
450 "comp_addr [%x:%08x], comp_val 0x%08x\n",
451 dmae->opcode, dmae->src_addr_lo >> 2,
452 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
453 dmae->comp_addr_hi, dmae->comp_addr_lo,
454 dmae->comp_val);
455 break;
456 case DMAE_CMD_DST_GRC:
457 if (src_type == DMAE_CMD_SRC_PCI)
458 DP(msglvl, "DMAE: opcode 0x%08x\n"
459 "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
460 "comp_addr [%x:%08x], comp_val 0x%08x\n",
461 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
462 dmae->len, dmae->dst_addr_lo >> 2,
463 dmae->comp_addr_hi, dmae->comp_addr_lo,
464 dmae->comp_val);
465 else
466 DP(msglvl, "DMAE: opcode 0x%08x\n"
467 "src [%08x], len [%d*4], dst [%08x]\n"
468 "comp_addr [%x:%08x], comp_val 0x%08x\n",
469 dmae->opcode, dmae->src_addr_lo >> 2,
470 dmae->len, dmae->dst_addr_lo >> 2,
471 dmae->comp_addr_hi, dmae->comp_addr_lo,
472 dmae->comp_val);
473 break;
474 default:
475 if (src_type == DMAE_CMD_SRC_PCI)
476 DP(msglvl, "DMAE: opcode 0x%08x\n"
477 DP_LEVEL "src_addr [%x:%08x] len [%d * 4] "
478 "dst_addr [none]\n"
479 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
480 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
481 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
482 dmae->comp_val);
483 else
484 DP(msglvl, "DMAE: opcode 0x%08x\n"
485 DP_LEVEL "src_addr [%08x] len [%d * 4] "
486 "dst_addr [none]\n"
487 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
488 dmae->opcode, dmae->src_addr_lo >> 2,
489 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
490 dmae->comp_val);
491 break;
492 }
493
494}
495
411const u32 dmae_reg_go_c[] = { 496const u32 dmae_reg_go_c[] = {
412 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3, 497 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
413 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7, 498 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
@@ -431,85 +516,137 @@ void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
431 REG_WR(bp, dmae_reg_go_c[idx], 1); 516 REG_WR(bp, dmae_reg_go_c[idx], 1);
432} 517}
433 518
434void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, 519u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
435 u32 len32)
436{ 520{
437 struct dmae_command dmae; 521 return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
438 u32 *wb_comp = bnx2x_sp(bp, wb_comp); 522 DMAE_CMD_C_ENABLE);
439 int cnt = 200; 523}
440 524
441 if (!bp->dmae_ready) { 525u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
442 u32 *data = bnx2x_sp(bp, wb_data[0]); 526{
527 return opcode & ~DMAE_CMD_SRC_RESET;
528}
443 529
444 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)" 530u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
445 " using indirect\n", dst_addr, len32); 531 bool with_comp, u8 comp_type)
446 bnx2x_init_ind_wr(bp, dst_addr, data, len32); 532{
447 return; 533 u32 opcode = 0;
448 } 534
535 opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
536 (dst_type << DMAE_COMMAND_DST_SHIFT));
449 537
450 memset(&dmae, 0, sizeof(struct dmae_command)); 538 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
539
540 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
541 opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
542 (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
543 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
451 544
452 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
453 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
454 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
455#ifdef __BIG_ENDIAN 545#ifdef __BIG_ENDIAN
456 DMAE_CMD_ENDIANITY_B_DW_SWAP | 546 opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
457#else 547#else
458 DMAE_CMD_ENDIANITY_DW_SWAP | 548 opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
459#endif 549#endif
460 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | 550 if (with_comp)
461 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); 551 opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
462 dmae.src_addr_lo = U64_LO(dma_addr); 552 return opcode;
463 dmae.src_addr_hi = U64_HI(dma_addr); 553}
464 dmae.dst_addr_lo = dst_addr >> 2; 554
465 dmae.dst_addr_hi = 0; 555void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
466 dmae.len = len32; 556 u8 src_type, u8 dst_type)
467 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp)); 557{
468 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp)); 558 memset(dmae, 0, sizeof(struct dmae_command));
469 dmae.comp_val = DMAE_COMP_VAL; 559
470 560 /* set the opcode */
471 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n" 561 dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
472 DP_LEVEL "src_addr [%x:%08x] len [%d *4] " 562 true, DMAE_COMP_PCI);
473 "dst_addr [%x:%08x (%08x)]\n" 563
474 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n", 564 /* fill in the completion parameters */
475 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo, 565 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
476 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr, 566 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
477 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val); 567 dmae->comp_val = DMAE_COMP_VAL;
478 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n", 568}
569
570/* issue a dmae command over the init-channel and wailt for completion */
571int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
572{
573 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
574 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
575 int rc = 0;
576
577 DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
479 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1], 578 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
480 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]); 579 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
481 580
581 /* lock the dmae channel */
482 mutex_lock(&bp->dmae_mutex); 582 mutex_lock(&bp->dmae_mutex);
483 583
584 /* reset completion */
484 *wb_comp = 0; 585 *wb_comp = 0;
485 586
486 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp)); 587 /* post the command on the channel used for initializations */
588 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
487 589
590 /* wait for completion */
488 udelay(5); 591 udelay(5);
489 592 while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
490 while (*wb_comp != DMAE_COMP_VAL) {
491 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp); 593 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
492 594
493 if (!cnt) { 595 if (!cnt) {
494 BNX2X_ERR("DMAE timeout!\n"); 596 BNX2X_ERR("DMAE timeout!\n");
495 break; 597 rc = DMAE_TIMEOUT;
598 goto unlock;
496 } 599 }
497 cnt--; 600 cnt--;
498 /* adjust delay for emulation/FPGA */ 601 udelay(50);
499 if (CHIP_REV_IS_SLOW(bp))
500 msleep(100);
501 else
502 udelay(5);
503 } 602 }
603 if (*wb_comp & DMAE_PCI_ERR_FLAG) {
604 BNX2X_ERR("DMAE PCI error!\n");
605 rc = DMAE_PCI_ERROR;
606 }
607
608 DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
609 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
610 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
504 611
612unlock:
505 mutex_unlock(&bp->dmae_mutex); 613 mutex_unlock(&bp->dmae_mutex);
614 return rc;
615}
616
617void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
618 u32 len32)
619{
620 struct dmae_command dmae;
621
622 if (!bp->dmae_ready) {
623 u32 *data = bnx2x_sp(bp, wb_data[0]);
624
625 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
626 " using indirect\n", dst_addr, len32);
627 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
628 return;
629 }
630
631 /* set opcode and fixed command fields */
632 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
633
634 /* fill in addresses and len */
635 dmae.src_addr_lo = U64_LO(dma_addr);
636 dmae.src_addr_hi = U64_HI(dma_addr);
637 dmae.dst_addr_lo = dst_addr >> 2;
638 dmae.dst_addr_hi = 0;
639 dmae.len = len32;
640
641 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
642
643 /* issue the command and wait for completion */
644 bnx2x_issue_dmae_with_comp(bp, &dmae);
506} 645}
507 646
508void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) 647void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
509{ 648{
510 struct dmae_command dmae; 649 struct dmae_command dmae;
511 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
512 int cnt = 200;
513 650
514 if (!bp->dmae_ready) { 651 if (!bp->dmae_ready) {
515 u32 *data = bnx2x_sp(bp, wb_data[0]); 652 u32 *data = bnx2x_sp(bp, wb_data[0]);
@@ -522,62 +659,20 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
522 return; 659 return;
523 } 660 }
524 661
525 memset(&dmae, 0, sizeof(struct dmae_command)); 662 /* set opcode and fixed command fields */
663 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
526 664
527 dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | 665 /* fill in addresses and len */
528 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
529 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
530#ifdef __BIG_ENDIAN
531 DMAE_CMD_ENDIANITY_B_DW_SWAP |
532#else
533 DMAE_CMD_ENDIANITY_DW_SWAP |
534#endif
535 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
536 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
537 dmae.src_addr_lo = src_addr >> 2; 666 dmae.src_addr_lo = src_addr >> 2;
538 dmae.src_addr_hi = 0; 667 dmae.src_addr_hi = 0;
539 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data)); 668 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
540 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data)); 669 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
541 dmae.len = len32; 670 dmae.len = len32;
542 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
543 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
544 dmae.comp_val = DMAE_COMP_VAL;
545
546 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
547 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
548 "dst_addr [%x:%08x (%08x)]\n"
549 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
550 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
551 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
552 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
553
554 mutex_lock(&bp->dmae_mutex);
555
556 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
557 *wb_comp = 0;
558
559 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
560
561 udelay(5);
562 671
563 while (*wb_comp != DMAE_COMP_VAL) { 672 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
564 673
565 if (!cnt) { 674 /* issue the command and wait for completion */
566 BNX2X_ERR("DMAE timeout!\n"); 675 bnx2x_issue_dmae_with_comp(bp, &dmae);
567 break;
568 }
569 cnt--;
570 /* adjust delay for emulation/FPGA */
571 if (CHIP_REV_IS_SLOW(bp))
572 msleep(100);
573 else
574 udelay(5);
575 }
576 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
577 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
578 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
579
580 mutex_unlock(&bp->dmae_mutex);
581} 676}
582 677
583void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr, 678void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
@@ -744,19 +839,24 @@ static void bnx2x_fw_dump(struct bnx2x *bp)
744 u32 mark, offset; 839 u32 mark, offset;
745 __be32 data[9]; 840 __be32 data[9];
746 int word; 841 int word;
747 842 u32 trace_shmem_base;
748 if (BP_NOMCP(bp)) { 843 if (BP_NOMCP(bp)) {
749 BNX2X_ERR("NO MCP - can not dump\n"); 844 BNX2X_ERR("NO MCP - can not dump\n");
750 return; 845 return;
751 } 846 }
752 847
753 addr = bp->common.shmem_base - 0x0800 + 4; 848 if (BP_PATH(bp) == 0)
849 trace_shmem_base = bp->common.shmem_base;
850 else
851 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
852 addr = trace_shmem_base - 0x0800 + 4;
754 mark = REG_RD(bp, addr); 853 mark = REG_RD(bp, addr);
755 mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000; 854 mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
855 + ((mark + 0x3) & ~0x3) - 0x08000000;
756 pr_err("begin fw dump (mark 0x%x)\n", mark); 856 pr_err("begin fw dump (mark 0x%x)\n", mark);
757 857
758 pr_err(""); 858 pr_err("");
759 for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) { 859 for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
760 for (word = 0; word < 8; word++) 860 for (word = 0; word < 8; word++)
761 data[word] = htonl(REG_RD(bp, offset + 4*word)); 861 data[word] = htonl(REG_RD(bp, offset + 4*word));
762 data[8] = 0x0; 862 data[8] = 0x0;
@@ -822,10 +922,15 @@ void bnx2x_panic_dump(struct bnx2x *bp)
822 for_each_queue(bp, i) { 922 for_each_queue(bp, i) {
823 struct bnx2x_fastpath *fp = &bp->fp[i]; 923 struct bnx2x_fastpath *fp = &bp->fp[i];
824 int loop; 924 int loop;
925 struct hc_status_block_data_e2 sb_data_e2;
825 struct hc_status_block_data_e1x sb_data_e1x; 926 struct hc_status_block_data_e1x sb_data_e1x;
826 struct hc_status_block_sm *hc_sm_p = 927 struct hc_status_block_sm *hc_sm_p =
928 CHIP_IS_E2(bp) ?
929 sb_data_e2.common.state_machine :
827 sb_data_e1x.common.state_machine; 930 sb_data_e1x.common.state_machine;
828 struct hc_index_data *hc_index_p = 931 struct hc_index_data *hc_index_p =
932 CHIP_IS_E2(bp) ?
933 sb_data_e2.index_data :
829 sb_data_e1x.index_data; 934 sb_data_e1x.index_data;
830 int data_size; 935 int data_size;
831 u32 *sb_data_p; 936 u32 *sb_data_p;
@@ -849,7 +954,8 @@ void bnx2x_panic_dump(struct bnx2x *bp)
849 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod, 954 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
850 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb)); 955 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
851 956
852 loop = HC_SB_MAX_INDICES_E1X; 957 loop = CHIP_IS_E2(bp) ?
958 HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;
853 959
854 /* host sb data */ 960 /* host sb data */
855 961
@@ -865,23 +971,36 @@ void bnx2x_panic_dump(struct bnx2x *bp)
865 fp->sb_index_values[j], 971 fp->sb_index_values[j],
866 (j == loop - 1) ? ")" : " "); 972 (j == loop - 1) ? ")" : " ");
867 /* fw sb data */ 973 /* fw sb data */
868 data_size = 974 data_size = CHIP_IS_E2(bp) ?
975 sizeof(struct hc_status_block_data_e2) :
869 sizeof(struct hc_status_block_data_e1x); 976 sizeof(struct hc_status_block_data_e1x);
870 data_size /= sizeof(u32); 977 data_size /= sizeof(u32);
871 sb_data_p = (u32 *)&sb_data_e1x; 978 sb_data_p = CHIP_IS_E2(bp) ?
979 (u32 *)&sb_data_e2 :
980 (u32 *)&sb_data_e1x;
872 /* copy sb data in here */ 981 /* copy sb data in here */
873 for (j = 0; j < data_size; j++) 982 for (j = 0; j < data_size; j++)
874 *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM + 983 *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
875 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) + 984 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
876 j * sizeof(u32)); 985 j * sizeof(u32));
877 986
878 pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) " 987 if (CHIP_IS_E2(bp)) {
879 "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n", 988 pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
880 sb_data_e1x.common.p_func.pf_id, 989 "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
881 sb_data_e1x.common.p_func.vf_id, 990 sb_data_e2.common.p_func.pf_id,
882 sb_data_e1x.common.p_func.vf_valid, 991 sb_data_e2.common.p_func.vf_id,
883 sb_data_e1x.common.p_func.vnic_id, 992 sb_data_e2.common.p_func.vf_valid,
884 sb_data_e1x.common.same_igu_sb_1b); 993 sb_data_e2.common.p_func.vnic_id,
994 sb_data_e2.common.same_igu_sb_1b);
995 } else {
996 pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
997 "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
998 sb_data_e1x.common.p_func.pf_id,
999 sb_data_e1x.common.p_func.vf_id,
1000 sb_data_e1x.common.p_func.vf_valid,
1001 sb_data_e1x.common.p_func.vnic_id,
1002 sb_data_e1x.common.same_igu_sb_1b);
1003 }
885 1004
886 /* SB_SMs data */ 1005 /* SB_SMs data */
887 for (j = 0; j < HC_SB_MAX_SM; j++) { 1006 for (j = 0; j < HC_SB_MAX_SM; j++) {
@@ -969,7 +1088,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
969 BNX2X_ERR("end crash dump -----------------\n"); 1088 BNX2X_ERR("end crash dump -----------------\n");
970} 1089}
971 1090
972void bnx2x_int_enable(struct bnx2x *bp) 1091static void bnx2x_hc_int_enable(struct bnx2x *bp)
973{ 1092{
974 int port = BP_PORT(bp); 1093 int port = BP_PORT(bp);
975 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 1094 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
@@ -1011,7 +1130,7 @@ void bnx2x_int_enable(struct bnx2x *bp)
1011 mmiowb(); 1130 mmiowb();
1012 barrier(); 1131 barrier();
1013 1132
1014 if (CHIP_IS_E1H(bp)) { 1133 if (!CHIP_IS_E1(bp)) {
1015 /* init leading/trailing edge */ 1134 /* init leading/trailing edge */
1016 if (IS_MF(bp)) { 1135 if (IS_MF(bp)) {
1017 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4))); 1136 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
@@ -1029,7 +1148,66 @@ void bnx2x_int_enable(struct bnx2x *bp)
1029 mmiowb(); 1148 mmiowb();
1030} 1149}
1031 1150
1032void bnx2x_int_disable(struct bnx2x *bp) 1151static void bnx2x_igu_int_enable(struct bnx2x *bp)
1152{
1153 u32 val;
1154 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1155 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
1156
1157 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1158
1159 if (msix) {
1160 val &= ~(IGU_PF_CONF_INT_LINE_EN |
1161 IGU_PF_CONF_SINGLE_ISR_EN);
1162 val |= (IGU_PF_CONF_FUNC_EN |
1163 IGU_PF_CONF_MSI_MSIX_EN |
1164 IGU_PF_CONF_ATTN_BIT_EN);
1165 } else if (msi) {
1166 val &= ~IGU_PF_CONF_INT_LINE_EN;
1167 val |= (IGU_PF_CONF_FUNC_EN |
1168 IGU_PF_CONF_MSI_MSIX_EN |
1169 IGU_PF_CONF_ATTN_BIT_EN |
1170 IGU_PF_CONF_SINGLE_ISR_EN);
1171 } else {
1172 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
1173 val |= (IGU_PF_CONF_FUNC_EN |
1174 IGU_PF_CONF_INT_LINE_EN |
1175 IGU_PF_CONF_ATTN_BIT_EN |
1176 IGU_PF_CONF_SINGLE_ISR_EN);
1177 }
1178
1179 DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n",
1180 val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1181
1182 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1183
1184 barrier();
1185
1186 /* init leading/trailing edge */
1187 if (IS_MF(bp)) {
1188 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
1189 if (bp->port.pmf)
1190 /* enable nig and gpio3 attention */
1191 val |= 0x1100;
1192 } else
1193 val = 0xffff;
1194
1195 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
1196 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
1197
1198 /* Make sure that interrupts are indeed enabled from here on */
1199 mmiowb();
1200}
1201
1202void bnx2x_int_enable(struct bnx2x *bp)
1203{
1204 if (bp->common.int_block == INT_BLOCK_HC)
1205 bnx2x_hc_int_enable(bp);
1206 else
1207 bnx2x_igu_int_enable(bp);
1208}
1209
1210static void bnx2x_hc_int_disable(struct bnx2x *bp)
1033{ 1211{
1034 int port = BP_PORT(bp); 1212 int port = BP_PORT(bp);
1035 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 1213 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
@@ -1051,6 +1229,32 @@ void bnx2x_int_disable(struct bnx2x *bp)
1051 BNX2X_ERR("BUG! proper val not read from IGU!\n"); 1229 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1052} 1230}
1053 1231
1232static void bnx2x_igu_int_disable(struct bnx2x *bp)
1233{
1234 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1235
1236 val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
1237 IGU_PF_CONF_INT_LINE_EN |
1238 IGU_PF_CONF_ATTN_BIT_EN);
1239
1240 DP(NETIF_MSG_INTR, "write %x to IGU\n", val);
1241
1242 /* flush all outstanding writes */
1243 mmiowb();
1244
1245 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1246 if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
1247 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1248}
1249
1250void bnx2x_int_disable(struct bnx2x *bp)
1251{
1252 if (bp->common.int_block == INT_BLOCK_HC)
1253 bnx2x_hc_int_disable(bp);
1254 else
1255 bnx2x_igu_int_disable(bp);
1256}
1257
1054void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw) 1258void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
1055{ 1259{
1056 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; 1260 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
@@ -1194,7 +1398,7 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1194 return IRQ_HANDLED; 1398 return IRQ_HANDLED;
1195#endif 1399#endif
1196 1400
1197 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) { 1401 for_each_queue(bp, i) {
1198 struct bnx2x_fastpath *fp = &bp->fp[i]; 1402 struct bnx2x_fastpath *fp = &bp->fp[i];
1199 1403
1200 mask = 0x2 << (fp->index + CNIC_CONTEXT_USE); 1404 mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
@@ -1579,7 +1783,7 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
1579 /* Initialize link parameters structure variables */ 1783 /* Initialize link parameters structure variables */
1580 /* It is recommended to turn off RX FC for jumbo frames 1784 /* It is recommended to turn off RX FC for jumbo frames
1581 for better performance */ 1785 for better performance */
1582 if (bp->dev->mtu > 5000) 1786 if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
1583 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX; 1787 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1584 else 1788 else
1585 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; 1789 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
@@ -1693,13 +1897,11 @@ static void bnx2x_init_port_minmax(struct bnx2x *bp)
1693static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp) 1897static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1694{ 1898{
1695 int all_zero = 1; 1899 int all_zero = 1;
1696 int port = BP_PORT(bp);
1697 int vn; 1900 int vn;
1698 1901
1699 bp->vn_weight_sum = 0; 1902 bp->vn_weight_sum = 0;
1700 for (vn = VN_0; vn < E1HVN_MAX; vn++) { 1903 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1701 int func = 2*vn + port; 1904 u32 vn_cfg = bp->mf_config[vn];
1702 u32 vn_cfg = MF_CFG_RD(bp, func_mf_config[func].config);
1703 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 1905 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1704 FUNC_MF_CFG_MIN_BW_SHIFT) * 100; 1906 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1705 1907
@@ -1727,11 +1929,12 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1727 CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 1929 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1728} 1930}
1729 1931
1730static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func) 1932static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
1731{ 1933{
1732 struct rate_shaping_vars_per_vn m_rs_vn; 1934 struct rate_shaping_vars_per_vn m_rs_vn;
1733 struct fairness_vars_per_vn m_fair_vn; 1935 struct fairness_vars_per_vn m_fair_vn;
1734 u32 vn_cfg = MF_CFG_RD(bp, func_mf_config[func].config); 1936 u32 vn_cfg = bp->mf_config[vn];
1937 int func = 2*vn + BP_PORT(bp);
1735 u16 vn_min_rate, vn_max_rate; 1938 u16 vn_min_rate, vn_max_rate;
1736 int i; 1939 int i;
1737 1940
@@ -1744,7 +1947,7 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
1744 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 1947 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1745 FUNC_MF_CFG_MIN_BW_SHIFT) * 100; 1948 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1746 /* If min rate is zero - set it to 1 */ 1949 /* If min rate is zero - set it to 1 */
1747 if (!vn_min_rate) 1950 if (bp->vn_weight_sum && (vn_min_rate == 0))
1748 vn_min_rate = DEF_MIN_RATE; 1951 vn_min_rate = DEF_MIN_RATE;
1749 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> 1952 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1750 FUNC_MF_CFG_MAX_BW_SHIFT) * 100; 1953 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
@@ -1807,7 +2010,7 @@ static void bnx2x_read_mf_cfg(struct bnx2x *bp)
1807 2010
1808 for (vn = VN_0; vn < E1HVN_MAX; vn++) { 2011 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1809 int /*abs*/func = 2*vn + BP_PORT(bp); 2012 int /*abs*/func = 2*vn + BP_PORT(bp);
1810 bp->mf_config = 2013 bp->mf_config[vn] =
1811 MF_CFG_RD(bp, func_mf_config[func].config); 2014 MF_CFG_RD(bp, func_mf_config[func].config);
1812 } 2015 }
1813} 2016}
@@ -1878,7 +2081,7 @@ static void bnx2x_link_attn(struct bnx2x *bp)
1878 if (bp->link_vars.link_up) { 2081 if (bp->link_vars.link_up) {
1879 2082
1880 /* dropless flow control */ 2083 /* dropless flow control */
1881 if (CHIP_IS_E1H(bp) && bp->dropless_fc) { 2084 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
1882 int port = BP_PORT(bp); 2085 int port = BP_PORT(bp);
1883 u32 pause_enabled = 0; 2086 u32 pause_enabled = 0;
1884 2087
@@ -1906,37 +2109,19 @@ static void bnx2x_link_attn(struct bnx2x *bp)
1906 if (prev_link_status != bp->link_vars.link_status) 2109 if (prev_link_status != bp->link_vars.link_status)
1907 bnx2x_link_report(bp); 2110 bnx2x_link_report(bp);
1908 2111
1909 if (IS_MF(bp)) { 2112 if (IS_MF(bp))
1910 int port = BP_PORT(bp); 2113 bnx2x_link_sync_notify(bp);
1911 int func;
1912 int vn;
1913
1914 /* Set the attention towards other drivers on the same port */
1915 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1916 if (vn == BP_E1HVN(bp))
1917 continue;
1918
1919 func = ((vn << 1) | port);
1920 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1921 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1922 }
1923
1924 if (bp->link_vars.link_up) {
1925 int i;
1926
1927 /* Init rate shaping and fairness contexts */
1928 bnx2x_init_port_minmax(bp);
1929 2114
1930 for (vn = VN_0; vn < E1HVN_MAX; vn++) 2115 if (bp->link_vars.link_up && bp->link_vars.line_speed) {
1931 bnx2x_init_vn_minmax(bp, 2*vn + port); 2116 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
1932 2117
1933 /* Store it to internal memory */ 2118 if (cmng_fns != CMNG_FNS_NONE) {
1934 for (i = 0; 2119 bnx2x_cmng_fns_init(bp, false, cmng_fns);
1935 i < sizeof(struct cmng_struct_per_port) / 4; i++) 2120 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
1936 REG_WR(bp, BAR_XSTRORM_INTMEM + 2121 } else
1937 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4, 2122 /* rate shaping and fairness are disabled */
1938 ((u32 *)(&bp->cmng))[i]); 2123 DP(NETIF_MSG_IFUP,
1939 } 2124 "single function mode without fairness\n");
1940 } 2125 }
1941} 2126}
1942 2127
@@ -1952,7 +2137,9 @@ void bnx2x__link_status_update(struct bnx2x *bp)
1952 else 2137 else
1953 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 2138 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1954 2139
1955 bnx2x_calc_vn_weight_sum(bp); 2140 /* the link status update could be the result of a DCC event
2141 hence re-read the shmem mf configuration */
2142 bnx2x_read_mf_cfg(bp);
1956 2143
1957 /* indicate link status */ 2144 /* indicate link status */
1958 bnx2x_link_report(bp); 2145 bnx2x_link_report(bp);
@@ -1968,8 +2155,13 @@ static void bnx2x_pmf_update(struct bnx2x *bp)
1968 2155
1969 /* enable nig attention */ 2156 /* enable nig attention */
1970 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4))); 2157 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
1971 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); 2158 if (bp->common.int_block == INT_BLOCK_HC) {
1972 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); 2159 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2160 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2161 } else if (CHIP_IS_E2(bp)) {
2162 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2163 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2164 }
1973 2165
1974 bnx2x_stats_handle(bp, STATS_EVENT_PMF); 2166 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
1975} 2167}
@@ -1985,22 +2177,23 @@ static void bnx2x_pmf_update(struct bnx2x *bp)
1985/* send the MCP a request, block until there is a reply */ 2177/* send the MCP a request, block until there is a reply */
1986u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param) 2178u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
1987{ 2179{
1988 int func = BP_FUNC(bp); 2180 int mb_idx = BP_FW_MB_IDX(bp);
1989 u32 seq = ++bp->fw_seq; 2181 u32 seq = ++bp->fw_seq;
1990 u32 rc = 0; 2182 u32 rc = 0;
1991 u32 cnt = 1; 2183 u32 cnt = 1;
1992 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10; 2184 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
1993 2185
1994 mutex_lock(&bp->fw_mb_mutex); 2186 mutex_lock(&bp->fw_mb_mutex);
1995 SHMEM_WR(bp, func_mb[func].drv_mb_param, param); 2187 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
1996 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq)); 2188 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2189
1997 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq)); 2190 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
1998 2191
1999 do { 2192 do {
2000 /* let the FW do it's magic ... */ 2193 /* let the FW do it's magic ... */
2001 msleep(delay); 2194 msleep(delay);
2002 2195
2003 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header); 2196 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2004 2197
2005 /* Give the FW up to 5 second (500*10ms) */ 2198 /* Give the FW up to 5 second (500*10ms) */
2006 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500)); 2199 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
@@ -2264,10 +2457,28 @@ void bnx2x_pf_init(struct bnx2x *bp)
2264 if (!CHIP_IS_E1(bp)) 2457 if (!CHIP_IS_E1(bp))
2265 storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp)); 2458 storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
2266 2459
2460 if (CHIP_IS_E2(bp)) {
2461 /* reset IGU PF statistics: MSIX + ATTN */
2462 /* PF */
2463 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2464 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2465 (CHIP_MODE_IS_4_PORT(bp) ?
2466 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2467 /* ATTN */
2468 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2469 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2470 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
2471 (CHIP_MODE_IS_4_PORT(bp) ?
2472 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2473 }
2474
2267 /* function setup flags */ 2475 /* function setup flags */
2268 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ); 2476 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2269 2477
2270 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0; 2478 if (CHIP_IS_E1x(bp))
2479 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2480 else
2481 flags |= FUNC_FLG_TPA;
2271 2482
2272 /** 2483 /**
2273 * Although RSS is meaningless when there is a single HW queue we 2484 * Although RSS is meaningless when there is a single HW queue we
@@ -2361,7 +2572,7 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2361 * where the bp->flags can change so it is done without any 2572 * where the bp->flags can change so it is done without any
2362 * locks 2573 * locks
2363 */ 2574 */
2364 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) { 2575 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2365 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n"); 2576 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2366 bp->flags |= MF_FUNC_DIS; 2577 bp->flags |= MF_FUNC_DIS;
2367 2578
@@ -2548,14 +2759,13 @@ static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2548static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) 2759static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2549{ 2760{
2550 int port = BP_PORT(bp); 2761 int port = BP_PORT(bp);
2551 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2552 COMMAND_REG_ATTN_BITS_SET);
2553 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 2762 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2554 MISC_REG_AEU_MASK_ATTN_FUNC_0; 2763 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2555 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : 2764 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2556 NIG_REG_MASK_INTERRUPT_PORT0; 2765 NIG_REG_MASK_INTERRUPT_PORT0;
2557 u32 aeu_mask; 2766 u32 aeu_mask;
2558 u32 nig_mask = 0; 2767 u32 nig_mask = 0;
2768 u32 reg_addr;
2559 2769
2560 if (bp->attn_state & asserted) 2770 if (bp->attn_state & asserted)
2561 BNX2X_ERR("IGU ERROR\n"); 2771 BNX2X_ERR("IGU ERROR\n");
@@ -2630,9 +2840,15 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2630 2840
2631 } /* if hardwired */ 2841 } /* if hardwired */
2632 2842
2633 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n", 2843 if (bp->common.int_block == INT_BLOCK_HC)
2634 asserted, hc_addr); 2844 reg_addr = (HC_REG_COMMAND_REG + port*32 +
2635 REG_WR(bp, hc_addr, asserted); 2845 COMMAND_REG_ATTN_BITS_SET);
2846 else
2847 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
2848
2849 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
2850 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
2851 REG_WR(bp, reg_addr, asserted);
2636 2852
2637 /* now set back the mask */ 2853 /* now set back the mask */
2638 if (asserted & ATTN_NIG_FOR_FUNC) { 2854 if (asserted & ATTN_NIG_FOR_FUNC) {
@@ -2753,6 +2969,10 @@ static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2753 /* RQ_USDMDP_FIFO_OVERFLOW */ 2969 /* RQ_USDMDP_FIFO_OVERFLOW */
2754 if (val & 0x18000) 2970 if (val & 0x18000)
2755 BNX2X_ERR("FATAL error from PXP\n"); 2971 BNX2X_ERR("FATAL error from PXP\n");
2972 if (CHIP_IS_E2(bp)) {
2973 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
2974 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
2975 }
2756 } 2976 }
2757 2977
2758 if (attn & HW_INTERRUT_ASSERT_SET_2) { 2978 if (attn & HW_INTERRUT_ASSERT_SET_2) {
@@ -2783,9 +3003,10 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2783 int func = BP_FUNC(bp); 3003 int func = BP_FUNC(bp);
2784 3004
2785 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 3005 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2786 bp->mf_config = 3006 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
2787 MF_CFG_RD(bp, func_mf_config[func].config); 3007 func_mf_config[BP_ABS_FUNC(bp)].config);
2788 val = SHMEM_RD(bp, func_mb[func].drv_status); 3008 val = SHMEM_RD(bp,
3009 func_mb[BP_FW_MB_IDX(bp)].drv_status);
2789 if (val & DRV_STATUS_DCC_EVENT_MASK) 3010 if (val & DRV_STATUS_DCC_EVENT_MASK)
2790 bnx2x_dcc_event(bp, 3011 bnx2x_dcc_event(bp,
2791 (val & DRV_STATUS_DCC_EVENT_MASK)); 3012 (val & DRV_STATUS_DCC_EVENT_MASK));
@@ -2815,13 +3036,13 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2815 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { 3036 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2816 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn); 3037 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2817 if (attn & BNX2X_GRC_TIMEOUT) { 3038 if (attn & BNX2X_GRC_TIMEOUT) {
2818 val = CHIP_IS_E1H(bp) ? 3039 val = CHIP_IS_E1(bp) ? 0 :
2819 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0; 3040 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
2820 BNX2X_ERR("GRC time-out 0x%08x\n", val); 3041 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2821 } 3042 }
2822 if (attn & BNX2X_GRC_RSV) { 3043 if (attn & BNX2X_GRC_RSV) {
2823 val = CHIP_IS_E1H(bp) ? 3044 val = CHIP_IS_E1(bp) ? 0 :
2824 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0; 3045 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
2825 BNX2X_ERR("GRC reserved 0x%08x\n", val); 3046 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2826 } 3047 }
2827 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); 3048 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
@@ -3126,6 +3347,74 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3126 attn.sig[3]); 3347 attn.sig[3]);
3127} 3348}
3128 3349
3350
3351static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
3352{
3353 u32 val;
3354 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
3355
3356 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
3357 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
3358 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
3359 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3360 "ADDRESS_ERROR\n");
3361 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
3362 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3363 "INCORRECT_RCV_BEHAVIOR\n");
3364 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
3365 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3366 "WAS_ERROR_ATTN\n");
3367 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
3368 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3369 "VF_LENGTH_VIOLATION_ATTN\n");
3370 if (val &
3371 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
3372 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3373 "VF_GRC_SPACE_VIOLATION_ATTN\n");
3374 if (val &
3375 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
3376 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3377 "VF_MSIX_BAR_VIOLATION_ATTN\n");
3378 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
3379 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3380 "TCPL_ERROR_ATTN\n");
3381 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
3382 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3383 "TCPL_IN_TWO_RCBS_ATTN\n");
3384 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
3385 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3386 "CSSNOOP_FIFO_OVERFLOW\n");
3387 }
3388 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
3389 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
3390 BNX2X_ERR("ATC hw attention 0x%x\n", val);
3391 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
3392 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
3393 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
3394 BNX2X_ERR("ATC_ATC_INT_STS_REG"
3395 "_ATC_TCPL_TO_NOT_PEND\n");
3396 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
3397 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3398 "ATC_GPA_MULTIPLE_HITS\n");
3399 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
3400 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3401 "ATC_RCPL_TO_EMPTY_CNT\n");
3402 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
3403 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
3404 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
3405 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3406 "ATC_IREQ_LESS_THAN_STU\n");
3407 }
3408
3409 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3410 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
3411 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
3412 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3413 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
3414 }
3415
3416}
3417
3129static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) 3418static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3130{ 3419{
3131 struct attn_route attn, *group_mask; 3420 struct attn_route attn, *group_mask;
@@ -3156,17 +3445,28 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3156 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); 3445 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3157 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); 3446 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3158 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); 3447 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3159 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n", 3448 if (CHIP_IS_E2(bp))
3160 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]); 3449 attn.sig[4] =
3450 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
3451 else
3452 attn.sig[4] = 0;
3453
3454 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
3455 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
3161 3456
3162 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 3457 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3163 if (deasserted & (1 << index)) { 3458 if (deasserted & (1 << index)) {
3164 group_mask = &bp->attn_group[index]; 3459 group_mask = &bp->attn_group[index];
3165 3460
3166 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n", 3461 DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
3167 index, group_mask->sig[0], group_mask->sig[1], 3462 "%08x %08x %08x\n",
3168 group_mask->sig[2], group_mask->sig[3]); 3463 index,
3464 group_mask->sig[0], group_mask->sig[1],
3465 group_mask->sig[2], group_mask->sig[3],
3466 group_mask->sig[4]);
3169 3467
3468 bnx2x_attn_int_deasserted4(bp,
3469 attn.sig[4] & group_mask->sig[4]);
3170 bnx2x_attn_int_deasserted3(bp, 3470 bnx2x_attn_int_deasserted3(bp,
3171 attn.sig[3] & group_mask->sig[3]); 3471 attn.sig[3] & group_mask->sig[3]);
3172 bnx2x_attn_int_deasserted1(bp, 3472 bnx2x_attn_int_deasserted1(bp,
@@ -3180,11 +3480,15 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3180 3480
3181 bnx2x_release_alr(bp); 3481 bnx2x_release_alr(bp);
3182 3482
3183 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR); 3483 if (bp->common.int_block == INT_BLOCK_HC)
3484 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3485 COMMAND_REG_ATTN_BITS_CLR);
3486 else
3487 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
3184 3488
3185 val = ~deasserted; 3489 val = ~deasserted;
3186 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n", 3490 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
3187 val, reg_addr); 3491 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
3188 REG_WR(bp, reg_addr, val); 3492 REG_WR(bp, reg_addr, val);
3189 3493
3190 if (~bp->attn_state & deasserted) 3494 if (~bp->attn_state & deasserted)
@@ -3471,7 +3775,7 @@ static void bnx2x_timer(unsigned long data)
3471 } 3775 }
3472 3776
3473 if (!BP_NOMCP(bp)) { 3777 if (!BP_NOMCP(bp)) {
3474 int func = BP_FUNC(bp); 3778 int mb_idx = BP_FW_MB_IDX(bp);
3475 u32 drv_pulse; 3779 u32 drv_pulse;
3476 u32 mcp_pulse; 3780 u32 mcp_pulse;
3477 3781
@@ -3479,9 +3783,9 @@ static void bnx2x_timer(unsigned long data)
3479 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; 3783 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3480 /* TBD - add SYSTEM_TIME */ 3784 /* TBD - add SYSTEM_TIME */
3481 drv_pulse = bp->fw_drv_pulse_wr_seq; 3785 drv_pulse = bp->fw_drv_pulse_wr_seq;
3482 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse); 3786 SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
3483 3787
3484 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) & 3788 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
3485 MCP_PULSE_SEQ_MASK); 3789 MCP_PULSE_SEQ_MASK);
3486 /* The delta between driver pulse and mcp response 3790 /* The delta between driver pulse and mcp response
3487 * should be 1 (before mcp response) or 0 (after mcp response) 3791 * should be 1 (before mcp response) or 0 (after mcp response)
@@ -3539,17 +3843,26 @@ static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
3539{ 3843{
3540 u32 *sb_data_p; 3844 u32 *sb_data_p;
3541 u32 data_size = 0; 3845 u32 data_size = 0;
3846 struct hc_status_block_data_e2 sb_data_e2;
3542 struct hc_status_block_data_e1x sb_data_e1x; 3847 struct hc_status_block_data_e1x sb_data_e1x;
3543 3848
3544 /* disable the function first */ 3849 /* disable the function first */
3545 memset(&sb_data_e1x, 0, 3850 if (CHIP_IS_E2(bp)) {
3546 sizeof(struct hc_status_block_data_e1x)); 3851 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3547 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED; 3852 sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3548 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED; 3853 sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3549 sb_data_e1x.common.p_func.vf_valid = false; 3854 sb_data_e2.common.p_func.vf_valid = false;
3550 sb_data_p = (u32 *)&sb_data_e1x; 3855 sb_data_p = (u32 *)&sb_data_e2;
3551 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); 3856 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3552 3857 } else {
3858 memset(&sb_data_e1x, 0,
3859 sizeof(struct hc_status_block_data_e1x));
3860 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3861 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3862 sb_data_e1x.common.p_func.vf_valid = false;
3863 sb_data_p = (u32 *)&sb_data_e1x;
3864 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3865 }
3553 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size); 3866 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3554 3867
3555 bnx2x_fill(bp, BAR_CSTRORM_INTMEM + 3868 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
@@ -3610,30 +3923,48 @@ void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
3610{ 3923{
3611 int igu_seg_id; 3924 int igu_seg_id;
3612 3925
3926 struct hc_status_block_data_e2 sb_data_e2;
3613 struct hc_status_block_data_e1x sb_data_e1x; 3927 struct hc_status_block_data_e1x sb_data_e1x;
3614 struct hc_status_block_sm *hc_sm_p; 3928 struct hc_status_block_sm *hc_sm_p;
3615 struct hc_index_data *hc_index_p; 3929 struct hc_index_data *hc_index_p;
3616 int data_size; 3930 int data_size;
3617 u32 *sb_data_p; 3931 u32 *sb_data_p;
3618 3932
3619 igu_seg_id = HC_SEG_ACCESS_NORM; 3933 if (CHIP_INT_MODE_IS_BC(bp))
3934 igu_seg_id = HC_SEG_ACCESS_NORM;
3935 else
3936 igu_seg_id = IGU_SEG_ACCESS_NORM;
3620 3937
3621 bnx2x_zero_fp_sb(bp, fw_sb_id); 3938 bnx2x_zero_fp_sb(bp, fw_sb_id);
3622 3939
3623 memset(&sb_data_e1x, 0, 3940 if (CHIP_IS_E2(bp)) {
3624 sizeof(struct hc_status_block_data_e1x)); 3941 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3625 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp); 3942 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
3626 sb_data_e1x.common.p_func.vf_id = 0xff; 3943 sb_data_e2.common.p_func.vf_id = vfid;
3627 sb_data_e1x.common.p_func.vf_valid = false; 3944 sb_data_e2.common.p_func.vf_valid = vf_valid;
3628 sb_data_e1x.common.p_func.vnic_id = BP_E1HVN(bp); 3945 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
3629 sb_data_e1x.common.same_igu_sb_1b = true; 3946 sb_data_e2.common.same_igu_sb_1b = true;
3630 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping); 3947 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
3631 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping); 3948 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
3632 hc_sm_p = sb_data_e1x.common.state_machine; 3949 hc_sm_p = sb_data_e2.common.state_machine;
3633 hc_index_p = sb_data_e1x.index_data; 3950 hc_index_p = sb_data_e2.index_data;
3634 sb_data_p = (u32 *)&sb_data_e1x; 3951 sb_data_p = (u32 *)&sb_data_e2;
3635 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); 3952 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3636 3953 } else {
3954 memset(&sb_data_e1x, 0,
3955 sizeof(struct hc_status_block_data_e1x));
3956 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
3957 sb_data_e1x.common.p_func.vf_id = 0xff;
3958 sb_data_e1x.common.p_func.vf_valid = false;
3959 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
3960 sb_data_e1x.common.same_igu_sb_1b = true;
3961 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
3962 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
3963 hc_sm_p = sb_data_e1x.common.state_machine;
3964 hc_index_p = sb_data_e1x.index_data;
3965 sb_data_p = (u32 *)&sb_data_e1x;
3966 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3967 }
3637 3968
3638 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], 3969 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
3639 igu_sb_id, igu_seg_id); 3970 igu_sb_id, igu_seg_id);
@@ -3666,6 +3997,7 @@ static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
3666 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX, 3997 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
3667 false, tx_usec); 3998 false, tx_usec);
3668} 3999}
4000
3669static void bnx2x_init_def_sb(struct bnx2x *bp) 4001static void bnx2x_init_def_sb(struct bnx2x *bp)
3670{ 4002{
3671 struct host_sp_status_block *def_sb = bp->def_status_blk; 4003 struct host_sp_status_block *def_sb = bp->def_status_blk;
@@ -3680,8 +4012,13 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
3680 struct hc_sp_status_block_data sp_sb_data; 4012 struct hc_sp_status_block_data sp_sb_data;
3681 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); 4013 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
3682 4014
3683 igu_sp_sb_index = DEF_SB_IGU_ID; 4015 if (CHIP_INT_MODE_IS_BC(bp)) {
3684 igu_seg_id = HC_SEG_ACCESS_DEF; 4016 igu_sp_sb_index = DEF_SB_IGU_ID;
4017 igu_seg_id = HC_SEG_ACCESS_DEF;
4018 } else {
4019 igu_sp_sb_index = bp->igu_dsb_id;
4020 igu_seg_id = IGU_SEG_ACCESS_DEF;
4021 }
3685 4022
3686 /* ATTN */ 4023 /* ATTN */
3687 section = ((u64)mapping) + offsetof(struct host_sp_status_block, 4024 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
@@ -3698,12 +4035,29 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
3698 for (sindex = 0; sindex < 4; sindex++) 4035 for (sindex = 0; sindex < 4; sindex++)
3699 bp->attn_group[index].sig[sindex] = 4036 bp->attn_group[index].sig[sindex] =
3700 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index); 4037 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
4038
4039 if (CHIP_IS_E2(bp))
4040 /*
4041 * enable5 is separate from the rest of the registers,
4042 * and therefore the address skip is 4
4043 * and not 16 between the different groups
4044 */
4045 bp->attn_group[index].sig[4] = REG_RD(bp,
4046 reg_offset + 0x10 + 0x4*index);
4047 else
4048 bp->attn_group[index].sig[4] = 0;
3701 } 4049 }
3702 4050
3703 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L : 4051 if (bp->common.int_block == INT_BLOCK_HC) {
3704 HC_REG_ATTN_MSG0_ADDR_L); 4052 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
3705 REG_WR(bp, reg_offset, U64_LO(section)); 4053 HC_REG_ATTN_MSG0_ADDR_L);
3706 REG_WR(bp, reg_offset + 4, U64_HI(section)); 4054
4055 REG_WR(bp, reg_offset, U64_LO(section));
4056 REG_WR(bp, reg_offset + 4, U64_HI(section));
4057 } else if (CHIP_IS_E2(bp)) {
4058 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
4059 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
4060 }
3707 4061
3708 section = ((u64)mapping) + offsetof(struct host_sp_status_block, 4062 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
3709 sp_sb); 4063 sp_sb);
@@ -3715,7 +4069,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
3715 sp_sb_data.igu_sb_id = igu_sp_sb_index; 4069 sp_sb_data.igu_sb_id = igu_sp_sb_index;
3716 sp_sb_data.igu_seg_id = igu_seg_id; 4070 sp_sb_data.igu_seg_id = igu_seg_id;
3717 sp_sb_data.p_func.pf_id = func; 4071 sp_sb_data.p_func.pf_id = func;
3718 sp_sb_data.p_func.vnic_id = BP_E1HVN(bp); 4072 sp_sb_data.p_func.vnic_id = BP_VN(bp);
3719 sp_sb_data.p_func.vf_id = 0xff; 4073 sp_sb_data.p_func.vf_id = 0xff;
3720 4074
3721 bnx2x_wr_sp_sb_data(bp, &sp_sb_data); 4075 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
@@ -3870,6 +4224,11 @@ static void bnx2x_init_internal_common(struct bnx2x *bp)
3870 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) 4224 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
3871 REG_WR(bp, BAR_USTRORM_INTMEM + 4225 REG_WR(bp, BAR_USTRORM_INTMEM +
3872 USTORM_AGG_DATA_OFFSET + i * 4, 0); 4226 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4227 if (CHIP_IS_E2(bp)) {
4228 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
4229 CHIP_INT_MODE_IS_BC(bp) ?
4230 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
4231 }
3873} 4232}
3874 4233
3875static void bnx2x_init_internal_port(struct bnx2x *bp) 4234static void bnx2x_init_internal_port(struct bnx2x *bp)
@@ -3881,6 +4240,7 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
3881{ 4240{
3882 switch (load_code) { 4241 switch (load_code) {
3883 case FW_MSG_CODE_DRV_LOAD_COMMON: 4242 case FW_MSG_CODE_DRV_LOAD_COMMON:
4243 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
3884 bnx2x_init_internal_common(bp); 4244 bnx2x_init_internal_common(bp);
3885 /* no break */ 4245 /* no break */
3886 4246
@@ -3911,9 +4271,11 @@ static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
3911 fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE; 4271 fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
3912 /* qZone id equals to FW (per path) client id */ 4272 /* qZone id equals to FW (per path) client id */
3913 fp->cl_qzone_id = fp->cl_id + 4273 fp->cl_qzone_id = fp->cl_id +
3914 BP_PORT(bp)*(ETH_MAX_RX_CLIENTS_E1H); 4274 BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
4275 ETH_MAX_RX_CLIENTS_E1H);
3915 /* init shortcut */ 4276 /* init shortcut */
3916 fp->ustorm_rx_prods_offset = 4277 fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
4278 USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
3917 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id); 4279 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
3918 /* Setup SB indicies */ 4280 /* Setup SB indicies */
3919 fp->rx_cons_sb = BNX2X_RX_SB_INDEX; 4281 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
@@ -4248,9 +4610,19 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
4248static void enable_blocks_attention(struct bnx2x *bp) 4610static void enable_blocks_attention(struct bnx2x *bp)
4249{ 4611{
4250 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); 4612 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4251 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0); 4613 if (CHIP_IS_E2(bp))
4614 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
4615 else
4616 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
4252 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); 4617 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4253 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); 4618 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4619 /*
4620 * mask read length error interrupts in brb for parser
4621 * (parsing unit and 'checksum and crc' unit)
4622 * these errors are legal (PU reads fixed length and CAC can cause
4623 * read length error on truncated packets)
4624 */
4625 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
4254 REG_WR(bp, QM_REG_QM_INT_MASK, 0); 4626 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
4255 REG_WR(bp, TM_REG_TM_INT_MASK, 0); 4627 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
4256 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0); 4628 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
@@ -4271,6 +4643,13 @@ static void enable_blocks_attention(struct bnx2x *bp)
4271/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */ 4643/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
4272 if (CHIP_REV_IS_FPGA(bp)) 4644 if (CHIP_REV_IS_FPGA(bp))
4273 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000); 4645 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
4646 else if (CHIP_IS_E2(bp))
4647 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
4648 (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
4649 | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
4650 | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
4651 | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
4652 | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
4274 else 4653 else
4275 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000); 4654 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
4276 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0); 4655 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
@@ -4288,11 +4667,11 @@ static const struct {
4288 u32 addr; 4667 u32 addr;
4289 u32 mask; 4668 u32 mask;
4290} bnx2x_parity_mask[] = { 4669} bnx2x_parity_mask[] = {
4291 {PXP_REG_PXP_PRTY_MASK, 0xffffffff}, 4670 {PXP_REG_PXP_PRTY_MASK, 0x3ffffff},
4292 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff}, 4671 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
4293 {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff}, 4672 {PXP2_REG_PXP2_PRTY_MASK_1, 0x7f},
4294 {HC_REG_HC_PRTY_MASK, 0xffffffff}, 4673 {HC_REG_HC_PRTY_MASK, 0x7},
4295 {MISC_REG_MISC_PRTY_MASK, 0xffffffff}, 4674 {MISC_REG_MISC_PRTY_MASK, 0x1},
4296 {QM_REG_QM_PRTY_MASK, 0x0}, 4675 {QM_REG_QM_PRTY_MASK, 0x0},
4297 {DORQ_REG_DORQ_PRTY_MASK, 0x0}, 4676 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
4298 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0}, 4677 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
@@ -4407,23 +4786,97 @@ static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
4407 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val); 4786 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
4408} 4787}
4409 4788
4789static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4790{
4791 u32 offset = 0;
4792
4793 if (CHIP_IS_E1(bp))
4794 return;
4795 if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4796 return;
4797
4798 switch (BP_ABS_FUNC(bp)) {
4799 case 0:
4800 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4801 break;
4802 case 1:
4803 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4804 break;
4805 case 2:
4806 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4807 break;
4808 case 3:
4809 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4810 break;
4811 case 4:
4812 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4813 break;
4814 case 5:
4815 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4816 break;
4817 case 6:
4818 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4819 break;
4820 case 7:
4821 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4822 break;
4823 default:
4824 return;
4825 }
4826
4827 REG_WR(bp, offset, pretend_func_num);
4828 REG_RD(bp, offset);
4829 DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
4830}
4831
4832static void bnx2x_pf_disable(struct bnx2x *bp)
4833{
4834 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
4835 val &= ~IGU_PF_CONF_FUNC_EN;
4836
4837 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
4838 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
4839 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
4840}
4841
4410static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) 4842static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
4411{ 4843{
4412 u32 val, i; 4844 u32 val, i;
4413 4845
4414 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp)); 4846 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
4415 4847
4416 bnx2x_reset_common(bp); 4848 bnx2x_reset_common(bp);
4417 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); 4849 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4418 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc); 4850 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
4419 4851
4420 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE); 4852 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
4421 if (CHIP_IS_E1H(bp)) 4853 if (!CHIP_IS_E1(bp))
4422 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp)); 4854 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
4423 4855
4424 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100); 4856 if (CHIP_IS_E2(bp)) {
4425 msleep(30); 4857 u8 fid;
4426 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0); 4858
4859 /**
4860 * 4-port mode or 2-port mode we need to turn of master-enable
4861 * for everyone, after that, turn it back on for self.
4862 * so, we disregard multi-function or not, and always disable
4863 * for all functions on the given path, this means 0,2,4,6 for
4864 * path 0 and 1,3,5,7 for path 1
4865 */
4866 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
4867 if (fid == BP_ABS_FUNC(bp)) {
4868 REG_WR(bp,
4869 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
4870 1);
4871 continue;
4872 }
4873
4874 bnx2x_pretend_func(bp, fid);
4875 /* clear pf enable */
4876 bnx2x_pf_disable(bp);
4877 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4878 }
4879 }
4427 4880
4428 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE); 4881 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
4429 if (CHIP_IS_E1(bp)) { 4882 if (CHIP_IS_E1(bp)) {
@@ -4471,9 +4924,65 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
4471 return -EBUSY; 4924 return -EBUSY;
4472 } 4925 }
4473 4926
4927 /* Timers bug workaround E2 only. We need to set the entire ILT to
4928 * have entries with value "0" and valid bit on.
4929 * This needs to be done by the first PF that is loaded in a path
4930 * (i.e. common phase)
4931 */
4932 if (CHIP_IS_E2(bp)) {
4933 struct ilt_client_info ilt_cli;
4934 struct bnx2x_ilt ilt;
4935 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
4936 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
4937
4938 /* initalize dummy TM client */
4939 ilt_cli.start = 0;
4940 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
4941 ilt_cli.client_num = ILT_CLIENT_TM;
4942
4943 /* Step 1: set zeroes to all ilt page entries with valid bit on
4944 * Step 2: set the timers first/last ilt entry to point
4945 * to the entire range to prevent ILT range error for 3rd/4th
4946 * vnic (this code assumes existance of the vnic)
4947 *
4948 * both steps performed by call to bnx2x_ilt_client_init_op()
4949 * with dummy TM client
4950 *
4951 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
4952 * and his brother are split registers
4953 */
4954 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
4955 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
4956 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4957
4958 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
4959 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
4960 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
4961 }
4962
4963
4474 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0); 4964 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
4475 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0); 4965 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
4476 4966
4967 if (CHIP_IS_E2(bp)) {
4968 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
4969 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
4970 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
4971
4972 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
4973
4974 /* let the HW do it's magic ... */
4975 do {
4976 msleep(200);
4977 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
4978 } while (factor-- && (val != 1));
4979
4980 if (val != 1) {
4981 BNX2X_ERR("ATC_INIT failed\n");
4982 return -EBUSY;
4983 }
4984 }
4985
4477 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE); 4986 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
4478 4987
4479 /* clean the DMAE memory */ 4988 /* clean the DMAE memory */
@@ -4492,6 +5001,8 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
4492 5001
4493 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE); 5002 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
4494 5003
5004 if (CHIP_MODE_IS_4_PORT(bp))
5005 bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
4495 /* QM queues pointers table */ 5006 /* QM queues pointers table */
4496 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET); 5007 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
4497 5008
@@ -4512,14 +5023,26 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
4512 } 5023 }
4513 5024
4514 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE); 5025 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5026 if (CHIP_MODE_IS_4_PORT(bp)) {
5027 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
5028 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
5029 }
5030
4515 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE); 5031 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4516 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); 5032 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4517#ifndef BCM_CNIC 5033#ifndef BCM_CNIC
4518 /* set NIC mode */ 5034 /* set NIC mode */
4519 REG_WR(bp, PRS_REG_NIC_MODE, 1); 5035 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4520#endif 5036#endif
4521 if (CHIP_IS_E1H(bp)) 5037 if (!CHIP_IS_E1(bp))
4522 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF(bp)); 5038 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF(bp));
5039 if (CHIP_IS_E2(bp)) {
5040 /* Bit-map indicating which L2 hdrs may appear after the
5041 basic Ethernet header */
5042 int has_ovlan = IS_MF(bp);
5043 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5044 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5045 }
4523 5046
4524 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE); 5047 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
4525 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE); 5048 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
@@ -4536,6 +5059,9 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
4536 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE); 5059 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
4537 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE); 5060 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
4538 5061
5062 if (CHIP_MODE_IS_4_PORT(bp))
5063 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
5064
4539 /* sync semi rtc */ 5065 /* sync semi rtc */
4540 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 5066 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4541 0x80000000); 5067 0x80000000);
@@ -4546,6 +5072,12 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
4546 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE); 5072 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
4547 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE); 5073 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
4548 5074
5075 if (CHIP_IS_E2(bp)) {
5076 int has_ovlan = IS_MF(bp);
5077 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5078 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5079 }
5080
4549 REG_WR(bp, SRC_REG_SOFT_RST, 1); 5081 REG_WR(bp, SRC_REG_SOFT_RST, 1);
4550 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) 5082 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
4551 REG_WR(bp, i, random32()); 5083 REG_WR(bp, i, random32());
@@ -4583,6 +5115,11 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
4583 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000); 5115 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
4584 5116
4585 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE); 5117 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5118
5119 if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
5120 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
5121
5122 bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
4586 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE); 5123 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
4587 5124
4588 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE); 5125 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
@@ -4590,16 +5127,35 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
4590 REG_WR(bp, 0x2814, 0xffffffff); 5127 REG_WR(bp, 0x2814, 0xffffffff);
4591 REG_WR(bp, 0x3820, 0xffffffff); 5128 REG_WR(bp, 0x3820, 0xffffffff);
4592 5129
5130 if (CHIP_IS_E2(bp)) {
5131 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
5132 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
5133 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
5134 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
5135 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
5136 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
5137 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
5138 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
5139 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
5140 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
5141 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
5142 }
5143
4593 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE); 5144 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
4594 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE); 5145 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
4595 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE); 5146 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
4596 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE); 5147 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
4597 5148
4598 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE); 5149 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
4599 if (CHIP_IS_E1H(bp)) { 5150 if (!CHIP_IS_E1(bp)) {
4600 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp)); 5151 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
4601 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF(bp)); 5152 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF(bp));
4602 } 5153 }
5154 if (CHIP_IS_E2(bp)) {
5155 /* Bit-map indicating which L2 hdrs may appear after the
5156 basic Ethernet header */
5157 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF(bp) ? 7 : 6));
5158 }
4603 5159
4604 if (CHIP_REV_IS_SLOW(bp)) 5160 if (CHIP_REV_IS_SLOW(bp))
4605 msleep(200); 5161 msleep(200);
@@ -4622,15 +5178,17 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
4622 } 5178 }
4623 REG_WR(bp, CFC_REG_DEBUG0, 0); 5179 REG_WR(bp, CFC_REG_DEBUG0, 0);
4624 5180
4625 /* read NIG statistic 5181 if (CHIP_IS_E1(bp)) {
4626 to see if this is our first up since powerup */ 5182 /* read NIG statistic
4627 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); 5183 to see if this is our first up since powerup */
4628 val = *bnx2x_sp(bp, wb_data[0]); 5184 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5185 val = *bnx2x_sp(bp, wb_data[0]);
4629 5186
4630 /* do internal memory self test */ 5187 /* do internal memory self test */
4631 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) { 5188 if ((val == 0) && bnx2x_int_mem_test(bp)) {
4632 BNX2X_ERR("internal mem self test failed\n"); 5189 BNX2X_ERR("internal mem self test failed\n");
4633 return -EBUSY; 5190 return -EBUSY;
5191 }
4634 } 5192 }
4635 5193
4636 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp, 5194 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
@@ -4647,10 +5205,23 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
4647 enable_blocks_parity(bp); 5205 enable_blocks_parity(bp);
4648 5206
4649 if (!BP_NOMCP(bp)) { 5207 if (!BP_NOMCP(bp)) {
4650 bnx2x_acquire_phy_lock(bp); 5208 /* In E2 2-PORT mode, same ext phy is used for the two paths */
4651 bnx2x_common_init_phy(bp, bp->common.shmem_base, 5209 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
4652 bp->common.shmem2_base); 5210 CHIP_IS_E1x(bp)) {
4653 bnx2x_release_phy_lock(bp); 5211 u32 shmem_base[2], shmem2_base[2];
5212 shmem_base[0] = bp->common.shmem_base;
5213 shmem2_base[0] = bp->common.shmem2_base;
5214 if (CHIP_IS_E2(bp)) {
5215 shmem_base[1] =
5216 SHMEM2_RD(bp, other_shmem_base_addr);
5217 shmem2_base[1] =
5218 SHMEM2_RD(bp, other_shmem2_base_addr);
5219 }
5220 bnx2x_acquire_phy_lock(bp);
5221 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
5222 bp->common.chip_id);
5223 bnx2x_release_phy_lock(bp);
5224 }
4654 } else 5225 } else
4655 BNX2X_ERR("Bootcode is missing - can not initialize link\n"); 5226 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
4656 5227
@@ -4671,6 +5242,14 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
4671 bnx2x_init_block(bp, PXP_BLOCK, init_stage); 5242 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
4672 bnx2x_init_block(bp, PXP2_BLOCK, init_stage); 5243 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
4673 5244
5245 /* Timers bug workaround: disables the pf_master bit in pglue at
5246 * common phase, we need to enable it here before any dmae access are
5247 * attempted. Therefore we manually added the enable-master to the
5248 * port phase (it also happens in the function phase)
5249 */
5250 if (CHIP_IS_E2(bp))
5251 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5252
4674 bnx2x_init_block(bp, TCM_BLOCK, init_stage); 5253 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
4675 bnx2x_init_block(bp, UCM_BLOCK, init_stage); 5254 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
4676 bnx2x_init_block(bp, CCM_BLOCK, init_stage); 5255 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
@@ -4687,29 +5266,41 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
4687 5266
4688 bnx2x_init_block(bp, DQ_BLOCK, init_stage); 5267 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
4689 5268
4690 bnx2x_init_block(bp, BRB1_BLOCK, init_stage); 5269 if (CHIP_MODE_IS_4_PORT(bp))
4691 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) { 5270 bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);
4692 /* no pause for emulation and FPGA */ 5271
4693 low = 0; 5272 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
4694 high = 513; 5273 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
4695 } else { 5274 if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
4696 if (IS_MF(bp)) 5275 /* no pause for emulation and FPGA */
4697 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246); 5276 low = 0;
4698 else if (bp->dev->mtu > 4096) { 5277 high = 513;
4699 if (bp->flags & ONE_PORT_FLAG) 5278 } else {
4700 low = 160; 5279 if (IS_MF(bp))
4701 else { 5280 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
4702 val = bp->dev->mtu; 5281 else if (bp->dev->mtu > 4096) {
4703 /* (24*1024 + val*4)/256 */ 5282 if (bp->flags & ONE_PORT_FLAG)
4704 low = 96 + (val/64) + ((val % 64) ? 1 : 0); 5283 low = 160;
4705 } 5284 else {
4706 } else 5285 val = bp->dev->mtu;
4707 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160); 5286 /* (24*1024 + val*4)/256 */
4708 high = low + 56; /* 14*1024/256 */ 5287 low = 96 + (val/64) +
5288 ((val % 64) ? 1 : 0);
5289 }
5290 } else
5291 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5292 high = low + 56; /* 14*1024/256 */
5293 }
5294 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5295 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
4709 } 5296 }
4710 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
4711 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
4712 5297
5298 if (CHIP_MODE_IS_4_PORT(bp)) {
5299 REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
5300 REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
5301 REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
5302 BRB1_REG_MAC_GUARANTIED_0), 40);
5303 }
4713 5304
4714 bnx2x_init_block(bp, PRS_BLOCK, init_stage); 5305 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
4715 5306
@@ -4722,24 +5313,28 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
4722 bnx2x_init_block(bp, USEM_BLOCK, init_stage); 5313 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
4723 bnx2x_init_block(bp, CSEM_BLOCK, init_stage); 5314 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
4724 bnx2x_init_block(bp, XSEM_BLOCK, init_stage); 5315 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
5316 if (CHIP_MODE_IS_4_PORT(bp))
5317 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);
4725 5318
4726 bnx2x_init_block(bp, UPB_BLOCK, init_stage); 5319 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
4727 bnx2x_init_block(bp, XPB_BLOCK, init_stage); 5320 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
4728 5321
4729 bnx2x_init_block(bp, PBF_BLOCK, init_stage); 5322 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
4730 5323
4731 /* configure PBF to work without PAUSE mtu 9000 */ 5324 if (!CHIP_IS_E2(bp)) {
4732 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); 5325 /* configure PBF to work without PAUSE mtu 9000 */
5326 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
4733 5327
4734 /* update threshold */ 5328 /* update threshold */
4735 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16)); 5329 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
4736 /* update init credit */ 5330 /* update init credit */
4737 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); 5331 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
4738 5332
4739 /* probe changes */ 5333 /* probe changes */
4740 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1); 5334 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
4741 msleep(5); 5335 udelay(50);
4742 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0); 5336 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5337 }
4743 5338
4744#ifdef BCM_CNIC 5339#ifdef BCM_CNIC
4745 bnx2x_init_block(bp, SRCH_BLOCK, init_stage); 5340 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
@@ -4753,6 +5348,8 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
4753 } 5348 }
4754 bnx2x_init_block(bp, HC_BLOCK, init_stage); 5349 bnx2x_init_block(bp, HC_BLOCK, init_stage);
4755 5350
5351 bnx2x_init_block(bp, IGU_BLOCK, init_stage);
5352
4756 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage); 5353 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
4757 /* init aeu_mask_attn_func_0/1: 5354 /* init aeu_mask_attn_func_0/1:
4758 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use 5355 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
@@ -4771,11 +5368,25 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
4771 5368
4772 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); 5369 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
4773 5370
4774 if (CHIP_IS_E1H(bp)) { 5371 if (!CHIP_IS_E1(bp)) {
4775 /* 0x2 disable mf_ov, 0x1 enable */ 5372 /* 0x2 disable mf_ov, 0x1 enable */
4776 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, 5373 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
4777 (IS_MF(bp) ? 0x1 : 0x2)); 5374 (IS_MF(bp) ? 0x1 : 0x2));
4778 5375
5376 if (CHIP_IS_E2(bp)) {
5377 val = 0;
5378 switch (bp->mf_mode) {
5379 case MULTI_FUNCTION_SD:
5380 val = 1;
5381 break;
5382 case MULTI_FUNCTION_SI:
5383 val = 2;
5384 break;
5385 }
5386
5387 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
5388 NIG_REG_LLH0_CLS_TYPE), val);
5389 }
4779 { 5390 {
4780 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0); 5391 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
4781 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0); 5392 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
@@ -4805,14 +5416,26 @@ static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
4805{ 5416{
4806 int reg; 5417 int reg;
4807 5418
4808 if (CHIP_IS_E1H(bp)) 5419 if (CHIP_IS_E1(bp))
4809 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
4810 else /* E1 */
4811 reg = PXP2_REG_RQ_ONCHIP_AT + index*8; 5420 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5421 else
5422 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
4812 5423
4813 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr)); 5424 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
4814} 5425}
4815 5426
5427static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
5428{
5429 bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
5430}
5431
5432static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
5433{
5434 u32 i, base = FUNC_ILT_BASE(func);
5435 for (i = base; i < base + ILT_PER_FUNC; i++)
5436 bnx2x_ilt_wr(bp, i, 0);
5437}
5438
4816static int bnx2x_init_hw_func(struct bnx2x *bp) 5439static int bnx2x_init_hw_func(struct bnx2x *bp)
4817{ 5440{
4818 int port = BP_PORT(bp); 5441 int port = BP_PORT(bp);
@@ -4825,10 +5448,12 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
4825 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func); 5448 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
4826 5449
4827 /* set MSI reconfigure capability */ 5450 /* set MSI reconfigure capability */
4828 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0); 5451 if (bp->common.int_block == INT_BLOCK_HC) {
4829 val = REG_RD(bp, addr); 5452 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
4830 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; 5453 val = REG_RD(bp, addr);
4831 REG_WR(bp, addr, val); 5454 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5455 REG_WR(bp, addr, val);
5456 }
4832 5457
4833 ilt = BP_ILT(bp); 5458 ilt = BP_ILT(bp);
4834 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; 5459 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
@@ -4854,10 +5479,38 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
4854 REG_WR(bp, PRS_REG_NIC_MODE, 1); 5479 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4855#endif /* BCM_CNIC */ 5480#endif /* BCM_CNIC */
4856 5481
5482 if (CHIP_IS_E2(bp)) {
5483 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
5484
5485 /* Turn on a single ISR mode in IGU if driver is going to use
5486 * INT#x or MSI
5487 */
5488 if (!(bp->flags & USING_MSIX_FLAG))
5489 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
5490 /*
5491 * Timers workaround bug: function init part.
5492 * Need to wait 20msec after initializing ILT,
5493 * needed to make sure there are no requests in
5494 * one of the PXP internal queues with "old" ILT addresses
5495 */
5496 msleep(20);
5497 /*
5498 * Master enable - Due to WB DMAE writes performed before this
5499 * register is re-initialized as part of the regular function
5500 * init
5501 */
5502 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5503 /* Enable the function in IGU */
5504 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
5505 }
5506
4857 bp->dmae_ready = 1; 5507 bp->dmae_ready = 1;
4858 5508
4859 bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func); 5509 bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
4860 5510
5511 if (CHIP_IS_E2(bp))
5512 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
5513
4861 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func); 5514 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
4862 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func); 5515 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
4863 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func); 5516 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
@@ -4868,7 +5521,24 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
4868 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func); 5521 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
4869 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func); 5522 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
4870 5523
5524 if (CHIP_IS_E2(bp)) {
5525 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
5526 BP_PATH(bp));
5527 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
5528 BP_PATH(bp));
5529 }
5530
5531 if (CHIP_MODE_IS_4_PORT(bp))
5532 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);
5533
5534 if (CHIP_IS_E2(bp))
5535 REG_WR(bp, QM_REG_PF_EN, 1);
5536
4871 bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func); 5537 bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
5538
5539 if (CHIP_MODE_IS_4_PORT(bp))
5540 bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);
5541
4872 bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func); 5542 bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
4873 bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func); 5543 bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
4874 bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func); 5544 bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
@@ -4880,10 +5550,16 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
4880 bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func); 5550 bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
4881 bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func); 5551 bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
4882 bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func); 5552 bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
5553 if (CHIP_IS_E2(bp))
5554 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
5555
4883 bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func); 5556 bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
4884 5557
4885 bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func); 5558 bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
4886 5559
5560 if (CHIP_IS_E2(bp))
5561 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
5562
4887 if (IS_MF(bp)) { 5563 if (IS_MF(bp)) {
4888 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); 5564 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
4889 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov); 5565 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
@@ -4892,13 +5568,117 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
4892 bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func); 5568 bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
4893 5569
4894 /* HC init per function */ 5570 /* HC init per function */
4895 if (CHIP_IS_E1H(bp)) { 5571 if (bp->common.int_block == INT_BLOCK_HC) {
5572 if (CHIP_IS_E1H(bp)) {
5573 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5574
5575 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5576 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5577 }
5578 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
5579
5580 } else {
5581 int num_segs, sb_idx, prod_offset;
5582
4896 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 5583 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4897 5584
4898 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); 5585 if (CHIP_IS_E2(bp)) {
4899 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); 5586 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
5587 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
5588 }
5589
5590 bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);
5591
5592 if (CHIP_IS_E2(bp)) {
5593 int dsb_idx = 0;
5594 /**
5595 * Producer memory:
5596 * E2 mode: address 0-135 match to the mapping memory;
5597 * 136 - PF0 default prod; 137 - PF1 default prod;
5598 * 138 - PF2 default prod; 139 - PF3 default prod;
5599 * 140 - PF0 attn prod; 141 - PF1 attn prod;
5600 * 142 - PF2 attn prod; 143 - PF3 attn prod;
5601 * 144-147 reserved.
5602 *
5603 * E1.5 mode - In backward compatible mode;
5604 * for non default SB; each even line in the memory
5605 * holds the U producer and each odd line hold
5606 * the C producer. The first 128 producers are for
5607 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
5608 * producers are for the DSB for each PF.
5609 * Each PF has five segments: (the order inside each
5610 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
5611 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
5612 * 144-147 attn prods;
5613 */
5614 /* non-default-status-blocks */
5615 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5616 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
5617 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
5618 prod_offset = (bp->igu_base_sb + sb_idx) *
5619 num_segs;
5620
5621 for (i = 0; i < num_segs; i++) {
5622 addr = IGU_REG_PROD_CONS_MEMORY +
5623 (prod_offset + i) * 4;
5624 REG_WR(bp, addr, 0);
5625 }
5626 /* send consumer update with value 0 */
5627 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
5628 USTORM_ID, 0, IGU_INT_NOP, 1);
5629 bnx2x_igu_clear_sb(bp,
5630 bp->igu_base_sb + sb_idx);
5631 }
5632
5633 /* default-status-blocks */
5634 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5635 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
5636
5637 if (CHIP_MODE_IS_4_PORT(bp))
5638 dsb_idx = BP_FUNC(bp);
5639 else
5640 dsb_idx = BP_E1HVN(bp);
5641
5642 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
5643 IGU_BC_BASE_DSB_PROD + dsb_idx :
5644 IGU_NORM_BASE_DSB_PROD + dsb_idx);
5645
5646 for (i = 0; i < (num_segs * E1HVN_MAX);
5647 i += E1HVN_MAX) {
5648 addr = IGU_REG_PROD_CONS_MEMORY +
5649 (prod_offset + i)*4;
5650 REG_WR(bp, addr, 0);
5651 }
5652 /* send consumer update with 0 */
5653 if (CHIP_INT_MODE_IS_BC(bp)) {
5654 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5655 USTORM_ID, 0, IGU_INT_NOP, 1);
5656 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5657 CSTORM_ID, 0, IGU_INT_NOP, 1);
5658 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5659 XSTORM_ID, 0, IGU_INT_NOP, 1);
5660 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5661 TSTORM_ID, 0, IGU_INT_NOP, 1);
5662 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5663 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5664 } else {
5665 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5666 USTORM_ID, 0, IGU_INT_NOP, 1);
5667 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5668 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5669 }
5670 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
5671
5672 /* !!! these should become driver const once
5673 rf-tool supports split-68 const */
5674 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
5675 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
5676 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
5677 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
5678 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
5679 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
5680 }
4900 } 5681 }
4901 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
4902 5682
4903 /* Reset PCIE errors for debug */ 5683 /* Reset PCIE errors for debug */
4904 REG_WR(bp, 0x2114, 0xffffffff); 5684 REG_WR(bp, 0x2114, 0xffffffff);
@@ -4920,7 +5700,7 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
4920 int rc = 0; 5700 int rc = 0;
4921 5701
4922 DP(BNX2X_MSG_MCP, "function %d load_code %x\n", 5702 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
4923 BP_FUNC(bp), load_code); 5703 BP_ABS_FUNC(bp), load_code);
4924 5704
4925 bp->dmae_ready = 0; 5705 bp->dmae_ready = 0;
4926 mutex_init(&bp->dmae_mutex); 5706 mutex_init(&bp->dmae_mutex);
@@ -4930,6 +5710,7 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
4930 5710
4931 switch (load_code) { 5711 switch (load_code) {
4932 case FW_MSG_CODE_DRV_LOAD_COMMON: 5712 case FW_MSG_CODE_DRV_LOAD_COMMON:
5713 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
4933 rc = bnx2x_init_hw_common(bp, load_code); 5714 rc = bnx2x_init_hw_common(bp, load_code);
4934 if (rc) 5715 if (rc)
4935 goto init_hw_err; 5716 goto init_hw_err;
@@ -4953,10 +5734,10 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
4953 } 5734 }
4954 5735
4955 if (!BP_NOMCP(bp)) { 5736 if (!BP_NOMCP(bp)) {
4956 int func = BP_FUNC(bp); 5737 int mb_idx = BP_FW_MB_IDX(bp);
4957 5738
4958 bp->fw_drv_pulse_wr_seq = 5739 bp->fw_drv_pulse_wr_seq =
4959 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) & 5740 (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
4960 DRV_PULSE_SEQ_MASK); 5741 DRV_PULSE_SEQ_MASK);
4961 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); 5742 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
4962 } 5743 }
@@ -4993,9 +5774,14 @@ void bnx2x_free_mem(struct bnx2x *bp)
4993 /* Common */ 5774 /* Common */
4994 for_each_queue(bp, i) { 5775 for_each_queue(bp, i) {
4995 /* status blocks */ 5776 /* status blocks */
4996 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb), 5777 if (CHIP_IS_E2(bp))
4997 bnx2x_fp(bp, i, status_blk_mapping), 5778 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
4998 sizeof(struct host_hc_status_block_e1x)); 5779 bnx2x_fp(bp, i, status_blk_mapping),
5780 sizeof(struct host_hc_status_block_e2));
5781 else
5782 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
5783 bnx2x_fp(bp, i, status_blk_mapping),
5784 sizeof(struct host_hc_status_block_e1x));
4999 } 5785 }
5000 /* Rx */ 5786 /* Rx */
5001 for_each_queue(bp, i) { 5787 for_each_queue(bp, i) {
@@ -5041,9 +5827,12 @@ void bnx2x_free_mem(struct bnx2x *bp)
5041 5827
5042 BNX2X_FREE(bp->ilt->lines); 5828 BNX2X_FREE(bp->ilt->lines);
5043#ifdef BCM_CNIC 5829#ifdef BCM_CNIC
5044 5830 if (CHIP_IS_E2(bp))
5045 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping, 5831 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
5046 sizeof(struct host_hc_status_block_e1x)); 5832 sizeof(struct host_hc_status_block_e2));
5833 else
5834 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
5835 sizeof(struct host_hc_status_block_e1x));
5047 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ); 5836 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
5048#endif 5837#endif
5049 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE); 5838 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
@@ -5055,6 +5844,22 @@ void bnx2x_free_mem(struct bnx2x *bp)
5055#undef BNX2X_KFREE 5844#undef BNX2X_KFREE
5056} 5845}
5057 5846
5847static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
5848{
5849 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
5850 if (CHIP_IS_E2(bp)) {
5851 bnx2x_fp(bp, index, sb_index_values) =
5852 (__le16 *)status_blk.e2_sb->sb.index_values;
5853 bnx2x_fp(bp, index, sb_running_index) =
5854 (__le16 *)status_blk.e2_sb->sb.running_index;
5855 } else {
5856 bnx2x_fp(bp, index, sb_index_values) =
5857 (__le16 *)status_blk.e1x_sb->sb.index_values;
5858 bnx2x_fp(bp, index, sb_running_index) =
5859 (__le16 *)status_blk.e1x_sb->sb.running_index;
5860 }
5861}
5862
5058int bnx2x_alloc_mem(struct bnx2x *bp) 5863int bnx2x_alloc_mem(struct bnx2x *bp)
5059{ 5864{
5060 5865
@@ -5074,25 +5879,23 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
5074 } while (0) 5879 } while (0)
5075 5880
5076 int i; 5881 int i;
5077 void *p;
5078 5882
5079 /* fastpath */ 5883 /* fastpath */
5080 /* Common */ 5884 /* Common */
5081 for_each_queue(bp, i) { 5885 for_each_queue(bp, i) {
5886 union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
5082 bnx2x_fp(bp, i, bp) = bp; 5887 bnx2x_fp(bp, i, bp) = bp;
5083
5084 /* status blocks */ 5888 /* status blocks */
5085 BNX2X_PCI_ALLOC(p, 5889 if (CHIP_IS_E2(bp))
5890 BNX2X_PCI_ALLOC(sb->e2_sb,
5891 &bnx2x_fp(bp, i, status_blk_mapping),
5892 sizeof(struct host_hc_status_block_e2));
5893 else
5894 BNX2X_PCI_ALLOC(sb->e1x_sb,
5086 &bnx2x_fp(bp, i, status_blk_mapping), 5895 &bnx2x_fp(bp, i, status_blk_mapping),
5087 sizeof(struct host_hc_status_block_e1x)); 5896 sizeof(struct host_hc_status_block_e1x));
5088 5897
5089 bnx2x_fp(bp, i, status_blk.e1x_sb) = 5898 set_sb_shortcuts(bp, i);
5090 (struct host_hc_status_block_e1x *)p;
5091
5092 bnx2x_fp(bp, i, sb_index_values) = (__le16 *)
5093 (bnx2x_fp(bp, i, status_blk.e1x_sb)->sb.index_values);
5094 bnx2x_fp(bp, i, sb_running_index) = (__le16 *)
5095 (bnx2x_fp(bp, i, status_blk.e1x_sb)->sb.running_index);
5096 } 5899 }
5097 /* Rx */ 5900 /* Rx */
5098 for_each_queue(bp, i) { 5901 for_each_queue(bp, i) {
@@ -5129,8 +5932,12 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
5129 /* end of fastpath */ 5932 /* end of fastpath */
5130 5933
5131#ifdef BCM_CNIC 5934#ifdef BCM_CNIC
5132 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping, 5935 if (CHIP_IS_E2(bp))
5133 sizeof(struct host_hc_status_block_e1x)); 5936 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
5937 sizeof(struct host_hc_status_block_e2));
5938 else
5939 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
5940 sizeof(struct host_hc_status_block_e1x));
5134 5941
5135 /* allocate searcher T2 table */ 5942 /* allocate searcher T2 table */
5136 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ); 5943 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
@@ -5210,11 +6017,6 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
5210 bp->set_mac_pending = 1; 6017 bp->set_mac_pending = 1;
5211 smp_wmb(); 6018 smp_wmb();
5212 6019
5213 config->hdr.length = 1 + (is_bcast ? 1 : 0);
5214 config->hdr.offset = cam_offset;
5215 config->hdr.client_id = 0xff;
5216 config->hdr.reserved1 = 0;
5217
5218 config->hdr.length = 1; 6020 config->hdr.length = 1;
5219 config->hdr.offset = cam_offset; 6021 config->hdr.offset = cam_offset;
5220 config->hdr.client_id = 0xff; 6022 config->hdr.client_id = 0xff;
@@ -5312,7 +6114,12 @@ int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
5312 6114
5313u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset) 6115u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
5314{ 6116{
5315 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp); 6117 if (CHIP_IS_E1H(bp))
6118 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6119 else if (CHIP_MODE_IS_4_PORT(bp))
6120 return BP_FUNC(bp) * 32 + rel_offset;
6121 else
6122 return BP_VN(bp) * 32 + rel_offset;
5316} 6123}
5317 6124
5318void bnx2x_set_eth_mac(struct bnx2x *bp, int set) 6125void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
@@ -5804,9 +6611,11 @@ static void bnx2x_reset_func(struct bnx2x *bp)
5804{ 6611{
5805 int port = BP_PORT(bp); 6612 int port = BP_PORT(bp);
5806 int func = BP_FUNC(bp); 6613 int func = BP_FUNC(bp);
5807 int base, i; 6614 int i;
5808 int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) + 6615 int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
5809 offsetof(struct hc_status_block_data_e1x, common); 6616 (CHIP_IS_E2(bp) ?
6617 offsetof(struct hc_status_block_data_e2, common) :
6618 offsetof(struct hc_status_block_data_e1x, common));
5810 int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func); 6619 int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
5811 int pfid_offset = offsetof(struct pci_entity, pf_id); 6620 int pfid_offset = offsetof(struct pci_entity, pf_id);
5812 6621
@@ -5839,8 +6648,13 @@ static void bnx2x_reset_func(struct bnx2x *bp)
5839 0); 6648 0);
5840 6649
5841 /* Configure IGU */ 6650 /* Configure IGU */
5842 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); 6651 if (bp->common.int_block == INT_BLOCK_HC) {
5843 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); 6652 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6653 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6654 } else {
6655 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
6656 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
6657 }
5844 6658
5845#ifdef BCM_CNIC 6659#ifdef BCM_CNIC
5846 /* Disable Timer scan */ 6660 /* Disable Timer scan */
@@ -5856,9 +6670,25 @@ static void bnx2x_reset_func(struct bnx2x *bp)
5856 } 6670 }
5857#endif 6671#endif
5858 /* Clear ILT */ 6672 /* Clear ILT */
5859 base = FUNC_ILT_BASE(func); 6673 bnx2x_clear_func_ilt(bp, func);
5860 for (i = base; i < base + ILT_PER_FUNC; i++) 6674
5861 bnx2x_ilt_wr(bp, i, 0); 6675 /* Timers workaround bug for E2: if this is vnic-3,
6676 * we need to set the entire ilt range for this timers.
6677 */
6678 if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
6679 struct ilt_client_info ilt_cli;
6680 /* use dummy TM client */
6681 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
6682 ilt_cli.start = 0;
6683 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
6684 ilt_cli.client_num = ILT_CLIENT_TM;
6685
6686 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
6687 }
6688
6689 /* this assumes that reset_port() called before reset_func()*/
6690 if (CHIP_IS_E2(bp))
6691 bnx2x_pf_disable(bp);
5862 6692
5863 bp->dmae_ready = 0; 6693 bp->dmae_ready = 0;
5864} 6694}
@@ -5892,7 +6722,7 @@ static void bnx2x_reset_port(struct bnx2x *bp)
5892static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code) 6722static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
5893{ 6723{
5894 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n", 6724 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
5895 BP_FUNC(bp), reset_code); 6725 BP_ABS_FUNC(bp), reset_code);
5896 6726
5897 switch (reset_code) { 6727 switch (reset_code) {
5898 case FW_MSG_CODE_DRV_UNLOAD_COMMON: 6728 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
@@ -6024,15 +6854,20 @@ unload_error:
6024 if (!BP_NOMCP(bp)) 6854 if (!BP_NOMCP(bp))
6025 reset_code = bnx2x_fw_command(bp, reset_code, 0); 6855 reset_code = bnx2x_fw_command(bp, reset_code, 0);
6026 else { 6856 else {
6027 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n", 6857 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
6028 load_count[0], load_count[1], load_count[2]); 6858 "%d, %d, %d\n", BP_PATH(bp),
6029 load_count[0]--; 6859 load_count[BP_PATH(bp)][0],
6030 load_count[1 + port]--; 6860 load_count[BP_PATH(bp)][1],
6031 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n", 6861 load_count[BP_PATH(bp)][2]);
6032 load_count[0], load_count[1], load_count[2]); 6862 load_count[BP_PATH(bp)][0]--;
6033 if (load_count[0] == 0) 6863 load_count[BP_PATH(bp)][1 + port]--;
6864 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
6865 "%d, %d, %d\n", BP_PATH(bp),
6866 load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
6867 load_count[BP_PATH(bp)][2]);
6868 if (load_count[BP_PATH(bp)][0] == 0)
6034 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; 6869 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6035 else if (load_count[1 + port] == 0) 6870 else if (load_count[BP_PATH(bp)][1 + port] == 0)
6036 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT; 6871 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6037 else 6872 else
6038 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION; 6873 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
@@ -6531,39 +7366,23 @@ reset_task_exit:
6531 * Init service functions 7366 * Init service functions
6532 */ 7367 */
6533 7368
6534static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func) 7369u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
6535{ 7370{
6536 switch (func) { 7371 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
6537 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0; 7372 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
6538 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1; 7373 return base + (BP_ABS_FUNC(bp)) * stride;
6539 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
6540 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
6541 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
6542 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
6543 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
6544 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
6545 default:
6546 BNX2X_ERR("Unsupported function index: %d\n", func);
6547 return (u32)(-1);
6548 }
6549} 7374}
6550 7375
6551static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func) 7376static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
6552{ 7377{
6553 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val; 7378 u32 reg = bnx2x_get_pretend_reg(bp);
6554 7379
6555 /* Flush all outstanding writes */ 7380 /* Flush all outstanding writes */
6556 mmiowb(); 7381 mmiowb();
6557 7382
6558 /* Pretend to be function 0 */ 7383 /* Pretend to be function 0 */
6559 REG_WR(bp, reg, 0); 7384 REG_WR(bp, reg, 0);
6560 /* Flush the GRC transaction (in the chip) */ 7385 REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
6561 new_val = REG_RD(bp, reg);
6562 if (new_val != 0) {
6563 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
6564 new_val);
6565 BUG();
6566 }
6567 7386
6568 /* From now we are in the "like-E1" mode */ 7387 /* From now we are in the "like-E1" mode */
6569 bnx2x_int_disable(bp); 7388 bnx2x_int_disable(bp);
@@ -6571,22 +7390,17 @@ static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
6571 /* Flush all outstanding writes */ 7390 /* Flush all outstanding writes */
6572 mmiowb(); 7391 mmiowb();
6573 7392
6574 /* Restore the original funtion settings */ 7393 /* Restore the original function */
6575 REG_WR(bp, reg, orig_func); 7394 REG_WR(bp, reg, BP_ABS_FUNC(bp));
6576 new_val = REG_RD(bp, reg); 7395 REG_RD(bp, reg);
6577 if (new_val != orig_func) {
6578 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
6579 orig_func, new_val);
6580 BUG();
6581 }
6582} 7396}
6583 7397
6584static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func) 7398static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
6585{ 7399{
6586 if (CHIP_IS_E1H(bp)) 7400 if (CHIP_IS_E1(bp))
6587 bnx2x_undi_int_disable_e1h(bp, func);
6588 else
6589 bnx2x_int_disable(bp); 7401 bnx2x_int_disable(bp);
7402 else
7403 bnx2x_undi_int_disable_e1h(bp);
6590} 7404}
6591 7405
6592static void __devinit bnx2x_undi_unload(struct bnx2x *bp) 7406static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
@@ -6603,8 +7417,8 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6603 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); 7417 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6604 if (val == 0x7) { 7418 if (val == 0x7) {
6605 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 7419 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6606 /* save our func */ 7420 /* save our pf_num */
6607 int func = BP_FUNC(bp); 7421 int orig_pf_num = bp->pf_num;
6608 u32 swap_en; 7422 u32 swap_en;
6609 u32 swap_val; 7423 u32 swap_val;
6610 7424
@@ -6614,9 +7428,9 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6614 BNX2X_DEV_INFO("UNDI is active! reset device\n"); 7428 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6615 7429
6616 /* try unload UNDI on port 0 */ 7430 /* try unload UNDI on port 0 */
6617 bp->func = 0; 7431 bp->pf_num = 0;
6618 bp->fw_seq = 7432 bp->fw_seq =
6619 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) & 7433 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
6620 DRV_MSG_SEQ_NUMBER_MASK); 7434 DRV_MSG_SEQ_NUMBER_MASK);
6621 reset_code = bnx2x_fw_command(bp, reset_code, 0); 7435 reset_code = bnx2x_fw_command(bp, reset_code, 0);
6622 7436
@@ -6628,9 +7442,9 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6628 DRV_MSG_CODE_UNLOAD_DONE, 0); 7442 DRV_MSG_CODE_UNLOAD_DONE, 0);
6629 7443
6630 /* unload UNDI on port 1 */ 7444 /* unload UNDI on port 1 */
6631 bp->func = 1; 7445 bp->pf_num = 1;
6632 bp->fw_seq = 7446 bp->fw_seq =
6633 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) & 7447 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
6634 DRV_MSG_SEQ_NUMBER_MASK); 7448 DRV_MSG_SEQ_NUMBER_MASK);
6635 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 7449 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6636 7450
@@ -6640,7 +7454,7 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6640 /* now it's safe to release the lock */ 7454 /* now it's safe to release the lock */
6641 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); 7455 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6642 7456
6643 bnx2x_undi_int_disable(bp, func); 7457 bnx2x_undi_int_disable(bp);
6644 7458
6645 /* close input traffic and wait for it */ 7459 /* close input traffic and wait for it */
6646 /* Do not rcv packets to BRB */ 7460 /* Do not rcv packets to BRB */
@@ -6679,9 +7493,9 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6679 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); 7493 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
6680 7494
6681 /* restore our func and fw_seq */ 7495 /* restore our func and fw_seq */
6682 bp->func = func; 7496 bp->pf_num = orig_pf_num;
6683 bp->fw_seq = 7497 bp->fw_seq =
6684 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) & 7498 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
6685 DRV_MSG_SEQ_NUMBER_MASK); 7499 DRV_MSG_SEQ_NUMBER_MASK);
6686 7500
6687 } else 7501 } else
@@ -6705,20 +7519,42 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6705 val = REG_RD(bp, MISC_REG_BOND_ID); 7519 val = REG_RD(bp, MISC_REG_BOND_ID);
6706 id |= (val & 0xf); 7520 id |= (val & 0xf);
6707 bp->common.chip_id = id; 7521 bp->common.chip_id = id;
6708 bp->link_params.chip_id = bp->common.chip_id;
6709 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6710
6711 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
6712 7522
6713 /* Set doorbell size */ 7523 /* Set doorbell size */
6714 bp->db_size = (1 << BNX2X_DB_SHIFT); 7524 bp->db_size = (1 << BNX2X_DB_SHIFT);
6715 7525
7526 if (CHIP_IS_E2(bp)) {
7527 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
7528 if ((val & 1) == 0)
7529 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
7530 else
7531 val = (val >> 1) & 1;
7532 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
7533 "2_PORT_MODE");
7534 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
7535 CHIP_2_PORT_MODE;
7536
7537 if (CHIP_MODE_IS_4_PORT(bp))
7538 bp->pfid = (bp->pf_num >> 1); /* 0..3 */
7539 else
7540 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
7541 } else {
7542 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
7543 bp->pfid = bp->pf_num; /* 0..7 */
7544 }
7545
6716 /* 7546 /*
6717 * set base FW non-default (fast path) status block id, this value is 7547 * set base FW non-default (fast path) status block id, this value is
6718 * used to initialize the fw_sb_id saved on the fp/queue structure to 7548 * used to initialize the fw_sb_id saved on the fp/queue structure to
6719 * determine the id used by the FW. 7549 * determine the id used by the FW.
6720 */ 7550 */
6721 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x; 7551 if (CHIP_IS_E1x(bp))
7552 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
7553 else /* E2 */
7554 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
7555
7556 bp->link_params.chip_id = bp->common.chip_id;
7557 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6722 7558
6723 val = (REG_RD(bp, 0x2874) & 0x55); 7559 val = (REG_RD(bp, 0x2874) & 0x55);
6724 if ((bp->common.chip_id & 0x1) || 7560 if ((bp->common.chip_id & 0x1) ||
@@ -6734,15 +7570,15 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6734 bp->common.flash_size, bp->common.flash_size); 7570 bp->common.flash_size, bp->common.flash_size);
6735 7571
6736 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); 7572 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6737 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0); 7573 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
7574 MISC_REG_GENERIC_CR_1 :
7575 MISC_REG_GENERIC_CR_0));
6738 bp->link_params.shmem_base = bp->common.shmem_base; 7576 bp->link_params.shmem_base = bp->common.shmem_base;
6739 bp->link_params.shmem2_base = bp->common.shmem2_base; 7577 bp->link_params.shmem2_base = bp->common.shmem2_base;
6740 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n", 7578 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
6741 bp->common.shmem_base, bp->common.shmem2_base); 7579 bp->common.shmem_base, bp->common.shmem2_base);
6742 7580
6743 if (!bp->common.shmem_base || 7581 if (!bp->common.shmem_base) {
6744 (bp->common.shmem_base < 0xA0000) ||
6745 (bp->common.shmem_base >= 0xC0000)) {
6746 BNX2X_DEV_INFO("MCP not active\n"); 7582 BNX2X_DEV_INFO("MCP not active\n");
6747 bp->flags |= NO_MCP_FLAG; 7583 bp->flags |= NO_MCP_FLAG;
6748 return; 7584 return;
@@ -6751,7 +7587,7 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6751 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); 7587 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6752 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) 7588 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6753 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) 7589 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6754 BNX2X_ERROR("BAD MCP validity signature\n"); 7590 BNX2X_ERR("BAD MCP validity signature\n");
6755 7591
6756 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); 7592 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6757 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config); 7593 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
@@ -6775,8 +7611,8 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6775 if (val < BNX2X_BC_VER) { 7611 if (val < BNX2X_BC_VER) {
6776 /* for now only warn 7612 /* for now only warn
6777 * later we might need to enforce this */ 7613 * later we might need to enforce this */
6778 BNX2X_ERROR("This driver needs bc_ver %X but found %X, " 7614 BNX2X_ERR("This driver needs bc_ver %X but found %X, "
6779 "please upgrade BC\n", BNX2X_BC_VER, val); 7615 "please upgrade BC\n", BNX2X_BC_VER, val);
6780 } 7616 }
6781 bp->link_params.feature_config_flags |= 7617 bp->link_params.feature_config_flags |=
6782 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ? 7618 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
@@ -6804,6 +7640,57 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6804 val, val2, val3, val4); 7640 val, val2, val3, val4);
6805} 7641}
6806 7642
7643#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
7644#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
7645
7646static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
7647{
7648 int pfid = BP_FUNC(bp);
7649 int vn = BP_E1HVN(bp);
7650 int igu_sb_id;
7651 u32 val;
7652 u8 fid;
7653
7654 bp->igu_base_sb = 0xff;
7655 bp->igu_sb_cnt = 0;
7656 if (CHIP_INT_MODE_IS_BC(bp)) {
7657 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
7658 bp->l2_cid_count);
7659
7660 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
7661 FP_SB_MAX_E1x;
7662
7663 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
7664 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
7665
7666 return;
7667 }
7668
7669 /* IGU in normal mode - read CAM */
7670 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
7671 igu_sb_id++) {
7672 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
7673 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
7674 continue;
7675 fid = IGU_FID(val);
7676 if ((fid & IGU_FID_ENCODE_IS_PF)) {
7677 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
7678 continue;
7679 if (IGU_VEC(val) == 0)
7680 /* default status block */
7681 bp->igu_dsb_id = igu_sb_id;
7682 else {
7683 if (bp->igu_base_sb == 0xff)
7684 bp->igu_base_sb = igu_sb_id;
7685 bp->igu_sb_cnt++;
7686 }
7687 }
7688 }
7689 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, bp->l2_cid_count);
7690 if (bp->igu_sb_cnt == 0)
7691 BNX2X_ERR("CAM configuration error\n");
7692}
7693
6807static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp, 7694static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6808 u32 switch_cfg) 7695 u32 switch_cfg)
6809{ 7696{
@@ -7178,26 +8065,49 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7178 8065
7179static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) 8066static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7180{ 8067{
7181 int func = BP_FUNC(bp); 8068 int func = BP_ABS_FUNC(bp);
8069 int vn;
7182 u32 val, val2; 8070 u32 val, val2;
7183 int rc = 0; 8071 int rc = 0;
7184 8072
7185 bnx2x_get_common_hwinfo(bp); 8073 bnx2x_get_common_hwinfo(bp);
7186 8074
7187 bp->common.int_block = INT_BLOCK_HC; 8075 if (CHIP_IS_E1x(bp)) {
8076 bp->common.int_block = INT_BLOCK_HC;
8077
8078 bp->igu_dsb_id = DEF_SB_IGU_ID;
8079 bp->igu_base_sb = 0;
8080 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count);
8081 } else {
8082 bp->common.int_block = INT_BLOCK_IGU;
8083 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8084 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
8085 DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
8086 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
8087 } else
8088 DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
7188 8089
7189 bp->igu_dsb_id = DEF_SB_IGU_ID; 8090 bnx2x_get_igu_cam_info(bp);
7190 bp->igu_base_sb = 0; 8091
7191 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count); 8092 }
8093 DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
8094 bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
8095
8096 /*
8097 * Initialize MF configuration
8098 */
7192 8099
7193 bp->mf_ov = 0; 8100 bp->mf_ov = 0;
7194 bp->mf_mode = 0; 8101 bp->mf_mode = 0;
7195 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) { 8102 vn = BP_E1HVN(bp);
7196 8103 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
7197 bp->common.mf_cfg_base = bp->common.shmem_base + 8104 if (SHMEM2_HAS(bp, mf_cfg_addr))
8105 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
8106 else
8107 bp->common.mf_cfg_base = bp->common.shmem_base +
7198 offsetof(struct shmem_region, func_mb) + 8108 offsetof(struct shmem_region, func_mb) +
7199 E1H_FUNC_MAX * sizeof(struct drv_func_mb); 8109 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
7200 bp->mf_config = 8110 bp->mf_config[vn] =
7201 MF_CFG_RD(bp, func_mf_config[func].config); 8111 MF_CFG_RD(bp, func_mf_config[func].config);
7202 8112
7203 val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) & 8113 val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) &
@@ -7213,16 +8123,16 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7213 FUNC_MF_CFG_E1HOV_TAG_MASK); 8123 FUNC_MF_CFG_E1HOV_TAG_MASK);
7214 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 8124 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7215 bp->mf_ov = val; 8125 bp->mf_ov = val;
7216 BNX2X_DEV_INFO("E1HOV for func %d is %d " 8126 BNX2X_DEV_INFO("MF OV for func %d is %d "
7217 "(0x%04x)\n", 8127 "(0x%04x)\n",
7218 func, bp->mf_ov, bp->mf_ov); 8128 func, bp->mf_ov, bp->mf_ov);
7219 } else { 8129 } else {
7220 BNX2X_ERROR("No valid E1HOV for func %d," 8130 BNX2X_ERROR("No valid MF OV for func %d,"
7221 " aborting\n", func); 8131 " aborting\n", func);
7222 rc = -EPERM; 8132 rc = -EPERM;
7223 } 8133 }
7224 } else { 8134 } else {
7225 if (BP_E1HVN(bp)) { 8135 if (BP_VN(bp)) {
7226 BNX2X_ERROR("VN %d in single function mode," 8136 BNX2X_ERROR("VN %d in single function mode,"
7227 " aborting\n", BP_E1HVN(bp)); 8137 " aborting\n", BP_E1HVN(bp));
7228 rc = -EPERM; 8138 rc = -EPERM;
@@ -7230,15 +8140,25 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7230 } 8140 }
7231 } 8141 }
7232 8142
7233 /* adjust igu_sb_cnt to MF */ 8143 /* adjust igu_sb_cnt to MF for E1x */
7234 if (IS_MF(bp)) 8144 if (CHIP_IS_E1x(bp) && IS_MF(bp))
7235 bp->igu_sb_cnt /= E1HVN_MAX; 8145 bp->igu_sb_cnt /= E1HVN_MAX;
7236 8146
8147 /*
8148 * adjust E2 sb count: to be removed when FW will support
8149 * more then 16 L2 clients
8150 */
8151#define MAX_L2_CLIENTS 16
8152 if (CHIP_IS_E2(bp))
8153 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8154 MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
8155
7237 if (!BP_NOMCP(bp)) { 8156 if (!BP_NOMCP(bp)) {
7238 bnx2x_get_port_hwinfo(bp); 8157 bnx2x_get_port_hwinfo(bp);
7239 8158
7240 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) & 8159 bp->fw_seq =
7241 DRV_MSG_SEQ_NUMBER_MASK); 8160 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
8161 DRV_MSG_SEQ_NUMBER_MASK);
7242 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); 8162 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7243 } 8163 }
7244 8164
@@ -7338,7 +8258,7 @@ out_not_found:
7338 8258
7339static int __devinit bnx2x_init_bp(struct bnx2x *bp) 8259static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7340{ 8260{
7341 int func = BP_FUNC(bp); 8261 int func;
7342 int timer_interval; 8262 int timer_interval;
7343 int rc; 8263 int rc;
7344 8264
@@ -7362,6 +8282,9 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7362 rc = bnx2x_alloc_mem_bp(bp); 8282 rc = bnx2x_alloc_mem_bp(bp);
7363 8283
7364 bnx2x_read_fwinfo(bp); 8284 bnx2x_read_fwinfo(bp);
8285
8286 func = BP_FUNC(bp);
8287
7365 /* need to reset chip if undi was active */ 8288 /* need to reset chip if undi was active */
7366 if (!BP_NOMCP(bp)) 8289 if (!BP_NOMCP(bp))
7367 bnx2x_undi_unload(bp); 8290 bnx2x_undi_unload(bp);
@@ -7650,7 +8573,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
7650 bp->dev = dev; 8573 bp->dev = dev;
7651 bp->pdev = pdev; 8574 bp->pdev = pdev;
7652 bp->flags = 0; 8575 bp->flags = 0;
7653 bp->func = PCI_FUNC(pdev->devfn); 8576 bp->pf_num = PCI_FUNC(pdev->devfn);
7654 8577
7655 rc = pci_enable_device(pdev); 8578 rc = pci_enable_device(pdev);
7656 if (rc) { 8579 if (rc) {
@@ -7964,6 +8887,8 @@ int bnx2x_init_firmware(struct bnx2x *bp)
7964 fw_file_name = FW_FILE_NAME_E1; 8887 fw_file_name = FW_FILE_NAME_E1;
7965 else if (CHIP_IS_E1H(bp)) 8888 else if (CHIP_IS_E1H(bp))
7966 fw_file_name = FW_FILE_NAME_E1H; 8889 fw_file_name = FW_FILE_NAME_E1H;
8890 else if (CHIP_IS_E2(bp))
8891 fw_file_name = FW_FILE_NAME_E2;
7967 else { 8892 else {
7968 BNX2X_ERR("Unsupported chip revision\n"); 8893 BNX2X_ERR("Unsupported chip revision\n");
7969 return -EINVAL; 8894 return -EINVAL;
@@ -8047,8 +8972,25 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
8047 int pcie_width, pcie_speed; 8972 int pcie_width, pcie_speed;
8048 int rc, cid_count; 8973 int rc, cid_count;
8049 8974
8050 cid_count = FP_SB_MAX_E1x + CNIC_CONTEXT_USE; 8975 switch (ent->driver_data) {
8976 case BCM57710:
8977 case BCM57711:
8978 case BCM57711E:
8979 cid_count = FP_SB_MAX_E1x;
8980 break;
8981
8982 case BCM57712:
8983 case BCM57712E:
8984 cid_count = FP_SB_MAX_E2;
8985 break;
8051 8986
8987 default:
8988 pr_err("Unknown board_type (%ld), aborting\n",
8989 ent->driver_data);
8990 return ENODEV;
8991 }
8992
8993 cid_count += CNIC_CONTEXT_USE;
8052 /* dev zeroed in init_etherdev */ 8994 /* dev zeroed in init_etherdev */
8053 dev = alloc_etherdev_mq(sizeof(*bp), cid_count); 8995 dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
8054 if (!dev) { 8996 if (!dev) {
@@ -8086,7 +9028,10 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
8086 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx," 9028 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
8087 " IRQ %d, ", board_info[ent->driver_data].name, 9029 " IRQ %d, ", board_info[ent->driver_data].name,
8088 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), 9030 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
8089 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz", 9031 pcie_width,
9032 ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
9033 (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
9034 "5GHz (Gen2)" : "2.5GHz",
8090 dev->base_addr, bp->pdev->irq); 9035 dev->base_addr, bp->pdev->irq);
8091 pr_cont("node addr %pM\n", dev->dev_addr); 9036 pr_cont("node addr %pM\n", dev->dev_addr);
8092 9037
@@ -8199,8 +9144,9 @@ static void bnx2x_eeh_recover(struct bnx2x *bp)
8199 BNX2X_ERR("BAD MCP validity signature\n"); 9144 BNX2X_ERR("BAD MCP validity signature\n");
8200 9145
8201 if (!BP_NOMCP(bp)) { 9146 if (!BP_NOMCP(bp)) {
8202 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header) 9147 bp->fw_seq =
8203 & DRV_MSG_SEQ_NUMBER_MASK); 9148 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
9149 DRV_MSG_SEQ_NUMBER_MASK);
8204 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); 9150 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8205 } 9151 }
8206} 9152}
@@ -8283,7 +9229,8 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
8283 struct bnx2x *bp = netdev_priv(dev); 9229 struct bnx2x *bp = netdev_priv(dev);
8284 9230
8285 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { 9231 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
8286 printk(KERN_ERR "Handling parity error recovery. Try again later\n"); 9232 printk(KERN_ERR "Handling parity error recovery. "
9233 "Try again later\n");
8287 return; 9234 return;
8288 } 9235 }
8289 9236
@@ -8560,7 +9507,11 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
8560 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX; 9507 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
8561 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX; 9508 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
8562 } 9509 }
8563 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb; 9510 if (CHIP_IS_E2(bp))
9511 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
9512 else
9513 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
9514
8564 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp); 9515 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
8565 cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp); 9516 cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
8566 cp->irq_arr[1].status_blk = bp->def_status_blk; 9517 cp->irq_arr[1].status_blk = bp->def_status_blk;
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index 6be0d09ad3fd..18a86284ebcc 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -1,6 +1,6 @@
1/* bnx2x_reg.h: Broadcom Everest network driver. 1/* bnx2x_reg.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2009 Broadcom Corporation 3 * Copyright (c) 2007-2010 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -19,7 +19,20 @@
19 * 19 *
20 */ 20 */
21 21
22 22#define ATC_ATC_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
23#define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS (0x1<<2)
24#define ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU (0x1<<5)
25#define ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT (0x1<<3)
26#define ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR (0x1<<4)
27#define ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND (0x1<<1)
28/* [RW 1] Initiate the ATC array - reset all the valid bits */
29#define ATC_REG_ATC_INIT_ARRAY 0x1100b8
30/* [R 1] ATC initalization done */
31#define ATC_REG_ATC_INIT_DONE 0x1100bc
32/* [RC 6] Interrupt register #0 read clear */
33#define ATC_REG_ATC_INT_STS_CLR 0x1101c0
34/* [RW 19] Interrupt mask register #0 read/write */
35#define BRB1_REG_BRB1_INT_MASK 0x60128
23/* [R 19] Interrupt register #0 read */ 36/* [R 19] Interrupt register #0 read */
24#define BRB1_REG_BRB1_INT_STS 0x6011c 37#define BRB1_REG_BRB1_INT_STS 0x6011c
25/* [RW 4] Parity mask register #0 read/write */ 38/* [RW 4] Parity mask register #0 read/write */
@@ -27,9 +40,31 @@
27/* [R 4] Parity register #0 read */ 40/* [R 4] Parity register #0 read */
28#define BRB1_REG_BRB1_PRTY_STS 0x6012c 41#define BRB1_REG_BRB1_PRTY_STS 0x6012c
29/* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At 42/* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At
30 address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address 43 * address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address
31 BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. */ 44 * BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. Warning -
45 * following reset the first rbc access to this reg must be write; there can
46 * be no more rbc writes after the first one; there can be any number of rbc
47 * read following the first write; rbc access not following these rules will
48 * result in hang condition. */
32#define BRB1_REG_FREE_LIST_PRS_CRDT 0x60200 49#define BRB1_REG_FREE_LIST_PRS_CRDT 0x60200
50/* [RW 10] The number of free blocks below which the full signal to class 0
51 * is asserted */
52#define BRB1_REG_FULL_0_XOFF_THRESHOLD_0 0x601d0
53/* [RW 10] The number of free blocks above which the full signal to class 0
54 * is de-asserted */
55#define BRB1_REG_FULL_0_XON_THRESHOLD_0 0x601d4
56/* [RW 10] The number of free blocks below which the full signal to class 1
57 * is asserted */
58#define BRB1_REG_FULL_1_XOFF_THRESHOLD_0 0x601d8
59/* [RW 10] The number of free blocks above which the full signal to class 1
60 * is de-asserted */
61#define BRB1_REG_FULL_1_XON_THRESHOLD_0 0x601dc
62/* [RW 10] The number of free blocks below which the full signal to the LB
63 * port is asserted */
64#define BRB1_REG_FULL_LB_XOFF_THRESHOLD 0x601e0
65/* [RW 10] The number of free blocks above which the full signal to the LB
66 * port is de-asserted */
67#define BRB1_REG_FULL_LB_XON_THRESHOLD 0x601e4
33/* [RW 10] The number of free blocks above which the High_llfc signal to 68/* [RW 10] The number of free blocks above which the High_llfc signal to
34 interface #n is de-asserted. */ 69 interface #n is de-asserted. */
35#define BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD_0 0x6014c 70#define BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD_0 0x6014c
@@ -44,6 +79,9 @@
44/* [RW 10] The number of free blocks below which the Low_llfc signal to 79/* [RW 10] The number of free blocks below which the Low_llfc signal to
45 interface #n is asserted. */ 80 interface #n is asserted. */
46#define BRB1_REG_LOW_LLFC_LOW_THRESHOLD_0 0x6015c 81#define BRB1_REG_LOW_LLFC_LOW_THRESHOLD_0 0x6015c
82/* [RW 10] The number of blocks guarantied for the MAC port */
83#define BRB1_REG_MAC_GUARANTIED_0 0x601e8
84#define BRB1_REG_MAC_GUARANTIED_1 0x60240
47/* [R 24] The number of full blocks. */ 85/* [R 24] The number of full blocks. */
48#define BRB1_REG_NUM_OF_FULL_BLOCKS 0x60090 86#define BRB1_REG_NUM_OF_FULL_BLOCKS 0x60090
49/* [ST 32] The number of cycles that the write_full signal towards MAC #0 87/* [ST 32] The number of cycles that the write_full signal towards MAC #0
@@ -55,7 +93,19 @@
55 asserted. */ 93 asserted. */
56#define BRB1_REG_NUM_OF_PAUSE_CYCLES_0 0x600b8 94#define BRB1_REG_NUM_OF_PAUSE_CYCLES_0 0x600b8
57#define BRB1_REG_NUM_OF_PAUSE_CYCLES_1 0x600bc 95#define BRB1_REG_NUM_OF_PAUSE_CYCLES_1 0x600bc
58/* [RW 10] Write client 0: De-assert pause threshold. */ 96/* [RW 10] The number of free blocks below which the pause signal to class 0
97 * is asserted */
98#define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 0x601c0
99/* [RW 10] The number of free blocks above which the pause signal to class 0
100 * is de-asserted */
101#define BRB1_REG_PAUSE_0_XON_THRESHOLD_0 0x601c4
102/* [RW 10] The number of free blocks below which the pause signal to class 1
103 * is asserted */
104#define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0 0x601c8
105/* [RW 10] The number of free blocks above which the pause signal to class 1
106 * is de-asserted */
107#define BRB1_REG_PAUSE_1_XON_THRESHOLD_0 0x601cc
108/* [RW 10] Write client 0: De-assert pause threshold. Not Functional */
59#define BRB1_REG_PAUSE_HIGH_THRESHOLD_0 0x60078 109#define BRB1_REG_PAUSE_HIGH_THRESHOLD_0 0x60078
60#define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 0x6007c 110#define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 0x6007c
61/* [RW 10] Write client 0: Assert pause threshold. */ 111/* [RW 10] Write client 0: Assert pause threshold. */
@@ -362,6 +412,7 @@
362#define CFC_REG_NUM_LCIDS_ARRIVING 0x104004 412#define CFC_REG_NUM_LCIDS_ARRIVING 0x104004
363/* [R 9] Number of Leaving LCIDs in Link List Block */ 413/* [R 9] Number of Leaving LCIDs in Link List Block */
364#define CFC_REG_NUM_LCIDS_LEAVING 0x104018 414#define CFC_REG_NUM_LCIDS_LEAVING 0x104018
415#define CFC_REG_WEAK_ENABLE_PF 0x104124
365/* [RW 8] The event id for aggregated interrupt 0 */ 416/* [RW 8] The event id for aggregated interrupt 0 */
366#define CSDM_REG_AGG_INT_EVENT_0 0xc2038 417#define CSDM_REG_AGG_INT_EVENT_0 0xc2038
367#define CSDM_REG_AGG_INT_EVENT_10 0xc2060 418#define CSDM_REG_AGG_INT_EVENT_10 0xc2060
@@ -590,10 +641,17 @@
590#define CSEM_REG_TS_8_AS 0x200058 641#define CSEM_REG_TS_8_AS 0x200058
591/* [RW 3] The arbitration scheme of time_slot 9 */ 642/* [RW 3] The arbitration scheme of time_slot 9 */
592#define CSEM_REG_TS_9_AS 0x20005c 643#define CSEM_REG_TS_9_AS 0x20005c
644/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
645 * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
646#define CSEM_REG_VFPF_ERR_NUM 0x200380
593/* [RW 1] Parity mask register #0 read/write */ 647/* [RW 1] Parity mask register #0 read/write */
594#define DBG_REG_DBG_PRTY_MASK 0xc0a8 648#define DBG_REG_DBG_PRTY_MASK 0xc0a8
595/* [R 1] Parity register #0 read */ 649/* [R 1] Parity register #0 read */
596#define DBG_REG_DBG_PRTY_STS 0xc09c 650#define DBG_REG_DBG_PRTY_STS 0xc09c
651/* [RW 1] When set the DMAE will process the commands as in E1.5. 1.The
652 * function that is used is always SRC-PCI; 2.VF_Valid = 0; 3.VFID=0;
653 * 4.Completion function=0; 5.Error handling=0 */
654#define DMAE_REG_BACKWARD_COMP_EN 0x10207c
597/* [RW 32] Commands memory. The address to command X; row Y is to calculated 655/* [RW 32] Commands memory. The address to command X; row Y is to calculated
598 as 14*X+Y. */ 656 as 14*X+Y. */
599#define DMAE_REG_CMD_MEM 0x102400 657#define DMAE_REG_CMD_MEM 0x102400
@@ -758,6 +816,92 @@
758#define HC_REG_USTORM_ADDR_FOR_COALESCE 0x108068 816#define HC_REG_USTORM_ADDR_FOR_COALESCE 0x108068
759#define HC_REG_VQID_0 0x108008 817#define HC_REG_VQID_0 0x108008
760#define HC_REG_VQID_1 0x10800c 818#define HC_REG_VQID_1 0x10800c
819#define IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN (0x1<<1)
820#define IGU_REG_ATTENTION_ACK_BITS 0x130108
821/* [R 4] Debug: attn_fsm */
822#define IGU_REG_ATTN_FSM 0x130054
823#define IGU_REG_ATTN_MSG_ADDR_H 0x13011c
824#define IGU_REG_ATTN_MSG_ADDR_L 0x130120
825/* [R 4] Debug: [3] - attention write done message is pending (0-no pending;
826 * 1-pending). [2:0] = PFID. Pending means attention message was sent; but
827 * write done didnt receive. */
828#define IGU_REG_ATTN_WRITE_DONE_PENDING 0x130030
829#define IGU_REG_BLOCK_CONFIGURATION 0x130000
830#define IGU_REG_COMMAND_REG_32LSB_DATA 0x130124
831#define IGU_REG_COMMAND_REG_CTRL 0x13012c
832/* [WB_R 32] Cleanup bit status per SB. 1 = cleanup is set. 0 = cleanup bit
833 * is clear. The bits in this registers are set and clear via the producer
834 * command. Data valid only in addresses 0-4. all the rest are zero. */
835#define IGU_REG_CSTORM_TYPE_0_SB_CLEANUP 0x130200
836/* [R 5] Debug: ctrl_fsm */
837#define IGU_REG_CTRL_FSM 0x130064
838/* [R 1] data availble for error memory. If this bit is clear do not red
839 * from error_handling_memory. */
840#define IGU_REG_ERROR_HANDLING_DATA_VALID 0x130130
841/* [R 11] Parity register #0 read */
842#define IGU_REG_IGU_PRTY_STS 0x13009c
843/* [R 4] Debug: int_handle_fsm */
844#define IGU_REG_INT_HANDLE_FSM 0x130050
845#define IGU_REG_LEADING_EDGE_LATCH 0x130134
846/* [RW 14] mapping CAM; relevant for E2 operating mode only. [0] - valid.
847 * [6:1] - vector number; [13:7] - FID (if VF - [13] = 0; [12:7] = VF
848 * number; if PF - [13] = 1; [12:10] = 0; [9:7] = PF number); */
849#define IGU_REG_MAPPING_MEMORY 0x131000
850#define IGU_REG_MAPPING_MEMORY_SIZE 136
851#define IGU_REG_PBA_STATUS_LSB 0x130138
852#define IGU_REG_PBA_STATUS_MSB 0x13013c
853#define IGU_REG_PCI_PF_MSI_EN 0x130140
854#define IGU_REG_PCI_PF_MSIX_EN 0x130144
855#define IGU_REG_PCI_PF_MSIX_FUNC_MASK 0x130148
856/* [WB_R 32] Each bit represent the pending bits status for that SB. 0 = no
857 * pending; 1 = pending. Pendings means interrupt was asserted; and write
858 * done was not received. Data valid only in addresses 0-4. all the rest are
859 * zero. */
860#define IGU_REG_PENDING_BITS_STATUS 0x130300
861#define IGU_REG_PF_CONFIGURATION 0x130154
862/* [RW 20] producers only. E2 mode: address 0-135 match to the mapping
863 * memory; 136 - PF0 default prod; 137 PF1 default prod; 138 - PF2 default
864 * prod; 139 PF3 default prod; 140 - PF0 - ATTN prod; 141 - PF1 - ATTN prod;
865 * 142 - PF2 - ATTN prod; 143 - PF3 - ATTN prod; 144-147 reserved. E1.5 mode
866 * - In backward compatible mode; for non default SB; each even line in the
867 * memory holds the U producer and each odd line hold the C producer. The
868 * first 128 producer are for NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The
869 * last 20 producers are for the DSB for each PF. each PF has five segments
870 * (the order inside each segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
871 * 132-135 C prods; 136-139 X prods; 140-143 T prods; 144-147 ATTN prods; */
872#define IGU_REG_PROD_CONS_MEMORY 0x132000
873/* [R 3] Debug: pxp_arb_fsm */
874#define IGU_REG_PXP_ARB_FSM 0x130068
875/* [RW 6] Write one for each bit will reset the appropriate memory. When the
876 * memory reset finished the appropriate bit will be clear. Bit 0 - mapping
877 * memory; Bit 1 - SB memory; Bit 2 - SB interrupt and mask register; Bit 3
878 * - MSIX memory; Bit 4 - PBA memory; Bit 5 - statistics; */
879#define IGU_REG_RESET_MEMORIES 0x130158
880/* [R 4] Debug: sb_ctrl_fsm */
881#define IGU_REG_SB_CTRL_FSM 0x13004c
882#define IGU_REG_SB_INT_BEFORE_MASK_LSB 0x13015c
883#define IGU_REG_SB_INT_BEFORE_MASK_MSB 0x130160
884#define IGU_REG_SB_MASK_LSB 0x130164
885#define IGU_REG_SB_MASK_MSB 0x130168
886/* [RW 16] Number of command that were dropped without causing an interrupt
887 * due to: read access for WO BAR address; or write access for RO BAR
888 * address or any access for reserved address or PCI function error is set
889 * and address is not MSIX; PBA or cleanup */
890#define IGU_REG_SILENT_DROP 0x13016c
891/* [RW 10] Number of MSI/MSIX/ATTN messages sent for the function: 0-63 -
892 * number of MSIX messages per VF; 64-67 - number of MSI/MSIX messages per
893 * PF; 68-71 number of ATTN messages per PF */
894#define IGU_REG_STATISTIC_NUM_MESSAGE_SENT 0x130800
895/* [RW 32] Number of cycles the timer mask masking the IGU interrupt when a
896 * timer mask command arrives. Value must be bigger than 100. */
897#define IGU_REG_TIMER_MASKING_VALUE 0x13003c
898#define IGU_REG_TRAILING_EDGE_LATCH 0x130104
899#define IGU_REG_VF_CONFIGURATION 0x130170
900/* [WB_R 32] Each bit represent write done pending bits status for that SB
901 * (MSI/MSIX message was sent and write done was not received yet). 0 =
902 * clear; 1 = set. Data valid only in addresses 0-4. all the rest are zero. */
903#define IGU_REG_WRITE_DONE_PENDING 0x130480
904#define MCP_A_REG_MCPR_SCRATCH 0x3a0000
761#define MCP_REG_MCPR_NVM_ACCESS_ENABLE 0x86424 905#define MCP_REG_MCPR_NVM_ACCESS_ENABLE 0x86424
762#define MCP_REG_MCPR_NVM_ADDR 0x8640c 906#define MCP_REG_MCPR_NVM_ADDR 0x8640c
763#define MCP_REG_MCPR_NVM_CFG4 0x8642c 907#define MCP_REG_MCPR_NVM_CFG4 0x8642c
@@ -880,6 +1024,11 @@
880 rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP Latched 1024 rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP Latched
881 ump_tx_parity; [31] MCP Latched scpad_parity; */ 1025 ump_tx_parity; [31] MCP Latched scpad_parity; */
882#define MISC_REG_AEU_AFTER_INVERT_4_MCP 0xa458 1026#define MISC_REG_AEU_AFTER_INVERT_4_MCP 0xa458
1027/* [R 32] Read fifth 32 bit after inversion of function 0. Mapped as
1028 * follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
1029 * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
1030 * CNIG attention (reserved); [7] CNIG parity (reserved); [31-8] Reserved; */
1031#define MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 0xa700
883/* [W 14] write to this register results with the clear of the latched 1032/* [W 14] write to this register results with the clear of the latched
884 signals; one in d0 clears RBCR latch; one in d1 clears RBCT latch; one in 1033 signals; one in d0 clears RBCR latch; one in d1 clears RBCT latch; one in
885 d2 clears RBCN latch; one in d3 clears RBCU latch; one in d4 clears RBCP 1034 d2 clears RBCN latch; one in d3 clears RBCU latch; one in d4 clears RBCP
@@ -1251,6 +1400,7 @@
1251#define MISC_REG_E1HMF_MODE 0xa5f8 1400#define MISC_REG_E1HMF_MODE 0xa5f8
1252/* [RW 32] Debug only: spare RW register reset by core reset */ 1401/* [RW 32] Debug only: spare RW register reset by core reset */
1253#define MISC_REG_GENERIC_CR_0 0xa460 1402#define MISC_REG_GENERIC_CR_0 0xa460
1403#define MISC_REG_GENERIC_CR_1 0xa464
1254/* [RW 32] Debug only: spare RW register reset by por reset */ 1404/* [RW 32] Debug only: spare RW register reset by por reset */
1255#define MISC_REG_GENERIC_POR_1 0xa474 1405#define MISC_REG_GENERIC_POR_1 0xa474
1256/* [RW 32] GPIO. [31-28] FLOAT port 0; [27-24] FLOAT port 0; When any of 1406/* [RW 32] GPIO. [31-28] FLOAT port 0; [27-24] FLOAT port 0; When any of
@@ -1373,6 +1523,14 @@
1373#define MISC_REG_PLL_STORM_CTRL_2 0xa298 1523#define MISC_REG_PLL_STORM_CTRL_2 0xa298
1374#define MISC_REG_PLL_STORM_CTRL_3 0xa29c 1524#define MISC_REG_PLL_STORM_CTRL_3 0xa29c
1375#define MISC_REG_PLL_STORM_CTRL_4 0xa2a0 1525#define MISC_REG_PLL_STORM_CTRL_4 0xa2a0
1526/* [R 1] Status of 4 port mode enable input pin. */
1527#define MISC_REG_PORT4MODE_EN 0xa750
1528/* [RW 2] 4 port mode enable overwrite.[0] - Overwrite control; if it is 0 -
1529 * the port4mode_en output is equal to 4 port mode input pin; if it is 1 -
1530 * the port4mode_en output is equal to bit[1] of this register; [1] -
1531 * Overwrite value. If bit[0] of this register is 1 this is the value that
1532 * receives the port4mode_en output . */
1533#define MISC_REG_PORT4MODE_EN_OVWR 0xa720
1376/* [RW 32] reset reg#2; rite/read one = the specific block is out of reset; 1534/* [RW 32] reset reg#2; rite/read one = the specific block is out of reset;
1377 write/read zero = the specific block is in reset; addr 0-wr- the write 1535 write/read zero = the specific block is in reset; addr 0-wr- the write
1378 value will be written to the register; addr 1-set - one will be written 1536 value will be written to the register; addr 1-set - one will be written
@@ -1656,8 +1814,91 @@
1656/* [R 32] Interrupt register #0 read */ 1814/* [R 32] Interrupt register #0 read */
1657#define NIG_REG_NIG_INT_STS_0 0x103b0 1815#define NIG_REG_NIG_INT_STS_0 0x103b0
1658#define NIG_REG_NIG_INT_STS_1 0x103c0 1816#define NIG_REG_NIG_INT_STS_1 0x103c0
1659/* [R 32] Parity register #0 read */ 1817/* [R 32] Legacy E1 and E1H location for parity error status register. */
1660#define NIG_REG_NIG_PRTY_STS 0x103d0 1818#define NIG_REG_NIG_PRTY_STS 0x103d0
1819/* [R 32] Parity register #0 read */
1820#define NIG_REG_NIG_PRTY_STS_0 0x183bc
1821#define NIG_REG_NIG_PRTY_STS_1 0x183cc
1822/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
1823 * Ethernet header. */
1824#define NIG_REG_P0_HDRS_AFTER_BASIC 0x18038
1825/* [RW 1] HW PFC enable bit. Set this bit to enable the PFC functionality in
1826 * the NIG. Other flow control modes such as PAUSE and SAFC/LLFC should be
1827 * disabled when this bit is set. */
1828#define NIG_REG_P0_HWPFC_ENABLE 0x18078
1829#define NIG_REG_P0_LLH_FUNC_MEM2 0x18480
1830#define NIG_REG_P0_LLH_FUNC_MEM2_ENABLE 0x18440
1831/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for
1832 * future expansion) each priorty is to be mapped to. Bits 3:0 specify the
1833 * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit
1834 * priority field is extracted from the outer-most VLAN in receive packet.
1835 * Only COS 0 and COS 1 are supported in E2. */
1836#define NIG_REG_P0_PKT_PRIORITY_TO_COS 0x18054
1837/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A
1838 * priority is mapped to COS 0 when the corresponding mask bit is 1. More
1839 * than one bit may be set; allowing multiple priorities to be mapped to one
1840 * COS. */
1841#define NIG_REG_P0_RX_COS0_PRIORITY_MASK 0x18058
1842/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 1. A
1843 * priority is mapped to COS 1 when the corresponding mask bit is 1. More
1844 * than one bit may be set; allowing multiple priorities to be mapped to one
1845 * COS. */
1846#define NIG_REG_P0_RX_COS1_PRIORITY_MASK 0x1805c
1847/* [RW 15] Specify which of the credit registers the client is to be mapped
1848 * to. Bits[2:0] are for client 0; bits [14:12] are for client 4. For
1849 * clients that are not subject to WFQ credit blocking - their
1850 * specifications here are not used. */
1851#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP 0x180f0
1852/* [RW 5] Specify whether the client competes directly in the strict
1853 * priority arbiter. The bits are mapped according to client ID (client IDs
1854 * are defined in tx_arb_priority_client). Default value is set to enable
1855 * strict priorities for clients 0-2 -- management and debug traffic. */
1856#define NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT 0x180e8
1857/* [RW 5] Specify whether the client is subject to WFQ credit blocking. The
1858 * bits are mapped according to client ID (client IDs are defined in
1859 * tx_arb_priority_client). Default value is 0 for not using WFQ credit
1860 * blocking. */
1861#define NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x180ec
1862/* [RW 32] Specify the upper bound that credit register 0 is allowed to
1863 * reach. */
1864#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0 0x1810c
1865#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1 0x18110
1866/* [RW 32] Specify the weight (in bytes) to be added to credit register 0
1867 * when it is time to increment. */
1868#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0 0x180f8
1869#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1 0x180fc
1870/* [RW 12] Specify the number of strict priority arbitration slots between
1871 * two round-robin arbitration slots to avoid starvation. A value of 0 means
1872 * no strict priority cycles - the strict priority with anti-starvation
1873 * arbiter becomes a round-robin arbiter. */
1874#define NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS 0x180f4
1875/* [RW 15] Specify the client number to be assigned to each priority of the
1876 * strict priority arbiter. Priority 0 is the highest priority. Bits [2:0]
1877 * are for priority 0 client; bits [14:12] are for priority 4 client. The
1878 * clients are assigned the following IDs: 0-management; 1-debug traffic
1879 * from this port; 2-debug traffic from other port; 3-COS0 traffic; 4-COS1
1880 * traffic. The reset value[14:0] is set to 0x4688 (15'b100_011_010_001_000)
1881 * for management at priority 0; debug traffic at priorities 1 and 2; COS0
1882 * traffic at priority 3; and COS1 traffic at priority 4. */
1883#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT 0x180e4
1884#define NIG_REG_P1_LLH_FUNC_MEM2 0x184c0
1885#define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE 0x18460
1886/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for
1887 * future expansion) each priorty is to be mapped to. Bits 3:0 specify the
1888 * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit
1889 * priority field is extracted from the outer-most VLAN in receive packet.
1890 * Only COS 0 and COS 1 are supported in E2. */
1891#define NIG_REG_P1_PKT_PRIORITY_TO_COS 0x181a8
1892/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A
1893 * priority is mapped to COS 0 when the corresponding mask bit is 1. More
1894 * than one bit may be set; allowing multiple priorities to be mapped to one
1895 * COS. */
1896#define NIG_REG_P1_RX_COS0_PRIORITY_MASK 0x181ac
1897/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 1. A
1898 * priority is mapped to COS 1 when the corresponding mask bit is 1. More
1899 * than one bit may be set; allowing multiple priorities to be mapped to one
1900 * COS. */
1901#define NIG_REG_P1_RX_COS1_PRIORITY_MASK 0x181b0
1661/* [RW 1] Pause enable for port0. This register may get 1 only when 1902/* [RW 1] Pause enable for port0. This register may get 1 only when
1662 ~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable =0 for the same 1903 ~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable =0 for the same
1663 port */ 1904 port */
@@ -1742,6 +1983,10 @@
1742/* [RW 1] Disable processing further tasks from port 4 (after ending the 1983/* [RW 1] Disable processing further tasks from port 4 (after ending the
1743 current task in process). */ 1984 current task in process). */
1744#define PBF_REG_DISABLE_NEW_TASK_PROC_P4 0x14006c 1985#define PBF_REG_DISABLE_NEW_TASK_PROC_P4 0x14006c
1986#define PBF_REG_DISABLE_PF 0x1402e8
1987/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
1988 * Ethernet header. */
1989#define PBF_REG_HDRS_AFTER_BASIC 0x15c0a8
1745#define PBF_REG_IF_ENABLE_REG 0x140044 1990#define PBF_REG_IF_ENABLE_REG 0x140044
1746/* [RW 1] Init bit. When set the initial credits are copied to the credit 1991/* [RW 1] Init bit. When set the initial credits are copied to the credit
1747 registers (except the port credits). Should be set and then reset after 1992 registers (except the port credits). Should be set and then reset after
@@ -1765,6 +2010,8 @@
1765#define PBF_REG_MAC_IF1_ENABLE 0x140034 2010#define PBF_REG_MAC_IF1_ENABLE 0x140034
1766/* [RW 1] Enable for the loopback interface. */ 2011/* [RW 1] Enable for the loopback interface. */
1767#define PBF_REG_MAC_LB_ENABLE 0x140040 2012#define PBF_REG_MAC_LB_ENABLE 0x140040
2013/* [RW 6] Bit-map indicating which headers must appear in the packet */
2014#define PBF_REG_MUST_HAVE_HDRS 0x15c0c4
1768/* [RW 10] Port 0 threshold used by arbiter in 16 byte lines used when pause 2015/* [RW 10] Port 0 threshold used by arbiter in 16 byte lines used when pause
1769 not suppoterd. */ 2016 not suppoterd. */
1770#define PBF_REG_P0_ARB_THRSH 0x1400e4 2017#define PBF_REG_P0_ARB_THRSH 0x1400e4
@@ -1804,6 +2051,259 @@
1804#define PB_REG_PB_PRTY_MASK 0x38 2051#define PB_REG_PB_PRTY_MASK 0x38
1805/* [R 4] Parity register #0 read */ 2052/* [R 4] Parity register #0 read */
1806#define PB_REG_PB_PRTY_STS 0x2c 2053#define PB_REG_PB_PRTY_STS 0x2c
2054#define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
2055#define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW (0x1<<8)
2056#define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR (0x1<<1)
2057#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN (0x1<<6)
2058#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN (0x1<<7)
2059#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN (0x1<<4)
2060#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN (0x1<<3)
2061#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN (0x1<<5)
2062#define PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN (0x1<<2)
2063/* [R 8] Config space A attention dirty bits. Each bit indicates that the
2064 * corresponding PF generates config space A attention. Set by PXP. Reset by
2065 * MCP writing 1 to icfg_space_a_request_clr. Note: register contains bits
2066 * from both paths. */
2067#define PGLUE_B_REG_CFG_SPACE_A_REQUEST 0x9010
2068/* [R 8] Config space B attention dirty bits. Each bit indicates that the
2069 * corresponding PF generates config space B attention. Set by PXP. Reset by
2070 * MCP writing 1 to icfg_space_b_request_clr. Note: register contains bits
2071 * from both paths. */
2072#define PGLUE_B_REG_CFG_SPACE_B_REQUEST 0x9014
2073/* [RW 1] Type A PF enable inbound interrupt table for CSDM. 0 - disable; 1
2074 * - enable. */
2075#define PGLUE_B_REG_CSDM_INB_INT_A_PF_ENABLE 0x9194
2076/* [RW 18] Type B VF inbound interrupt table for CSDM: bits[17:9]-mask;
2077 * its[8:0]-address. Bits [1:0] must be zero (DW resolution address). */
2078#define PGLUE_B_REG_CSDM_INB_INT_B_VF 0x916c
2079/* [RW 1] Type B VF enable inbound interrupt table for CSDM. 0 - disable; 1
2080 * - enable. */
2081#define PGLUE_B_REG_CSDM_INB_INT_B_VF_ENABLE 0x919c
2082/* [RW 16] Start offset of CSDM zone A (queue zone) in the internal RAM */
2083#define PGLUE_B_REG_CSDM_START_OFFSET_A 0x9100
2084/* [RW 16] Start offset of CSDM zone B (legacy zone) in the internal RAM */
2085#define PGLUE_B_REG_CSDM_START_OFFSET_B 0x9108
2086/* [RW 5] VF Shift of CSDM zone B (legacy zone) in the internal RAM */
2087#define PGLUE_B_REG_CSDM_VF_SHIFT_B 0x9110
2088/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
2089#define PGLUE_B_REG_CSDM_ZONE_A_SIZE_PF 0x91ac
2090/* [R 8] FLR request attention dirty bits for PFs 0 to 7. Each bit indicates
2091 * that the FLR register of the corresponding PF was set. Set by PXP. Reset
2092 * by MCP writing 1 to flr_request_pf_7_0_clr. Note: register contains bits
2093 * from both paths. */
2094#define PGLUE_B_REG_FLR_REQUEST_PF_7_0 0x9028
2095/* [W 8] FLR request attention dirty bits clear for PFs 0 to 7. MCP writes 1
2096 * to a bit in this register in order to clear the corresponding bit in
2097 * flr_request_pf_7_0 register. Note: register contains bits from both
2098 * paths. */
2099#define PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR 0x9418
2100/* [R 32] FLR request attention dirty bits for VFs 96 to 127. Each bit
2101 * indicates that the FLR register of the corresponding VF was set. Set by
2102 * PXP. Reset by MCP writing 1 to flr_request_vf_127_96_clr. */
2103#define PGLUE_B_REG_FLR_REQUEST_VF_127_96 0x9024
2104/* [R 32] FLR request attention dirty bits for VFs 0 to 31. Each bit
2105 * indicates that the FLR register of the corresponding VF was set. Set by
2106 * PXP. Reset by MCP writing 1 to flr_request_vf_31_0_clr. */
2107#define PGLUE_B_REG_FLR_REQUEST_VF_31_0 0x9018
2108/* [R 32] FLR request attention dirty bits for VFs 32 to 63. Each bit
2109 * indicates that the FLR register of the corresponding VF was set. Set by
2110 * PXP. Reset by MCP writing 1 to flr_request_vf_63_32_clr. */
2111#define PGLUE_B_REG_FLR_REQUEST_VF_63_32 0x901c
2112/* [R 32] FLR request attention dirty bits for VFs 64 to 95. Each bit
2113 * indicates that the FLR register of the corresponding VF was set. Set by
2114 * PXP. Reset by MCP writing 1 to flr_request_vf_95_64_clr. */
2115#define PGLUE_B_REG_FLR_REQUEST_VF_95_64 0x9020
2116/* [R 8] Each bit indicates an incorrect behavior in user RX interface. Bit
2117 * 0 - Target memory read arrived with a correctable error. Bit 1 - Target
2118 * memory read arrived with an uncorrectable error. Bit 2 - Configuration RW
2119 * arrived with a correctable error. Bit 3 - Configuration RW arrived with
2120 * an uncorrectable error. Bit 4 - Completion with Configuration Request
2121 * Retry Status. Bit 5 - Expansion ROM access received with a write request.
2122 * Bit 6 - Completion with pcie_rx_err of 0000; CMPL_STATUS of non-zero; and
2123 * pcie_rx_last not asserted. Bit 7 - Completion with pcie_rx_err of 1010;
2124 * and pcie_rx_last not asserted. */
2125#define PGLUE_B_REG_INCORRECT_RCV_DETAILS 0x9068
2126#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER 0x942c
2127#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ 0x9430
2128#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_WRITE 0x9434
2129#define PGLUE_B_REG_INTERNAL_VFID_ENABLE 0x9438
2130/* [R 9] Interrupt register #0 read */
2131#define PGLUE_B_REG_PGLUE_B_INT_STS 0x9298
2132/* [RC 9] Interrupt register #0 read clear */
2133#define PGLUE_B_REG_PGLUE_B_INT_STS_CLR 0x929c
2134/* [R 2] Parity register #0 read */
2135#define PGLUE_B_REG_PGLUE_B_PRTY_STS 0x92a8
2136/* [R 13] Details of first request received with error. [2:0] - PFID. [3] -
2137 * VF_VALID. [9:4] - VFID. [11:10] - Error Code - 0 - Indicates Completion
2138 * Timeout of a User Tx non-posted request. 1 - unsupported request. 2 -
2139 * completer abort. 3 - Illegal value for this field. [12] valid - indicates
2140 * if there was a completion error since the last time this register was
2141 * cleared. */
2142#define PGLUE_B_REG_RX_ERR_DETAILS 0x9080
2143/* [R 18] Details of first ATS Translation Completion request received with
2144 * error. [2:0] - PFID. [3] - VF_VALID. [9:4] - VFID. [11:10] - Error Code -
2145 * 0 - Indicates Completion Timeout of a User Tx non-posted request. 1 -
2146 * unsupported request. 2 - completer abort. 3 - Illegal value for this
2147 * field. [16:12] - ATC OTB EntryID. [17] valid - indicates if there was a
2148 * completion error since the last time this register was cleared. */
2149#define PGLUE_B_REG_RX_TCPL_ERR_DETAILS 0x9084
2150/* [W 8] Debug only - Shadow BME bits clear for PFs 0 to 7. MCP writes 1 to
2151 * a bit in this register in order to clear the corresponding bit in
2152 * shadow_bme_pf_7_0 register. MCP should never use this unless a
2153 * work-around is needed. Note: register contains bits from both paths. */
2154#define PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR 0x9458
2155/* [R 8] SR IOV disabled attention dirty bits. Each bit indicates that the
2156 * VF enable register of the corresponding PF is written to 0 and was
2157 * previously 1. Set by PXP. Reset by MCP writing 1 to
2158 * sr_iov_disabled_request_clr. Note: register contains bits from both
2159 * paths. */
2160#define PGLUE_B_REG_SR_IOV_DISABLED_REQUEST 0x9030
2161/* [R 32] Indicates the status of tags 32-63. 0 - tags is used - read
2162 * completion did not return yet. 1 - tag is unused. Same functionality as
2163 * pxp2_registers_pgl_exp_rom_data2 for tags 0-31. */
2164#define PGLUE_B_REG_TAGS_63_32 0x9244
2165/* [RW 1] Type A PF enable inbound interrupt table for TSDM. 0 - disable; 1
2166 * - enable. */
2167#define PGLUE_B_REG_TSDM_INB_INT_A_PF_ENABLE 0x9170
2168/* [RW 16] Start offset of TSDM zone A (queue zone) in the internal RAM */
2169#define PGLUE_B_REG_TSDM_START_OFFSET_A 0x90c4
2170/* [RW 16] Start offset of TSDM zone B (legacy zone) in the internal RAM */
2171#define PGLUE_B_REG_TSDM_START_OFFSET_B 0x90cc
2172/* [RW 5] VF Shift of TSDM zone B (legacy zone) in the internal RAM */
2173#define PGLUE_B_REG_TSDM_VF_SHIFT_B 0x90d4
2174/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
2175#define PGLUE_B_REG_TSDM_ZONE_A_SIZE_PF 0x91a0
2176/* [R 32] Address [31:0] of first read request not submitted due to error */
2177#define PGLUE_B_REG_TX_ERR_RD_ADD_31_0 0x9098
2178/* [R 32] Address [63:32] of first read request not submitted due to error */
2179#define PGLUE_B_REG_TX_ERR_RD_ADD_63_32 0x909c
2180/* [R 31] Details of first read request not submitted due to error. [4:0]
2181 * VQID. [5] TREQ. 1 - Indicates the request is a Translation Request.
2182 * [20:8] - Length in bytes. [23:21] - PFID. [24] - VF_VALID. [30:25] -
2183 * VFID. */
2184#define PGLUE_B_REG_TX_ERR_RD_DETAILS 0x90a0
2185/* [R 26] Details of first read request not submitted due to error. [15:0]
2186 * Request ID. [19:16] client ID. [20] - last SR. [24:21] - Error type -
2187 * [21] - Indicates was_error was set; [22] - Indicates BME was cleared;
2188 * [23] - Indicates FID_enable was cleared; [24] - Indicates VF with parent
2189 * PF FLR_request or IOV_disable_request dirty bit is set. [25] valid -
2190 * indicates if there was a request not submitted due to error since the
2191 * last time this register was cleared. */
2192#define PGLUE_B_REG_TX_ERR_RD_DETAILS2 0x90a4
2193/* [R 32] Address [31:0] of first write request not submitted due to error */
2194#define PGLUE_B_REG_TX_ERR_WR_ADD_31_0 0x9088
2195/* [R 32] Address [63:32] of first write request not submitted due to error */
2196#define PGLUE_B_REG_TX_ERR_WR_ADD_63_32 0x908c
2197/* [R 31] Details of first write request not submitted due to error. [4:0]
2198 * VQID. [20:8] - Length in bytes. [23:21] - PFID. [24] - VF_VALID. [30:25]
2199 * - VFID. */
2200#define PGLUE_B_REG_TX_ERR_WR_DETAILS 0x9090
2201/* [R 26] Details of first write request not submitted due to error. [15:0]
2202 * Request ID. [19:16] client ID. [20] - last SR. [24:21] - Error type -
2203 * [21] - Indicates was_error was set; [22] - Indicates BME was cleared;
2204 * [23] - Indicates FID_enable was cleared; [24] - Indicates VF with parent
2205 * PF FLR_request or IOV_disable_request dirty bit is set. [25] valid -
2206 * indicates if there was a request not submitted due to error since the
2207 * last time this register was cleared. */
2208#define PGLUE_B_REG_TX_ERR_WR_DETAILS2 0x9094
2209/* [RW 10] Type A PF/VF inbound interrupt table for USDM: bits[9:5]-mask;
2210 * its[4:0]-address relative to start_offset_a. Bits [1:0] can have any
2211 * value (Byte resolution address). */
2212#define PGLUE_B_REG_USDM_INB_INT_A_0 0x9128
2213#define PGLUE_B_REG_USDM_INB_INT_A_1 0x912c
2214#define PGLUE_B_REG_USDM_INB_INT_A_2 0x9130
2215#define PGLUE_B_REG_USDM_INB_INT_A_3 0x9134
2216#define PGLUE_B_REG_USDM_INB_INT_A_4 0x9138
2217#define PGLUE_B_REG_USDM_INB_INT_A_5 0x913c
2218#define PGLUE_B_REG_USDM_INB_INT_A_6 0x9140
2219/* [RW 1] Type A PF enable inbound interrupt table for USDM. 0 - disable; 1
2220 * - enable. */
2221#define PGLUE_B_REG_USDM_INB_INT_A_PF_ENABLE 0x917c
2222/* [RW 1] Type A VF enable inbound interrupt table for USDM. 0 - disable; 1
2223 * - enable. */
2224#define PGLUE_B_REG_USDM_INB_INT_A_VF_ENABLE 0x9180
2225/* [RW 1] Type B VF enable inbound interrupt table for USDM. 0 - disable; 1
2226 * - enable. */
2227#define PGLUE_B_REG_USDM_INB_INT_B_VF_ENABLE 0x9184
2228/* [RW 16] Start offset of USDM zone A (queue zone) in the internal RAM */
2229#define PGLUE_B_REG_USDM_START_OFFSET_A 0x90d8
2230/* [RW 16] Start offset of USDM zone B (legacy zone) in the internal RAM */
2231#define PGLUE_B_REG_USDM_START_OFFSET_B 0x90e0
2232/* [RW 5] VF Shift of USDM zone B (legacy zone) in the internal RAM */
2233#define PGLUE_B_REG_USDM_VF_SHIFT_B 0x90e8
2234/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
2235#define PGLUE_B_REG_USDM_ZONE_A_SIZE_PF 0x91a4
2236/* [R 26] Details of first target VF request accessing VF GRC space that
2237 * failed permission check. [14:0] Address. [15] w_nr: 0 - Read; 1 - Write.
2238 * [21:16] VFID. [24:22] - PFID. [25] valid - indicates if there was a
2239 * request accessing VF GRC space that failed permission check since the
2240 * last time this register was cleared. Permission checks are: function
2241 * permission; R/W permission; address range permission. */
2242#define PGLUE_B_REG_VF_GRC_SPACE_VIOLATION_DETAILS 0x9234
2243/* [R 31] Details of first target VF request with length violation (too many
2244 * DWs) accessing BAR0. [12:0] Address in DWs (bits [14:2] of byte address).
2245 * [14:13] BAR. [20:15] VFID. [23:21] - PFID. [29:24] - Length in DWs. [30]
2246 * valid - indicates if there was a request with length violation since the
2247 * last time this register was cleared. Length violations: length of more
2248 * than 2DWs; length of 2DWs and address not QW aligned; window is GRC and
2249 * length is more than 1 DW. */
2250#define PGLUE_B_REG_VF_LENGTH_VIOLATION_DETAILS 0x9230
2251/* [R 8] Was_error indication dirty bits for PFs 0 to 7. Each bit indicates
2252 * that there was a completion with uncorrectable error for the
2253 * corresponding PF. Set by PXP. Reset by MCP writing 1 to
2254 * was_error_pf_7_0_clr. */
2255#define PGLUE_B_REG_WAS_ERROR_PF_7_0 0x907c
2256/* [W 8] Was_error indication dirty bits clear for PFs 0 to 7. MCP writes 1
2257 * to a bit in this register in order to clear the corresponding bit in
2258 * flr_request_pf_7_0 register. */
2259#define PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR 0x9470
2260/* [R 32] Was_error indication dirty bits for VFs 96 to 127. Each bit
2261 * indicates that there was a completion with uncorrectable error for the
2262 * corresponding VF. Set by PXP. Reset by MCP writing 1 to
2263 * was_error_vf_127_96_clr. */
2264#define PGLUE_B_REG_WAS_ERROR_VF_127_96 0x9078
2265/* [W 32] Was_error indication dirty bits clear for VFs 96 to 127. MCP
2266 * writes 1 to a bit in this register in order to clear the corresponding
2267 * bit in was_error_vf_127_96 register. */
2268#define PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR 0x9474
2269/* [R 32] Was_error indication dirty bits for VFs 0 to 31. Each bit
2270 * indicates that there was a completion with uncorrectable error for the
2271 * corresponding VF. Set by PXP. Reset by MCP writing 1 to
2272 * was_error_vf_31_0_clr. */
2273#define PGLUE_B_REG_WAS_ERROR_VF_31_0 0x906c
2274/* [W 32] Was_error indication dirty bits clear for VFs 0 to 31. MCP writes
2275 * 1 to a bit in this register in order to clear the corresponding bit in
2276 * was_error_vf_31_0 register. */
2277#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR 0x9478
2278/* [R 32] Was_error indication dirty bits for VFs 32 to 63. Each bit
2279 * indicates that there was a completion with uncorrectable error for the
2280 * corresponding VF. Set by PXP. Reset by MCP writing 1 to
2281 * was_error_vf_63_32_clr. */
2282#define PGLUE_B_REG_WAS_ERROR_VF_63_32 0x9070
2283/* [W 32] Was_error indication dirty bits clear for VFs 32 to 63. MCP writes
2284 * 1 to a bit in this register in order to clear the corresponding bit in
2285 * was_error_vf_63_32 register. */
2286#define PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR 0x947c
2287/* [R 32] Was_error indication dirty bits for VFs 64 to 95. Each bit
2288 * indicates that there was a completion with uncorrectable error for the
2289 * corresponding VF. Set by PXP. Reset by MCP writing 1 to
2290 * was_error_vf_95_64_clr. */
2291#define PGLUE_B_REG_WAS_ERROR_VF_95_64 0x9074
2292/* [W 32] Was_error indication dirty bits clear for VFs 64 to 95. MCP writes
2293 * 1 to a bit in this register in order to clear the corresponding bit in
2294 * was_error_vf_95_64 register. */
2295#define PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR 0x9480
2296/* [RW 1] Type A PF enable inbound interrupt table for XSDM. 0 - disable; 1
2297 * - enable. */
2298#define PGLUE_B_REG_XSDM_INB_INT_A_PF_ENABLE 0x9188
2299/* [RW 16] Start offset of XSDM zone A (queue zone) in the internal RAM */
2300#define PGLUE_B_REG_XSDM_START_OFFSET_A 0x90ec
2301/* [RW 16] Start offset of XSDM zone B (legacy zone) in the internal RAM */
2302#define PGLUE_B_REG_XSDM_START_OFFSET_B 0x90f4
2303/* [RW 5] VF Shift of XSDM zone B (legacy zone) in the internal RAM */
2304#define PGLUE_B_REG_XSDM_VF_SHIFT_B 0x90fc
2305/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
2306#define PGLUE_B_REG_XSDM_ZONE_A_SIZE_PF 0x91a8
1807#define PRS_REG_A_PRSU_20 0x40134 2307#define PRS_REG_A_PRSU_20 0x40134
1808/* [R 8] debug only: CFC load request current credit. Transaction based. */ 2308/* [R 8] debug only: CFC load request current credit. Transaction based. */
1809#define PRS_REG_CFC_LD_CURRENT_CREDIT 0x40164 2309#define PRS_REG_CFC_LD_CURRENT_CREDIT 0x40164
@@ -1866,9 +2366,13 @@
1866#define PRS_REG_FLUSH_REGIONS_TYPE_5 0x40018 2366#define PRS_REG_FLUSH_REGIONS_TYPE_5 0x40018
1867#define PRS_REG_FLUSH_REGIONS_TYPE_6 0x4001c 2367#define PRS_REG_FLUSH_REGIONS_TYPE_6 0x4001c
1868#define PRS_REG_FLUSH_REGIONS_TYPE_7 0x40020 2368#define PRS_REG_FLUSH_REGIONS_TYPE_7 0x40020
2369/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
2370 * Ethernet header. */
2371#define PRS_REG_HDRS_AFTER_BASIC 0x40238
1869/* [RW 4] The increment value to send in the CFC load request message */ 2372/* [RW 4] The increment value to send in the CFC load request message */
1870#define PRS_REG_INC_VALUE 0x40048 2373#define PRS_REG_INC_VALUE 0x40048
1871/* [RW 1] If set indicates not to send messages to CFC on received packets */ 2374/* [RW 6] Bit-map indicating which headers must appear in the packet */
2375#define PRS_REG_MUST_HAVE_HDRS 0x40254
1872#define PRS_REG_NIC_MODE 0x40138 2376#define PRS_REG_NIC_MODE 0x40138
1873/* [RW 8] The 8-bit event ID for cases where there is no match on the 2377/* [RW 8] The 8-bit event ID for cases where there is no match on the
1874 connection. Used in packet start message to TCM. */ 2378 connection. Used in packet start message to TCM. */
@@ -1919,6 +2423,13 @@
1919#define PRS_REG_TCM_CURRENT_CREDIT 0x40160 2423#define PRS_REG_TCM_CURRENT_CREDIT 0x40160
1920/* [R 8] debug only: TSDM current credit. Transaction based. */ 2424/* [R 8] debug only: TSDM current credit. Transaction based. */
1921#define PRS_REG_TSDM_CURRENT_CREDIT 0x4015c 2425#define PRS_REG_TSDM_CURRENT_CREDIT 0x4015c
2426#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT (0x1<<19)
2427#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF (0x1<<20)
2428#define PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN (0x1<<22)
2429#define PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED (0x1<<23)
2430#define PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED (0x1<<24)
2431#define PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR (0x1<<7)
2432#define PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR (0x1<<7)
1922/* [R 6] Debug only: Number of used entries in the data FIFO */ 2433/* [R 6] Debug only: Number of used entries in the data FIFO */
1923#define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c 2434#define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c
1924/* [R 7] Debug only: Number of used entries in the header FIFO */ 2435/* [R 7] Debug only: Number of used entries in the header FIFO */
@@ -2244,8 +2755,17 @@
2244/* [RW 1] When '1'; requests will enter input buffers but wont get out 2755/* [RW 1] When '1'; requests will enter input buffers but wont get out
2245 towards the glue */ 2756 towards the glue */
2246#define PXP2_REG_RQ_DISABLE_INPUTS 0x120330 2757#define PXP2_REG_RQ_DISABLE_INPUTS 0x120330
2247/* [RW 1] 1 - SR will be aligned by 64B; 0 - SR will be aligned by 8B */ 2758/* [RW 4] Determines alignment of write SRs when a request is split into
2759 * several SRs. 0 - 8B aligned. 1 - 64B aligned. 2 - 128B aligned. 3 - 256B
2760 * aligned. 4 - 512B aligned. */
2248#define PXP2_REG_RQ_DRAM_ALIGN 0x1205b0 2761#define PXP2_REG_RQ_DRAM_ALIGN 0x1205b0
2762/* [RW 4] Determines alignment of read SRs when a request is split into
2763 * several SRs. 0 - 8B aligned. 1 - 64B aligned. 2 - 128B aligned. 3 - 256B
2764 * aligned. 4 - 512B aligned. */
2765#define PXP2_REG_RQ_DRAM_ALIGN_RD 0x12092c
2766/* [RW 1] when set the new alignment method (E2) will be applied; when reset
2767 * the original alignment method (E1 E1H) will be applied */
2768#define PXP2_REG_RQ_DRAM_ALIGN_SEL 0x120930
2249/* [RW 1] If 1 ILT failiue will not result in ELT access; An interrupt will 2769/* [RW 1] If 1 ILT failiue will not result in ELT access; An interrupt will
2250 be asserted */ 2770 be asserted */
2251#define PXP2_REG_RQ_ELT_DISABLE 0x12066c 2771#define PXP2_REG_RQ_ELT_DISABLE 0x12066c
@@ -2436,7 +2956,8 @@
2436#define PXP_REG_PXP_INT_STS_1 0x103078 2956#define PXP_REG_PXP_INT_STS_1 0x103078
2437/* [RC 32] Interrupt register #0 read clear */ 2957/* [RC 32] Interrupt register #0 read clear */
2438#define PXP_REG_PXP_INT_STS_CLR_0 0x10306c 2958#define PXP_REG_PXP_INT_STS_CLR_0 0x10306c
2439/* [RW 26] Parity mask register #0 read/write */ 2959#define PXP_REG_PXP_INT_STS_CLR_1 0x10307c
2960/* [RW 27] Parity mask register #0 read/write */
2440#define PXP_REG_PXP_PRTY_MASK 0x103094 2961#define PXP_REG_PXP_PRTY_MASK 0x103094
2441/* [R 26] Parity register #0 read */ 2962/* [R 26] Parity register #0 read */
2442#define PXP_REG_PXP_PRTY_STS 0x103088 2963#define PXP_REG_PXP_PRTY_STS 0x103088
@@ -2566,6 +3087,7 @@
2566#define QM_REG_PAUSESTATE7 0x16e698 3087#define QM_REG_PAUSESTATE7 0x16e698
2567/* [RW 2] The PCI attributes field used in the PCI request. */ 3088/* [RW 2] The PCI attributes field used in the PCI request. */
2568#define QM_REG_PCIREQAT 0x168054 3089#define QM_REG_PCIREQAT 0x168054
3090#define QM_REG_PF_EN 0x16e70c
2569/* [R 16] The byte credit of port 0 */ 3091/* [R 16] The byte credit of port 0 */
2570#define QM_REG_PORT0BYTECRD 0x168300 3092#define QM_REG_PORT0BYTECRD 0x168300
2571/* [R 16] The byte credit of port 1 */ 3093/* [R 16] The byte credit of port 1 */
@@ -3402,6 +3924,14 @@
3402/* [R 32] Parity register #0 read */ 3924/* [R 32] Parity register #0 read */
3403#define TSEM_REG_TSEM_PRTY_STS_0 0x180114 3925#define TSEM_REG_TSEM_PRTY_STS_0 0x180114
3404#define TSEM_REG_TSEM_PRTY_STS_1 0x180124 3926#define TSEM_REG_TSEM_PRTY_STS_1 0x180124
3927/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
3928 * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
3929#define TSEM_REG_VFPF_ERR_NUM 0x180380
3930/* [RW 32] Indirect access to AG context with 32-bits granularity. The bits
3931 * [10:8] of the address should be the offset within the accessed LCID
3932 * context; the bits [7:0] are the accessed LCID.Example: to write to REG10
3933 * LCID100. The RBC address should be 12'ha64. */
3934#define UCM_REG_AG_CTX 0xe2000
3405/* [R 5] Used to read the XX protection CAM occupancy counter. */ 3935/* [R 5] Used to read the XX protection CAM occupancy counter. */
3406#define UCM_REG_CAM_OCCUP 0xe0170 3936#define UCM_REG_CAM_OCCUP 0xe0170
3407/* [RW 1] CDU AG read Interface enable. If 0 - the request input is 3937/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
@@ -3851,6 +4381,17 @@
3851/* [R 32] Parity register #0 read */ 4381/* [R 32] Parity register #0 read */
3852#define USEM_REG_USEM_PRTY_STS_0 0x300124 4382#define USEM_REG_USEM_PRTY_STS_0 0x300124
3853#define USEM_REG_USEM_PRTY_STS_1 0x300134 4383#define USEM_REG_USEM_PRTY_STS_1 0x300134
4384/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
4385 * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
4386#define USEM_REG_VFPF_ERR_NUM 0x300380
4387#define VFC_MEMORIES_RST_REG_CAM_RST (0x1<<0)
4388#define VFC_MEMORIES_RST_REG_RAM_RST (0x1<<1)
4389#define VFC_REG_MEMORIES_RST 0x1943c
4390/* [RW 32] Indirect access to AG context with 32-bits granularity. The bits
4391 * [12:8] of the address should be the offset within the accessed LCID
4392 * context; the bits [7:0] are the accessed LCID.Example: to write to REG10
4393 * LCID100. The RBC address should be 13'ha64. */
4394#define XCM_REG_AG_CTX 0x28000
3854/* [RW 2] The queue index for registration on Aux1 counter flag. */ 4395/* [RW 2] The queue index for registration on Aux1 counter flag. */
3855#define XCM_REG_AUX1_Q 0x20134 4396#define XCM_REG_AUX1_Q 0x20134
3856/* [RW 2] Per each decision rule the queue index to register to. */ 4397/* [RW 2] Per each decision rule the queue index to register to. */
@@ -4333,6 +4874,9 @@
4333#define XSEM_REG_TS_8_AS 0x280058 4874#define XSEM_REG_TS_8_AS 0x280058
4334/* [RW 3] The arbitration scheme of time_slot 9 */ 4875/* [RW 3] The arbitration scheme of time_slot 9 */
4335#define XSEM_REG_TS_9_AS 0x28005c 4876#define XSEM_REG_TS_9_AS 0x28005c
4877/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
4878 * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
4879#define XSEM_REG_VFPF_ERR_NUM 0x280380
4336/* [RW 32] Interrupt mask register #0 read/write */ 4880/* [RW 32] Interrupt mask register #0 read/write */
4337#define XSEM_REG_XSEM_INT_MASK_0 0x280110 4881#define XSEM_REG_XSEM_INT_MASK_0 0x280110
4338#define XSEM_REG_XSEM_INT_MASK_1 0x280120 4882#define XSEM_REG_XSEM_INT_MASK_1 0x280120
@@ -4371,6 +4915,23 @@
4371#define BIGMAC_REGISTER_TX_SOURCE_ADDR (0x08<<3) 4915#define BIGMAC_REGISTER_TX_SOURCE_ADDR (0x08<<3)
4372#define BIGMAC_REGISTER_TX_STAT_GTBYT (0x20<<3) 4916#define BIGMAC_REGISTER_TX_STAT_GTBYT (0x20<<3)
4373#define BIGMAC_REGISTER_TX_STAT_GTPKT (0x0C<<3) 4917#define BIGMAC_REGISTER_TX_STAT_GTPKT (0x0C<<3)
4918#define BIGMAC2_REGISTER_BMAC_CONTROL (0x00<<3)
4919#define BIGMAC2_REGISTER_BMAC_XGXS_CONTROL (0x01<<3)
4920#define BIGMAC2_REGISTER_CNT_MAX_SIZE (0x05<<3)
4921#define BIGMAC2_REGISTER_PFC_CONTROL (0x06<<3)
4922#define BIGMAC2_REGISTER_RX_CONTROL (0x3A<<3)
4923#define BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS (0x62<<3)
4924#define BIGMAC2_REGISTER_RX_MAX_SIZE (0x3C<<3)
4925#define BIGMAC2_REGISTER_RX_STAT_GR64 (0x40<<3)
4926#define BIGMAC2_REGISTER_RX_STAT_GRIPJ (0x5f<<3)
4927#define BIGMAC2_REGISTER_RX_STAT_GRPP (0x51<<3)
4928#define BIGMAC2_REGISTER_TX_CONTROL (0x1C<<3)
4929#define BIGMAC2_REGISTER_TX_MAX_SIZE (0x1E<<3)
4930#define BIGMAC2_REGISTER_TX_PAUSE_CONTROL (0x20<<3)
4931#define BIGMAC2_REGISTER_TX_SOURCE_ADDR (0x1D<<3)
4932#define BIGMAC2_REGISTER_TX_STAT_GTBYT (0x39<<3)
4933#define BIGMAC2_REGISTER_TX_STAT_GTPOK (0x22<<3)
4934#define BIGMAC2_REGISTER_TX_STAT_GTPP (0x24<<3)
4374#define EMAC_LED_1000MB_OVERRIDE (1L<<1) 4935#define EMAC_LED_1000MB_OVERRIDE (1L<<1)
4375#define EMAC_LED_100MB_OVERRIDE (1L<<2) 4936#define EMAC_LED_100MB_OVERRIDE (1L<<2)
4376#define EMAC_LED_10MB_OVERRIDE (1L<<3) 4937#define EMAC_LED_10MB_OVERRIDE (1L<<3)
@@ -4478,6 +5039,8 @@
4478#define HW_LOCK_RESOURCE_SPIO 2 5039#define HW_LOCK_RESOURCE_SPIO 2
4479#define HW_LOCK_RESOURCE_UNDI 5 5040#define HW_LOCK_RESOURCE_UNDI 5
4480#define PRS_FLAG_OVERETH_IPV4 1 5041#define PRS_FLAG_OVERETH_IPV4 1
5042#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4)
5043#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5)
4481#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (1<<18) 5044#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (1<<18)
4482#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (1<<31) 5045#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (1<<31)
4483#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (1<<9) 5046#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (1<<9)
@@ -4504,6 +5067,8 @@
4504#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (1<<20) 5067#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (1<<20)
4505#define AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR (1<<0) 5068#define AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR (1<<0)
4506#define AEU_INPUTS_ATTN_BITS_PBF_HW_INTERRUPT (1<<31) 5069#define AEU_INPUTS_ATTN_BITS_PBF_HW_INTERRUPT (1<<31)
5070#define AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT (0x1<<2)
5071#define AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR (0x1<<3)
4507#define AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT (1<<3) 5072#define AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT (1<<3)
4508#define AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR (1<<2) 5073#define AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR (1<<2)
4509#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT (1<<5) 5074#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT (1<<5)
@@ -4796,6 +5361,253 @@
4796#define PCI_ID_VAL1 0x434 5361#define PCI_ID_VAL1 0x434
4797#define PCI_ID_VAL2 0x438 5362#define PCI_ID_VAL2 0x438
4798 5363
5364#define PXPCS_TL_CONTROL_5 0x814
5365#define PXPCS_TL_CONTROL_5_UNKNOWNTYPE_ERR_ATTN (1 << 29) /*WC*/
5366#define PXPCS_TL_CONTROL_5_BOUNDARY4K_ERR_ATTN (1 << 28) /*WC*/
5367#define PXPCS_TL_CONTROL_5_MRRS_ERR_ATTN (1 << 27) /*WC*/
5368#define PXPCS_TL_CONTROL_5_MPS_ERR_ATTN (1 << 26) /*WC*/
5369#define PXPCS_TL_CONTROL_5_TTX_BRIDGE_FORWARD_ERR (1 << 25) /*WC*/
5370#define PXPCS_TL_CONTROL_5_TTX_TXINTF_OVERFLOW (1 << 24) /*WC*/
5371#define PXPCS_TL_CONTROL_5_PHY_ERR_ATTN (1 << 23) /*RO*/
5372#define PXPCS_TL_CONTROL_5_DL_ERR_ATTN (1 << 22) /*RO*/
5373#define PXPCS_TL_CONTROL_5_TTX_ERR_NP_TAG_IN_USE (1 << 21) /*WC*/
5374#define PXPCS_TL_CONTROL_5_TRX_ERR_UNEXP_RTAG (1 << 20) /*WC*/
5375#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT1 (1 << 19) /*WC*/
5376#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 (1 << 18) /*WC*/
5377#define PXPCS_TL_CONTROL_5_ERR_ECRC1 (1 << 17) /*WC*/
5378#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP1 (1 << 16) /*WC*/
5379#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW1 (1 << 15) /*WC*/
5380#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL1 (1 << 14) /*WC*/
5381#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT1 (1 << 13) /*WC*/
5382#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT1 (1 << 12) /*WC*/
5383#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL1 (1 << 11) /*WC*/
5384#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP1 (1 << 10) /*WC*/
5385#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT (1 << 9) /*WC*/
5386#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT (1 << 8) /*WC*/
5387#define PXPCS_TL_CONTROL_5_ERR_ECRC (1 << 7) /*WC*/
5388#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP (1 << 6) /*WC*/
5389#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW (1 << 5) /*WC*/
5390#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL (1 << 4) /*WC*/
5391#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT (1 << 3) /*WC*/
5392#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT (1 << 2) /*WC*/
5393#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL (1 << 1) /*WC*/
5394#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP (1 << 0) /*WC*/
5395
5396
5397#define PXPCS_TL_FUNC345_STAT 0x854
5398#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT4 (1 << 29) /* WC */
5399#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4\
5400 (1 << 28) /* Unsupported Request Error Status in function4, if \
5401 set, generate pcie_err_attn output when this error is seen. WC */
5402#define PXPCS_TL_FUNC345_STAT_ERR_ECRC4\
5403 (1 << 27) /* ECRC Error TLP Status Status in function 4, if set, \
5404 generate pcie_err_attn output when this error is seen.. WC */
5405#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP4\
5406 (1 << 26) /* Malformed TLP Status Status in function 4, if set, \
5407 generate pcie_err_attn output when this error is seen.. WC */
5408#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW4\
5409 (1 << 25) /* Receiver Overflow Status Status in function 4, if \
5410 set, generate pcie_err_attn output when this error is seen.. WC \
5411 */
5412#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL4\
5413 (1 << 24) /* Unexpected Completion Status Status in function 4, \
5414 if set, generate pcie_err_attn output when this error is seen. WC \
5415 */
5416#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT4\
5417 (1 << 23) /* Receive UR Statusin function 4. If set, generate \
5418 pcie_err_attn output when this error is seen. WC */
5419#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT4\
5420 (1 << 22) /* Completer Timeout Status Status in function 4, if \
5421 set, generate pcie_err_attn output when this error is seen. WC */
5422#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL4\
5423 (1 << 21) /* Flow Control Protocol Error Status Status in \
5424 function 4, if set, generate pcie_err_attn output when this error \
5425 is seen. WC */
5426#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP4\
5427 (1 << 20) /* Poisoned Error Status Status in function 4, if set, \
5428 generate pcie_err_attn output when this error is seen.. WC */
5429#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT3 (1 << 19) /* WC */
5430#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3\
5431 (1 << 18) /* Unsupported Request Error Status in function3, if \
5432 set, generate pcie_err_attn output when this error is seen. WC */
5433#define PXPCS_TL_FUNC345_STAT_ERR_ECRC3\
5434 (1 << 17) /* ECRC Error TLP Status Status in function 3, if set, \
5435 generate pcie_err_attn output when this error is seen.. WC */
5436#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP3\
5437 (1 << 16) /* Malformed TLP Status Status in function 3, if set, \
5438 generate pcie_err_attn output when this error is seen.. WC */
5439#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW3\
5440 (1 << 15) /* Receiver Overflow Status Status in function 3, if \
5441 set, generate pcie_err_attn output when this error is seen.. WC \
5442 */
5443#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL3\
5444 (1 << 14) /* Unexpected Completion Status Status in function 3, \
5445 if set, generate pcie_err_attn output when this error is seen. WC \
5446 */
5447#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT3\
5448 (1 << 13) /* Receive UR Statusin function 3. If set, generate \
5449 pcie_err_attn output when this error is seen. WC */
5450#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT3\
5451 (1 << 12) /* Completer Timeout Status Status in function 3, if \
5452 set, generate pcie_err_attn output when this error is seen. WC */
5453#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL3\
5454 (1 << 11) /* Flow Control Protocol Error Status Status in \
5455 function 3, if set, generate pcie_err_attn output when this error \
5456 is seen. WC */
5457#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP3\
5458 (1 << 10) /* Poisoned Error Status Status in function 3, if set, \
5459 generate pcie_err_attn output when this error is seen.. WC */
5460#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT2 (1 << 9) /* WC */
5461#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2\
5462 (1 << 8) /* Unsupported Request Error Status for Function 2, if \
5463 set, generate pcie_err_attn output when this error is seen. WC */
5464#define PXPCS_TL_FUNC345_STAT_ERR_ECRC2\
5465 (1 << 7) /* ECRC Error TLP Status Status for Function 2, if set, \
5466 generate pcie_err_attn output when this error is seen.. WC */
5467#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP2\
5468 (1 << 6) /* Malformed TLP Status Status for Function 2, if set, \
5469 generate pcie_err_attn output when this error is seen.. WC */
5470#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW2\
5471 (1 << 5) /* Receiver Overflow Status Status for Function 2, if \
5472 set, generate pcie_err_attn output when this error is seen.. WC \
5473 */
5474#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL2\
5475 (1 << 4) /* Unexpected Completion Status Status for Function 2, \
5476 if set, generate pcie_err_attn output when this error is seen. WC \
5477 */
5478#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT2\
5479 (1 << 3) /* Receive UR Statusfor Function 2. If set, generate \
5480 pcie_err_attn output when this error is seen. WC */
5481#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT2\
5482 (1 << 2) /* Completer Timeout Status Status for Function 2, if \
5483 set, generate pcie_err_attn output when this error is seen. WC */
5484#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL2\
5485 (1 << 1) /* Flow Control Protocol Error Status Status for \
5486 Function 2, if set, generate pcie_err_attn output when this error \
5487 is seen. WC */
5488#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP2\
5489 (1 << 0) /* Poisoned Error Status Status for Function 2, if set, \
5490 generate pcie_err_attn output when this error is seen.. WC */
5491
5492
5493#define PXPCS_TL_FUNC678_STAT 0x85C
5494#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT7 (1 << 29) /* WC */
5495#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7\
5496 (1 << 28) /* Unsupported Request Error Status in function7, if \
5497 set, generate pcie_err_attn output when this error is seen. WC */
5498#define PXPCS_TL_FUNC678_STAT_ERR_ECRC7\
5499 (1 << 27) /* ECRC Error TLP Status Status in function 7, if set, \
5500 generate pcie_err_attn output when this error is seen.. WC */
5501#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP7\
5502 (1 << 26) /* Malformed TLP Status Status in function 7, if set, \
5503 generate pcie_err_attn output when this error is seen.. WC */
5504#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW7\
5505 (1 << 25) /* Receiver Overflow Status Status in function 7, if \
5506 set, generate pcie_err_attn output when this error is seen.. WC \
5507 */
5508#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL7\
5509 (1 << 24) /* Unexpected Completion Status Status in function 7, \
5510 if set, generate pcie_err_attn output when this error is seen. WC \
5511 */
5512#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT7\
5513 (1 << 23) /* Receive UR Statusin function 7. If set, generate \
5514 pcie_err_attn output when this error is seen. WC */
5515#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT7\
5516 (1 << 22) /* Completer Timeout Status Status in function 7, if \
5517 set, generate pcie_err_attn output when this error is seen. WC */
5518#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL7\
5519 (1 << 21) /* Flow Control Protocol Error Status Status in \
5520 function 7, if set, generate pcie_err_attn output when this error \
5521 is seen. WC */
5522#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP7\
5523 (1 << 20) /* Poisoned Error Status Status in function 7, if set, \
5524 generate pcie_err_attn output when this error is seen.. WC */
5525#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT6 (1 << 19) /* WC */
5526#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6\
5527 (1 << 18) /* Unsupported Request Error Status in function6, if \
5528 set, generate pcie_err_attn output when this error is seen. WC */
5529#define PXPCS_TL_FUNC678_STAT_ERR_ECRC6\
5530 (1 << 17) /* ECRC Error TLP Status Status in function 6, if set, \
5531 generate pcie_err_attn output when this error is seen.. WC */
5532#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP6\
5533 (1 << 16) /* Malformed TLP Status Status in function 6, if set, \
5534 generate pcie_err_attn output when this error is seen.. WC */
5535#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW6\
5536 (1 << 15) /* Receiver Overflow Status Status in function 6, if \
5537 set, generate pcie_err_attn output when this error is seen.. WC \
5538 */
5539#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL6\
5540 (1 << 14) /* Unexpected Completion Status Status in function 6, \
5541 if set, generate pcie_err_attn output when this error is seen. WC \
5542 */
5543#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT6\
5544 (1 << 13) /* Receive UR Statusin function 6. If set, generate \
5545 pcie_err_attn output when this error is seen. WC */
5546#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT6\
5547 (1 << 12) /* Completer Timeout Status Status in function 6, if \
5548 set, generate pcie_err_attn output when this error is seen. WC */
5549#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL6\
5550 (1 << 11) /* Flow Control Protocol Error Status Status in \
5551 function 6, if set, generate pcie_err_attn output when this error \
5552 is seen. WC */
5553#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP6\
5554 (1 << 10) /* Poisoned Error Status Status in function 6, if set, \
5555 generate pcie_err_attn output when this error is seen.. WC */
5556#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT5 (1 << 9) /* WC */
5557#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5\
5558 (1 << 8) /* Unsupported Request Error Status for Function 5, if \
5559 set, generate pcie_err_attn output when this error is seen. WC */
5560#define PXPCS_TL_FUNC678_STAT_ERR_ECRC5\
5561 (1 << 7) /* ECRC Error TLP Status Status for Function 5, if set, \
5562 generate pcie_err_attn output when this error is seen.. WC */
5563#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP5\
5564 (1 << 6) /* Malformed TLP Status Status for Function 5, if set, \
5565 generate pcie_err_attn output when this error is seen.. WC */
5566#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW5\
5567 (1 << 5) /* Receiver Overflow Status Status for Function 5, if \
5568 set, generate pcie_err_attn output when this error is seen.. WC \
5569 */
5570#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL5\
5571 (1 << 4) /* Unexpected Completion Status Status for Function 5, \
5572 if set, generate pcie_err_attn output when this error is seen. WC \
5573 */
5574#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT5\
5575 (1 << 3) /* Receive UR Statusfor Function 5. If set, generate \
5576 pcie_err_attn output when this error is seen. WC */
5577#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT5\
5578 (1 << 2) /* Completer Timeout Status Status for Function 5, if \
5579 set, generate pcie_err_attn output when this error is seen. WC */
5580#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL5\
5581 (1 << 1) /* Flow Control Protocol Error Status Status for \
5582 Function 5, if set, generate pcie_err_attn output when this error \
5583 is seen. WC */
5584#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP5\
5585 (1 << 0) /* Poisoned Error Status Status for Function 5, if set, \
5586 generate pcie_err_attn output when this error is seen.. WC */
5587
5588
5589#define BAR_USTRORM_INTMEM 0x400000
5590#define BAR_CSTRORM_INTMEM 0x410000
5591#define BAR_XSTRORM_INTMEM 0x420000
5592#define BAR_TSTRORM_INTMEM 0x430000
5593
5594/* for accessing the IGU in case of status block ACK */
5595#define BAR_IGU_INTMEM 0x440000
5596
5597#define BAR_DOORBELL_OFFSET 0x800000
5598
5599#define BAR_ME_REGISTER 0x450000
5600#define ME_REG_PF_NUM_SHIFT 0
5601#define ME_REG_PF_NUM\
5602 (7L<<ME_REG_PF_NUM_SHIFT) /* Relative PF Num */
5603#define ME_REG_VF_VALID (1<<8)
5604#define ME_REG_VF_NUM_SHIFT 9
5605#define ME_REG_VF_NUM_MASK (0x3f<<ME_REG_VF_NUM_SHIFT)
5606#define ME_REG_VF_ERR (0x1<<3)
5607#define ME_REG_ABS_PF_NUM_SHIFT 16
5608#define ME_REG_ABS_PF_NUM\
5609 (7L<<ME_REG_ABS_PF_NUM_SHIFT) /* Absolute PF Num */
5610
4799 5611
4800#define MDIO_REG_BANK_CL73_IEEEB0 0x0 5612#define MDIO_REG_BANK_CL73_IEEEB0 0x0
4801#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0 5613#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0
@@ -5276,6 +6088,11 @@ Theotherbitsarereservedandshouldbezero*/
5276#define IGU_INT_NOP 2 6088#define IGU_INT_NOP 2
5277#define IGU_INT_NOP2 3 6089#define IGU_INT_NOP2 3
5278 6090
6091#define IGU_USE_REGISTER_ustorm_type_0_sb_cleanup 0
6092#define IGU_USE_REGISTER_ustorm_type_1_sb_cleanup 1
6093#define IGU_USE_REGISTER_cstorm_type_0_sb_cleanup 2
6094#define IGU_USE_REGISTER_cstorm_type_1_sb_cleanup 3
6095
5279#define COMMAND_REG_INT_ACK 0x0 6096#define COMMAND_REG_INT_ACK 0x0
5280#define COMMAND_REG_PROD_UPD 0x4 6097#define COMMAND_REG_PROD_UPD 0x4
5281#define COMMAND_REG_ATTN_BITS_UPD 0x8 6098#define COMMAND_REG_ATTN_BITS_UPD 0x8
@@ -5318,6 +6135,50 @@ Theotherbitsarereservedandshouldbezero*/
5318#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05a6 6135#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05a6
5319 6136
5320#define IGU_REG_RESERVED_UPPER 0x05ff 6137#define IGU_REG_RESERVED_UPPER 0x05ff
6138/* Fields of IGU PF CONFIGRATION REGISTER */
6139#define IGU_PF_CONF_FUNC_EN (0x1<<0) /* function enable */
6140#define IGU_PF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */
6141#define IGU_PF_CONF_INT_LINE_EN (0x1<<2) /* INT enable */
6142#define IGU_PF_CONF_ATTN_BIT_EN (0x1<<3) /* attention enable */
6143#define IGU_PF_CONF_SINGLE_ISR_EN (0x1<<4) /* single ISR mode enable */
6144#define IGU_PF_CONF_SIMD_MODE (0x1<<5) /* simd all ones mode */
6145
6146/* Fields of IGU VF CONFIGRATION REGISTER */
6147#define IGU_VF_CONF_FUNC_EN (0x1<<0) /* function enable */
6148#define IGU_VF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */
6149#define IGU_VF_CONF_PARENT_MASK (0x3<<2) /* Parent PF */
6150#define IGU_VF_CONF_PARENT_SHIFT 2 /* Parent PF */
6151#define IGU_VF_CONF_SINGLE_ISR_EN (0x1<<4) /* single ISR mode enable */
6152
6153
6154#define IGU_BC_DSB_NUM_SEGS 5
6155#define IGU_BC_NDSB_NUM_SEGS 2
6156#define IGU_NORM_DSB_NUM_SEGS 2
6157#define IGU_NORM_NDSB_NUM_SEGS 1
6158#define IGU_BC_BASE_DSB_PROD 128
6159#define IGU_NORM_BASE_DSB_PROD 136
6160
6161#define IGU_CTRL_CMD_TYPE_WR\
6162 1
6163#define IGU_CTRL_CMD_TYPE_RD\
6164 0
6165
6166#define IGU_SEG_ACCESS_NORM 0
6167#define IGU_SEG_ACCESS_DEF 1
6168#define IGU_SEG_ACCESS_ATTN 2
6169
6170 /* FID (if VF - [6] = 0; [5:0] = VF number; if PF - [6] = 1; \
6171 [5:2] = 0; [1:0] = PF number) */
6172#define IGU_FID_ENCODE_IS_PF (0x1<<6)
6173#define IGU_FID_ENCODE_IS_PF_SHIFT 6
6174#define IGU_FID_VF_NUM_MASK (0x3f)
6175#define IGU_FID_PF_NUM_MASK (0x7)
6176
6177#define IGU_REG_MAPPING_MEMORY_VALID (1<<0)
6178#define IGU_REG_MAPPING_MEMORY_VECTOR_MASK (0x3F<<1)
6179#define IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT 1
6180#define IGU_REG_MAPPING_MEMORY_FID_MASK (0x7F<<7)
6181#define IGU_REG_MAPPING_MEMORY_FID_SHIFT 7
5321 6182
5322 6183
5323#define CDU_REGION_NUMBER_XCM_AG 2 6184#define CDU_REGION_NUMBER_XCM_AG 2
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index 32b6b1033a3b..ad7aa55efb63 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -185,20 +185,12 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp)
185 /* loader */ 185 /* loader */
186 if (bp->executer_idx) { 186 if (bp->executer_idx) {
187 int loader_idx = PMF_DMAE_C(bp); 187 int loader_idx = PMF_DMAE_C(bp);
188 u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
189 true, DMAE_COMP_GRC);
190 opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);
188 191
189 memset(dmae, 0, sizeof(struct dmae_command)); 192 memset(dmae, 0, sizeof(struct dmae_command));
190 193 dmae->opcode = opcode;
191 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
192 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
193 DMAE_CMD_DST_RESET |
194#ifdef __BIG_ENDIAN
195 DMAE_CMD_ENDIANITY_B_DW_SWAP |
196#else
197 DMAE_CMD_ENDIANITY_DW_SWAP |
198#endif
199 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
200 DMAE_CMD_PORT_0) |
201 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
202 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0])); 194 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
203 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0])); 195 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
204 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM + 196 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
@@ -257,19 +249,10 @@ static void bnx2x_stats_pmf_update(struct bnx2x *bp)
257 249
258 bp->executer_idx = 0; 250 bp->executer_idx = 0;
259 251
260 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | 252 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0);
261 DMAE_CMD_C_ENABLE |
262 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
263#ifdef __BIG_ENDIAN
264 DMAE_CMD_ENDIANITY_B_DW_SWAP |
265#else
266 DMAE_CMD_ENDIANITY_DW_SWAP |
267#endif
268 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
269 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
270 253
271 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 254 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
272 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC); 255 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
273 dmae->src_addr_lo = bp->port.port_stx >> 2; 256 dmae->src_addr_lo = bp->port.port_stx >> 2;
274 dmae->src_addr_hi = 0; 257 dmae->src_addr_hi = 0;
275 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); 258 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
@@ -280,7 +263,7 @@ static void bnx2x_stats_pmf_update(struct bnx2x *bp)
280 dmae->comp_val = 1; 263 dmae->comp_val = 1;
281 264
282 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 265 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
283 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI); 266 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
284 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX; 267 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
285 dmae->src_addr_hi = 0; 268 dmae->src_addr_hi = 0;
286 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) + 269 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
@@ -301,7 +284,6 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
301{ 284{
302 struct dmae_command *dmae; 285 struct dmae_command *dmae;
303 int port = BP_PORT(bp); 286 int port = BP_PORT(bp);
304 int vn = BP_E1HVN(bp);
305 u32 opcode; 287 u32 opcode;
306 int loader_idx = PMF_DMAE_C(bp); 288 int loader_idx = PMF_DMAE_C(bp);
307 u32 mac_addr; 289 u32 mac_addr;
@@ -316,16 +298,8 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
316 bp->executer_idx = 0; 298 bp->executer_idx = 0;
317 299
318 /* MCP */ 300 /* MCP */
319 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | 301 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
320 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE | 302 true, DMAE_COMP_GRC);
321 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
322#ifdef __BIG_ENDIAN
323 DMAE_CMD_ENDIANITY_B_DW_SWAP |
324#else
325 DMAE_CMD_ENDIANITY_DW_SWAP |
326#endif
327 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
328 (vn << DMAE_CMD_E1HVN_SHIFT));
329 303
330 if (bp->port.port_stx) { 304 if (bp->port.port_stx) {
331 305
@@ -356,16 +330,8 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
356 } 330 }
357 331
358 /* MAC */ 332 /* MAC */
359 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | 333 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
360 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE | 334 true, DMAE_COMP_GRC);
361 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
362#ifdef __BIG_ENDIAN
363 DMAE_CMD_ENDIANITY_B_DW_SWAP |
364#else
365 DMAE_CMD_ENDIANITY_DW_SWAP |
366#endif
367 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
368 (vn << DMAE_CMD_E1HVN_SHIFT));
369 335
370 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) { 336 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
371 337
@@ -376,13 +342,21 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
376 BIGMAC_REGISTER_TX_STAT_GTBYT */ 342 BIGMAC_REGISTER_TX_STAT_GTBYT */
377 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 343 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
378 dmae->opcode = opcode; 344 dmae->opcode = opcode;
379 dmae->src_addr_lo = (mac_addr + 345 if (CHIP_IS_E1x(bp)) {
346 dmae->src_addr_lo = (mac_addr +
380 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; 347 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
348 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
349 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
350 } else {
351 dmae->src_addr_lo = (mac_addr +
352 BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
353 dmae->len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
354 BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
355 }
356
381 dmae->src_addr_hi = 0; 357 dmae->src_addr_hi = 0;
382 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); 358 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
383 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); 359 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
384 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
385 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
386 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; 360 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
387 dmae->comp_addr_hi = 0; 361 dmae->comp_addr_hi = 0;
388 dmae->comp_val = 1; 362 dmae->comp_val = 1;
@@ -391,15 +365,31 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
391 BIGMAC_REGISTER_RX_STAT_GRIPJ */ 365 BIGMAC_REGISTER_RX_STAT_GRIPJ */
392 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 366 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
393 dmae->opcode = opcode; 367 dmae->opcode = opcode;
394 dmae->src_addr_lo = (mac_addr +
395 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
396 dmae->src_addr_hi = 0; 368 dmae->src_addr_hi = 0;
397 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + 369 if (CHIP_IS_E1x(bp)) {
370 dmae->src_addr_lo = (mac_addr +
371 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
372 dmae->dst_addr_lo =
373 U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
398 offsetof(struct bmac1_stats, rx_stat_gr64_lo)); 374 offsetof(struct bmac1_stats, rx_stat_gr64_lo));
399 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + 375 dmae->dst_addr_hi =
376 U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
400 offsetof(struct bmac1_stats, rx_stat_gr64_lo)); 377 offsetof(struct bmac1_stats, rx_stat_gr64_lo));
401 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ - 378 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
402 BIGMAC_REGISTER_RX_STAT_GR64) >> 2; 379 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
380 } else {
381 dmae->src_addr_lo =
382 (mac_addr + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
383 dmae->dst_addr_lo =
384 U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
385 offsetof(struct bmac2_stats, rx_stat_gr64_lo));
386 dmae->dst_addr_hi =
387 U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
388 offsetof(struct bmac2_stats, rx_stat_gr64_lo));
389 dmae->len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
390 BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
391 }
392
403 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; 393 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
404 dmae->comp_addr_hi = 0; 394 dmae->comp_addr_hi = 0;
405 dmae->comp_val = 1; 395 dmae->comp_val = 1;
@@ -480,16 +470,8 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
480 dmae->comp_val = 1; 470 dmae->comp_val = 1;
481 471
482 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 472 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
483 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | 473 dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
484 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE | 474 true, DMAE_COMP_PCI);
485 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
486#ifdef __BIG_ENDIAN
487 DMAE_CMD_ENDIANITY_B_DW_SWAP |
488#else
489 DMAE_CMD_ENDIANITY_DW_SWAP |
490#endif
491 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
492 (vn << DMAE_CMD_E1HVN_SHIFT));
493 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 : 475 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
494 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2; 476 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
495 dmae->src_addr_hi = 0; 477 dmae->src_addr_hi = 0;
@@ -519,16 +501,8 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
519 bp->executer_idx = 0; 501 bp->executer_idx = 0;
520 memset(dmae, 0, sizeof(struct dmae_command)); 502 memset(dmae, 0, sizeof(struct dmae_command));
521 503
522 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | 504 dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
523 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE | 505 true, DMAE_COMP_PCI);
524 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
525#ifdef __BIG_ENDIAN
526 DMAE_CMD_ENDIANITY_B_DW_SWAP |
527#else
528 DMAE_CMD_ENDIANITY_DW_SWAP |
529#endif
530 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
531 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
532 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); 506 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
533 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); 507 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
534 dmae->dst_addr_lo = bp->func_stx >> 2; 508 dmae->dst_addr_lo = bp->func_stx >> 2;
@@ -568,7 +542,6 @@ static void bnx2x_stats_restart(struct bnx2x *bp)
568 542
569static void bnx2x_bmac_stats_update(struct bnx2x *bp) 543static void bnx2x_bmac_stats_update(struct bnx2x *bp)
570{ 544{
571 struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);
572 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); 545 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
573 struct bnx2x_eth_stats *estats = &bp->eth_stats; 546 struct bnx2x_eth_stats *estats = &bp->eth_stats;
574 struct { 547 struct {
@@ -576,35 +549,74 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
576 u32 hi; 549 u32 hi;
577 } diff; 550 } diff;
578 551
579 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets); 552 if (CHIP_IS_E1x(bp)) {
580 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors); 553 struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);
581 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts); 554
582 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong); 555 /* the macros below will use "bmac1_stats" type */
583 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments); 556 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
584 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); 557 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
585 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); 558 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
586 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); 559 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
587 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf); 560 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
588 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); 561 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
589 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); 562 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
590 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); 563 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
591 UPDATE_STAT64(tx_stat_gt127, 564 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
565 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
566 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
567 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
568 UPDATE_STAT64(tx_stat_gt127,
592 tx_stat_etherstatspkts65octetsto127octets); 569 tx_stat_etherstatspkts65octetsto127octets);
593 UPDATE_STAT64(tx_stat_gt255, 570 UPDATE_STAT64(tx_stat_gt255,
594 tx_stat_etherstatspkts128octetsto255octets); 571 tx_stat_etherstatspkts128octetsto255octets);
595 UPDATE_STAT64(tx_stat_gt511, 572 UPDATE_STAT64(tx_stat_gt511,
596 tx_stat_etherstatspkts256octetsto511octets); 573 tx_stat_etherstatspkts256octetsto511octets);
597 UPDATE_STAT64(tx_stat_gt1023, 574 UPDATE_STAT64(tx_stat_gt1023,
598 tx_stat_etherstatspkts512octetsto1023octets); 575 tx_stat_etherstatspkts512octetsto1023octets);
599 UPDATE_STAT64(tx_stat_gt1518, 576 UPDATE_STAT64(tx_stat_gt1518,
600 tx_stat_etherstatspkts1024octetsto1522octets); 577 tx_stat_etherstatspkts1024octetsto1522octets);
601 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047); 578 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
602 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095); 579 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
603 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216); 580 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
604 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383); 581 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
605 UPDATE_STAT64(tx_stat_gterr, 582 UPDATE_STAT64(tx_stat_gterr,
606 tx_stat_dot3statsinternalmactransmiterrors); 583 tx_stat_dot3statsinternalmactransmiterrors);
607 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl); 584 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
585
586 } else {
587 struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);
588
589 /* the macros below will use "bmac2_stats" type */
590 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
591 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
592 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
593 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
594 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
595 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
596 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
597 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
598 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
599 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
600 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
601 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
602 UPDATE_STAT64(tx_stat_gt127,
603 tx_stat_etherstatspkts65octetsto127octets);
604 UPDATE_STAT64(tx_stat_gt255,
605 tx_stat_etherstatspkts128octetsto255octets);
606 UPDATE_STAT64(tx_stat_gt511,
607 tx_stat_etherstatspkts256octetsto511octets);
608 UPDATE_STAT64(tx_stat_gt1023,
609 tx_stat_etherstatspkts512octetsto1023octets);
610 UPDATE_STAT64(tx_stat_gt1518,
611 tx_stat_etherstatspkts1024octetsto1522octets);
612 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
613 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
614 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
615 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
616 UPDATE_STAT64(tx_stat_gterr,
617 tx_stat_dot3statsinternalmactransmiterrors);
618 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
619 }
608 620
609 estats->pause_frames_received_hi = 621 estats->pause_frames_received_hi =
610 pstats->mac_stx[1].rx_stat_bmac_xpf_hi; 622 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
@@ -1121,24 +1133,17 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp)
1121 1133
1122 bp->executer_idx = 0; 1134 bp->executer_idx = 0;
1123 1135
1124 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | 1136 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);
1125 DMAE_CMD_C_ENABLE |
1126 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
1127#ifdef __BIG_ENDIAN
1128 DMAE_CMD_ENDIANITY_B_DW_SWAP |
1129#else
1130 DMAE_CMD_ENDIANITY_DW_SWAP |
1131#endif
1132 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
1133 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
1134 1137
1135 if (bp->port.port_stx) { 1138 if (bp->port.port_stx) {
1136 1139
1137 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 1140 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
1138 if (bp->func_stx) 1141 if (bp->func_stx)
1139 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC); 1142 dmae->opcode = bnx2x_dmae_opcode_add_comp(
1143 opcode, DMAE_COMP_GRC);
1140 else 1144 else
1141 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI); 1145 dmae->opcode = bnx2x_dmae_opcode_add_comp(
1146 opcode, DMAE_COMP_PCI);
1142 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); 1147 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
1143 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); 1148 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
1144 dmae->dst_addr_lo = bp->port.port_stx >> 2; 1149 dmae->dst_addr_lo = bp->port.port_stx >> 2;
@@ -1162,7 +1167,8 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp)
1162 if (bp->func_stx) { 1167 if (bp->func_stx) {
1163 1168
1164 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 1169 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
1165 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI); 1170 dmae->opcode =
1171 bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
1166 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); 1172 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
1167 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); 1173 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
1168 dmae->dst_addr_lo = bp->func_stx >> 2; 1174 dmae->dst_addr_lo = bp->func_stx >> 2;
@@ -1255,16 +1261,8 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
1255 bp->executer_idx = 0; 1261 bp->executer_idx = 0;
1256 1262
1257 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 1263 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
1258 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | 1264 dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
1259 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE | 1265 true, DMAE_COMP_PCI);
1260 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
1261#ifdef __BIG_ENDIAN
1262 DMAE_CMD_ENDIANITY_B_DW_SWAP |
1263#else
1264 DMAE_CMD_ENDIANITY_DW_SWAP |
1265#endif
1266 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
1267 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
1268 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); 1266 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
1269 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); 1267 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
1270 dmae->dst_addr_lo = bp->port.port_stx >> 2; 1268 dmae->dst_addr_lo = bp->port.port_stx >> 2;
@@ -1282,8 +1280,6 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
1282static void bnx2x_func_stats_base_init(struct bnx2x *bp) 1280static void bnx2x_func_stats_base_init(struct bnx2x *bp)
1283{ 1281{
1284 int vn, vn_max = IS_MF(bp) ? E1HVN_MAX : E1VN_MAX; 1282 int vn, vn_max = IS_MF(bp) ? E1HVN_MAX : E1VN_MAX;
1285 int port = BP_PORT(bp);
1286 int func;
1287 u32 func_stx; 1283 u32 func_stx;
1288 1284
1289 /* sanity */ 1285 /* sanity */
@@ -1296,9 +1292,9 @@ static void bnx2x_func_stats_base_init(struct bnx2x *bp)
1296 func_stx = bp->func_stx; 1292 func_stx = bp->func_stx;
1297 1293
1298 for (vn = VN_0; vn < vn_max; vn++) { 1294 for (vn = VN_0; vn < vn_max; vn++) {
1299 func = 2*vn + port; 1295 int mb_idx = !CHIP_IS_E2(bp) ? 2*vn + BP_PORT(bp) : vn;
1300 1296
1301 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param); 1297 bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
1302 bnx2x_func_stats_init(bp); 1298 bnx2x_func_stats_init(bp);
1303 bnx2x_hw_stats_post(bp); 1299 bnx2x_hw_stats_post(bp);
1304 bnx2x_stats_comp(bp); 1300 bnx2x_stats_comp(bp);
@@ -1322,16 +1318,8 @@ static void bnx2x_func_stats_base_update(struct bnx2x *bp)
1322 bp->executer_idx = 0; 1318 bp->executer_idx = 0;
1323 memset(dmae, 0, sizeof(struct dmae_command)); 1319 memset(dmae, 0, sizeof(struct dmae_command));
1324 1320
1325 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | 1321 dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
1326 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE | 1322 true, DMAE_COMP_PCI);
1327 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
1328#ifdef __BIG_ENDIAN
1329 DMAE_CMD_ENDIANITY_B_DW_SWAP |
1330#else
1331 DMAE_CMD_ENDIANITY_DW_SWAP |
1332#endif
1333 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
1334 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
1335 dmae->src_addr_lo = bp->func_stx >> 2; 1323 dmae->src_addr_lo = bp->func_stx >> 2;
1336 dmae->src_addr_hi = 0; 1324 dmae->src_addr_hi = 0;
1337 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base)); 1325 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
@@ -1349,7 +1337,7 @@ static void bnx2x_func_stats_base_update(struct bnx2x *bp)
1349void bnx2x_stats_init(struct bnx2x *bp) 1337void bnx2x_stats_init(struct bnx2x *bp)
1350{ 1338{
1351 int port = BP_PORT(bp); 1339 int port = BP_PORT(bp);
1352 int func = BP_FUNC(bp); 1340 int mb_idx = BP_FW_MB_IDX(bp);
1353 int i; 1341 int i;
1354 1342
1355 bp->stats_pending = 0; 1343 bp->stats_pending = 0;
@@ -1359,7 +1347,7 @@ void bnx2x_stats_init(struct bnx2x *bp)
1359 /* port and func stats for management */ 1347 /* port and func stats for management */
1360 if (!BP_NOMCP(bp)) { 1348 if (!BP_NOMCP(bp)) {
1361 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx); 1349 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
1362 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param); 1350 bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
1363 1351
1364 } else { 1352 } else {
1365 bp->port.port_stx = 0; 1353 bp->port.port_stx = 0;
diff --git a/firmware/Makefile b/firmware/Makefile
index e0a3439ab312..494a167c6552 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -33,7 +33,8 @@ fw-shipped-$(CONFIG_ADAPTEC_STARFIRE) += adaptec/starfire_rx.bin \
33fw-shipped-$(CONFIG_ATARI_DSP56K) += dsp56k/bootstrap.bin 33fw-shipped-$(CONFIG_ATARI_DSP56K) += dsp56k/bootstrap.bin
34fw-shipped-$(CONFIG_ATM_AMBASSADOR) += atmsar11.fw 34fw-shipped-$(CONFIG_ATM_AMBASSADOR) += atmsar11.fw
35fw-shipped-$(CONFIG_BNX2X) += bnx2x/bnx2x-e1-6.0.34.0.fw \ 35fw-shipped-$(CONFIG_BNX2X) += bnx2x/bnx2x-e1-6.0.34.0.fw \
36 bnx2x/bnx2x-e1h-6.0.34.0.fw 36 bnx2x/bnx2x-e1h-6.0.34.0.fw \
37 bnx2x/bnx2x-e2-6.0.34.0.fw
37fw-shipped-$(CONFIG_BNX2) += bnx2/bnx2-mips-09-5.0.0.j15.fw \ 38fw-shipped-$(CONFIG_BNX2) += bnx2/bnx2-mips-09-5.0.0.j15.fw \
38 bnx2/bnx2-rv2p-09-5.0.0.j10.fw \ 39 bnx2/bnx2-rv2p-09-5.0.0.j10.fw \
39 bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw \ 40 bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw \