author     Linus Torvalds <torvalds@linux-foundation.org>   2015-02-10 23:01:30 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2015-02-10 23:01:30 -0500
commit     c5ce28df0e7c01a1de23c36ebdefcd803f2b6cbb (patch)
tree       9830baf38832769e1cf621708889111bbe3c93df /drivers/net/ethernet
parent     29afc4e9a408f2304e09c6dd0dbcfbd2356d0faa (diff)
parent     9399f0c51489ae8c16d6559b82a452fdc1895e91 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) More iov_iter conversion work from Al Viro.

    [ The "crypto: switch af_alg_make_sg() to iov_iter" commit was wrong,
      and this pull actually adds an extra commit on top of the branch I'm
      pulling to fix that up, so that the pre-merge state is ok. - Linus ]

 2) Various optimizations to the ipv4 forwarding information base trie
    lookup implementation.  From Alexander Duyck.

 3) Remove sock_iocb altogether, from Christoph Hellwig.

 4) Allow congestion control algorithm selection via routing metrics.
    From Daniel Borkmann.

 5) Make ipv4 uncached route list per-cpu, from Eric Dumazet.

 6) Handle rfs hash collisions more gracefully, also from Eric Dumazet.

 7) Add xmit_more support to r8169, e1000, and e1000e drivers.  From
    Florian Westphal.

 8) Transparent Ethernet Bridging support for GRO, from Jesse Gross.

 9) Add BPF packet actions to packet scheduler, from Jiri Pirko.

10) Add support for unique flow IDs to openvswitch, from Joe Stringer.

11) New NetCP ethernet driver, from Muralidharan Karicheri and Wingman
    Kwok.

12) More sanely handle out-of-window dupacks, which can result in
    serious ACK storms.  From Neal Cardwell.

13) Various rhashtable bug fixes and enhancements, from Herbert Xu,
    Patrick McHardy, and Thomas Graf.

14) Support xmit_more in be2net, from Sathya Perla.

15) Group Policy extensions for vxlan, from Thomas Graf.

16) Remote Checksum Offload support for vxlan, from Tom Herbert.

17) Like ipv4, support lockless transmit over ipv6 UDP sockets.  From
    Vlad Yasevich.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1494+1 commits)
  crypto: fix af_alg_make_sg() conversion to iov_iter
  ipv4: Namespecify TCP PMTU mechanism
  i40e: Fix for stats init function call in Rx setup
  tcp: don't include Fast Open option in SYN-ACK on pure SYN-data
  openvswitch: Only set TUNNEL_VXLAN_OPT if VXLAN-GBP metadata is set
  ipv6: Make __ipv6_select_ident static
  ipv6: Fix fragment id assignment on LE arches.
  bridge: Fix inability to add non-vlan fdb entry
  net: Mellanox: Delete unnecessary checks before the function call "vunmap"
  cxgb4: Add support in cxgb4 to get expansion rom version via ethtool
  ethtool: rename reserved1 memeber in ethtool_drvinfo for expansion ROM version
  net: dsa: Remove redundant phy_attach()
  IB/mlx4: Reset flow support for IB kernel ULPs
  IB/mlx4: Always use the correct port for mirrored multicast attachments
  net/bonding: Fix potential bad memory access during bonding events
  tipc: remove tipc_snprintf
  tipc: nl compat add noop and remove legacy nl framework
  tipc: convert legacy nl stats show to nl compat
  tipc: convert legacy nl net id get to nl compat
  tipc: convert legacy nl net id set to nl compat
  ...
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/3com/typhoon.c | 4
-rw-r--r--  drivers/net/ethernet/alteon/acenic.c | 8
-rw-r--r--  drivers/net/ethernet/amd/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/amd/amd8111e.c | 4
-rw-r--r--  drivers/net/ethernet/amd/pcnet32.c | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 32
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 66
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 78
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-main.c | 203
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 29
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-ptp.c | 12
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe.h | 31
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 94
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 109
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.h | 3
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 4
-rw-r--r--  drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 9
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl1.c | 4
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl2.c | 14
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 14
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 29
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c | 4
-rw-r--r--  drivers/net/ethernet/cadence/macb.c | 84
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 631
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/sge.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/mc5.c | 16
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/sge.c | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/t3_hw.c | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/Makefile | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c | 317
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h | 41
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 169
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c | 100
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h | 11
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 1917
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h | 33
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 1003
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.c | 13
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 270
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 1543
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.h | 24
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 367
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 3392
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_values.h | 124
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 101
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h | 48
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 44
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 57
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 43
-rw-r--r--  drivers/net/ethernet/cirrus/ep93xx_eth.c | 6
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic.h | 16
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_dev.c | 56
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_dev.h | 5
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_ethtool.c | 21
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 179
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_stats.h | 5
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_wq.c | 3
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_wq.h | 1
-rw-r--r--  drivers/net/ethernet/davicom/dm9000.c | 40
-rw-r--r--  drivers/net/ethernet/dec/tulip/winbond-840.c | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 203
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 231
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h | 218
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_ethtool.c | 16
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_hw.h | 240
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 951
-rw-r--r--  drivers/net/ethernet/freescale/Kconfig | 3
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 3
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 145
-rw-r--r--  drivers/net/ethernet/freescale/fec_ptp.c | 16
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 95
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet.h | 1
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 17
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/xgmac_mdio.c | 130
-rw-r--r--  drivers/net/ethernet/hisilicon/Kconfig | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/Makefile | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hip04_eth.c | 971
-rw-r--r--  drivers/net/ethernet/hisilicon/hip04_mdio.c | 186
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c | 4
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c | 2
-rw-r--r--  drivers/net/ethernet/intel/Kconfig | 11
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 20
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 41
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ptp.c | 5
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_main.c | 44
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_mbx.c | 5
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 15
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 7
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_ptp.c | 3
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_type.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 10
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 152
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 136
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 43
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_fcoe.c | 18
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 149
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_prototype.h | 5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c | 44
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 13
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h | 10
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 34
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 108
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 44
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_type.h | 8
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c | 112
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 6
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 11
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 157
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 267
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 5
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 5
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 118
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 13
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 16
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 12
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 90
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 36
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 499
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/regs.h | 10
-rw-r--r--  drivers/net/ethernet/jme.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/alloc.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/catas.c | 294
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c | 422
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_clock.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_cq.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_main.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 182
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_resources.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c | 100
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c | 144
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/icm.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/intf.c | 62
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 489
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mcg.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 31
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mr.c | 25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/pd.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/reset.c | 23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 57
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/alloc.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 12
-rw-r--r--  drivers/net/ethernet/micrel/ksz884x.c | 4
-rw-r--r--  drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 3
-rw-r--r--  drivers/net/ethernet/natsemi/ns83820.c | 4
-rw-r--r--  drivers/net/ethernet/neterion/s2io.c | 4
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-config.c | 2
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-main.c | 4
-rw-r--r--  drivers/net/ethernet/nvidia/forcedeth.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 25
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 24
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c | 6
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c | 4
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 16
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 48
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h | 30
-rw-r--r--  drivers/net/ethernet/rocker/rocker.c | 177
-rw-r--r--  drivers/net/ethernet/rocker/rocker.h | 21
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 69
-rw-r--r--  drivers/net/ethernet/smsc/Kconfig | 10
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.h | 21
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Makefile | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 437
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c | 13
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 26
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 113
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h | 1
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 3
-rw-r--r--  drivers/net/ethernet/sun/sunvnet.c | 90
-rw-r--r--  drivers/net/ethernet/tehuti/tehuti.c | 4
-rw-r--r--  drivers/net/ethernet/ti/Kconfig | 25
-rw-r--r--  drivers/net/ethernet/ti/Makefile | 11
-rw-r--r--  drivers/net/ethernet/ti/cpsw-common.c | 55
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 111
-rw-r--r--  drivers/net/ethernet/ti/cpsw.h | 2
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.c | 26
-rw-r--r--  drivers/net/ethernet/ti/cpts.c | 5
-rw-r--r--  drivers/net/ethernet/ti/cpts.h | 1
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c | 56
-rw-r--r--  drivers/net/ethernet/ti/netcp.h | 229
-rw-r--r--  drivers/net/ethernet/ti/netcp_core.c | 2149
-rw-r--r--  drivers/net/ethernet/ti/netcp_ethss.c | 2159
-rw-r--r--  drivers/net/ethernet/ti/netcp_sgmii.c | 131
-rw-r--r--  drivers/net/ethernet/ti/netcp_xgbepcsr.c | 501
-rw-r--r--  drivers/net/ethernet/ti/tlan.c | 14
-rw-r--r--  drivers/net/ethernet/via/via-rhine.c | 9
-rw-r--r--  drivers/net/ethernet/via/via-velocity.c | 4
214 files changed, 20425 insertions(+), 5476 deletions(-)
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index dede43f4ce09..8f8418d2ac4a 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -769,11 +769,11 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
         first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
     }
 
-    if(vlan_tx_tag_present(skb)) {
+    if (skb_vlan_tag_present(skb)) {
         first_txd->processFlags |=
             TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
         first_txd->processFlags |=
-            cpu_to_le32(htons(vlan_tx_tag_get(skb)) <<
+            cpu_to_le32(htons(skb_vlan_tag_get(skb)) <<
                 TYPHOON_TX_PF_VLAN_TAG_SHIFT);
     }
 
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index b68074803de3..b90a26b13fdf 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -2429,9 +2429,9 @@ restart:
     flagsize = (skb->len << 16) | (BD_FLG_END);
     if (skb->ip_summed == CHECKSUM_PARTIAL)
         flagsize |= BD_FLG_TCP_UDP_SUM;
-    if (vlan_tx_tag_present(skb)) {
+    if (skb_vlan_tag_present(skb)) {
         flagsize |= BD_FLG_VLAN_TAG;
-        vlan_tag = vlan_tx_tag_get(skb);
+        vlan_tag = skb_vlan_tag_get(skb);
     }
     desc = ap->tx_ring + idx;
     idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
@@ -2450,9 +2450,9 @@ restart:
         flagsize = (skb_headlen(skb) << 16);
         if (skb->ip_summed == CHECKSUM_PARTIAL)
             flagsize |= BD_FLG_TCP_UDP_SUM;
-        if (vlan_tx_tag_present(skb)) {
+        if (skb_vlan_tag_present(skb)) {
             flagsize |= BD_FLG_VLAN_TAG;
-            vlan_tag = vlan_tx_tag_get(skb);
+            vlan_tag = skb_vlan_tag_get(skb);
         }
 
         ace_load_tx_bd(ap, ap->tx_ring + idx, mapping, flagsize, vlan_tag);
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 77f1f6048ddd..c638c85f3954 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -179,7 +179,7 @@ config SUNLANCE
 
 config AMD_XGBE
     tristate "AMD 10GbE Ethernet driver"
-    depends on OF_NET && HAS_IOMEM
+    depends on (OF_NET || ACPI) && HAS_IOMEM
     select PHYLIB
     select AMD_XGBE_PHY
     select BITREVERSE
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 841e6558db68..4c2ae2221780 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1299,11 +1299,11 @@ static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
     lp->tx_ring[tx_index].tx_flags = 0;
 
 #if AMD8111E_VLAN_TAG_USED
-    if (vlan_tx_tag_present(skb)) {
+    if (skb_vlan_tag_present(skb)) {
         lp->tx_ring[tx_index].tag_ctrl_cmd |=
             cpu_to_le16(TCC_VLAN_INSERT);
         lp->tx_ring[tx_index].tag_ctrl_info =
-            cpu_to_le16(vlan_tx_tag_get(skb));
+            cpu_to_le16(skb_vlan_tag_get(skb));
 
     }
 #endif
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index e2e3aaf501a2..11d6e6561df1 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -2806,7 +2806,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
 
 /*
  * Check for loss of link and link establishment.
- * Can not use mii_check_media because it does nothing if mode is forced.
+ * Could possibly be changed to use mii_check_media instead.
  */
 
 static void pcnet32_watchdog(struct net_device *dev)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
index 76479d04b903..2c063b60db4b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -328,7 +328,7 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
 
     buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name);
     pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL);
-    if (pdata->xgbe_debugfs == NULL) {
+    if (!pdata->xgbe_debugfs) {
         netdev_err(pdata->netdev, "debugfs_create_dir failed\n");
         return;
     }
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index a50891f52197..d81fc6bd4759 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -422,7 +422,6 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
 
     ring->cur = 0;
     ring->dirty = 0;
-    memset(&ring->rx, 0, sizeof(ring->rx));
 
     hw_if->rx_desc_init(channel);
 }
@@ -621,35 +620,6 @@ err_out:
     return 0;
 }
 
-static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
-{
-    struct xgbe_prv_data *pdata = channel->pdata;
-    struct xgbe_hw_if *hw_if = &pdata->hw_if;
-    struct xgbe_ring *ring = channel->rx_ring;
-    struct xgbe_ring_data *rdata;
-    int i;
-
-    DBGPR("-->xgbe_realloc_rx_buffer: rx_ring->rx.realloc_index = %u\n",
-          ring->rx.realloc_index);
-
-    for (i = 0; i < ring->dirty; i++) {
-        rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index);
-
-        /* Reset rdata values */
-        xgbe_unmap_rdata(pdata, rdata);
-
-        if (xgbe_map_rx_buffer(pdata, ring, rdata))
-            break;
-
-        hw_if->rx_desc_reset(rdata);
-
-        ring->rx.realloc_index++;
-    }
-    ring->dirty = 0;
-
-    DBGPR("<--xgbe_realloc_rx_buffer\n");
-}
-
 void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
 {
     DBGPR("-->xgbe_init_function_ptrs_desc\n");
@@ -657,7 +627,7 @@ void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
     desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
     desc_if->free_ring_resources = xgbe_free_ring_resources;
     desc_if->map_tx_skb = xgbe_map_tx_skb;
-    desc_if->realloc_rx_buffer = xgbe_realloc_rx_buffer;
+    desc_if->map_rx_buffer = xgbe_map_rx_buffer;
     desc_if->unmap_rdata = xgbe_unmap_rdata;
     desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
     desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 4c66cd1d1e60..400757b49872 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -115,6 +115,7 @@
  */
 
 #include <linux/phy.h>
+#include <linux/mdio.h>
 #include <linux/clk.h>
 #include <linux/bitrev.h>
 #include <linux/crc32.h>
@@ -130,7 +131,7 @@ static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
 
     DBGPR("-->xgbe_usec_to_riwt\n");
 
-    rate = clk_get_rate(pdata->sysclk);
+    rate = pdata->sysclk_rate;
 
     /*
      * Convert the input usec value to the watchdog timer value. Each
@@ -153,7 +154,7 @@ static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
 
     DBGPR("-->xgbe_riwt_to_usec\n");
 
-    rate = clk_get_rate(pdata->sysclk);
+    rate = pdata->sysclk_rate;
 
     /*
      * Convert the input watchdog timer value to the usec value. Each
@@ -673,6 +674,9 @@ static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
 
 static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
 {
+    if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x3)
+        return 0;
+
     XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x3);
 
     return 0;
@@ -680,6 +684,9 @@ static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
 
 static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata)
 {
+    if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x2)
+        return 0;
+
     XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x2);
 
     return 0;
@@ -687,6 +694,9 @@ static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata)
 
 static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata)
 {
+    if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0)
+        return 0;
+
     XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0);
 
     return 0;
@@ -881,6 +891,23 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
     else
         mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
 
+    /* If the PCS is changing modes, match the MAC speed to it */
+    if (((mmd_address >> 16) == MDIO_MMD_PCS) &&
+        ((mmd_address & 0xffff) == MDIO_CTRL2)) {
+        struct phy_device *phydev = pdata->phydev;
+
+        if (mmd_data & MDIO_PCS_CTRL2_TYPE) {
+            /* KX mode */
+            if (phydev->supported & SUPPORTED_1000baseKX_Full)
+                xgbe_set_gmii_speed(pdata);
+            else
+                xgbe_set_gmii_2500_speed(pdata);
+        } else {
+            /* KR mode */
+            xgbe_set_xgmii_speed(pdata);
+        }
+    }
+
     /* The PCS registers are accessed using mmio. The underlying APB3
      * management interface uses indirect addressing to access the MMD
      * register sets. This requires accessing of the PCS register in two
@@ -1359,6 +1386,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
     unsigned int tso_context, vlan_context;
     unsigned int tx_set_ic;
     int start_index = ring->cur;
+    int cur_index = ring->cur;
     int i;
 
     DBGPR("-->xgbe_dev_xmit\n");
@@ -1401,7 +1429,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
     else
         tx_set_ic = 0;
 
-    rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+    rdata = XGBE_GET_DESC_DATA(ring, cur_index);
     rdesc = rdata->rdesc;
 
     /* Create a context descriptor if this is a TSO packet */
@@ -1444,8 +1472,8 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
             ring->tx.cur_vlan_ctag = packet->vlan_ctag;
         }
 
-        ring->cur++;
-        rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+        cur_index++;
+        rdata = XGBE_GET_DESC_DATA(ring, cur_index);
         rdesc = rdata->rdesc;
     }
 
@@ -1473,7 +1501,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
     XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
 
     /* Set OWN bit if not the first descriptor */
-    if (ring->cur != start_index)
+    if (cur_index != start_index)
         XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
 
     if (tso) {
@@ -1497,9 +1525,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
                    packet->length);
     }
 
-    for (i = ring->cur - start_index + 1; i < packet->rdesc_count; i++) {
-        ring->cur++;
-        rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+    for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) {
+        cur_index++;
+        rdata = XGBE_GET_DESC_DATA(ring, cur_index);
         rdesc = rdata->rdesc;
 
         /* Update buffer address */
@@ -1551,7 +1579,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
     /* Make sure ownership is written to the descriptor */
     wmb();
 
-    ring->cur++;
+    ring->cur = cur_index + 1;
     if (!packet->skb->xmit_more ||
         netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
                            channel->queue_index)))
@@ -2107,6 +2135,23 @@ static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
     XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
 }
 
+static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
+{
+    switch (pdata->phy_speed) {
+    case SPEED_10000:
+        xgbe_set_xgmii_speed(pdata);
+        break;
+
+    case SPEED_2500:
+        xgbe_set_gmii_2500_speed(pdata);
+        break;
+
+    case SPEED_1000:
+        xgbe_set_gmii_speed(pdata);
+        break;
+    }
+}
+
 static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
 {
     if (pdata->netdev->features & NETIF_F_RXCSUM)
@@ -2757,6 +2802,7 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
     xgbe_config_mac_address(pdata);
     xgbe_config_jumbo_enable(pdata);
     xgbe_config_flow_control(pdata);
+    xgbe_config_mac_speed(pdata);
     xgbe_config_checksum_offload(pdata);
     xgbe_config_vlan_support(pdata);
     xgbe_config_mmc(pdata);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index e5ffb2ccb67d..b93d4404d975 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -225,6 +225,11 @@ static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
     return (ring->rdesc_count - (ring->cur - ring->dirty));
 }
 
+static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring)
+{
+    return (ring->cur - ring->dirty);
+}
+
 static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
                     struct xgbe_ring *ring, unsigned int count)
 {
@@ -337,12 +342,13 @@ static irqreturn_t xgbe_isr(int irq, void *data)
         dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
         DBGPR(" DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);
 
-        /* If we get a TI or RI interrupt that means per channel DMA
-         * interrupts are not enabled, so we use the private data napi
-         * structure, not the per channel napi structure
+        /* The TI or RI interrupt bits may still be set even if using
+         * per channel DMA interrupts. Check to be sure those are not
+         * enabled before using the private data napi structure.
          */
-        if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
-            XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI)) {
+        if (!pdata->per_channel_irq &&
+            (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
+             XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
             if (napi_schedule_prep(&pdata->napi)) {
                 /* Disable Tx and Rx interrupts */
                 xgbe_disable_rx_tx_ints(pdata);
@@ -410,17 +416,13 @@ static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
     struct xgbe_channel *channel = container_of(timer,
                             struct xgbe_channel,
                             tx_timer);
-    struct xgbe_ring *ring = channel->tx_ring;
     struct xgbe_prv_data *pdata = channel->pdata;
     struct napi_struct *napi;
-    unsigned long flags;
 
     DBGPR("-->xgbe_tx_timer\n");
 
     napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
 
-    spin_lock_irqsave(&ring->lock, flags);
-
     if (napi_schedule_prep(napi)) {
         /* Disable Tx and Rx interrupts */
         if (pdata->per_channel_irq)
@@ -434,8 +436,6 @@ static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
 
     channel->tx_timer_active = 0;
 
-    spin_unlock_irqrestore(&ring->lock, flags);
-
     DBGPR("<--xgbe_tx_timer\n");
 
     return HRTIMER_NORESTART;
@@ -694,7 +694,7 @@ static void xgbe_adjust_link(struct net_device *netdev)
     struct phy_device *phydev = pdata->phydev;
     int new_state = 0;
 
-    if (phydev == NULL)
+    if (!phydev)
         return;
 
     if (phydev->link) {
@@ -929,7 +929,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
     DBGPR("<--xgbe_stop\n");
 }
 
-static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
+static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
 {
     struct xgbe_channel *channel;
     struct xgbe_hw_if *hw_if = &pdata->hw_if;
@@ -952,9 +952,8 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
     xgbe_free_tx_data(pdata);
     xgbe_free_rx_data(pdata);
 
-    /* Issue software reset to device if requested */
-    if (reset)
-        hw_if->exit(pdata);
+    /* Issue software reset to device */
+    hw_if->exit(pdata);
 
     xgbe_start(pdata);
 
@@ -969,7 +968,7 @@ static void xgbe_restart(struct work_struct *work)
 
     rtnl_lock();
 
-    xgbe_restart_dev(pdata, 1);
+    xgbe_restart_dev(pdata);
 
     rtnl_unlock();
 }
@@ -1167,8 +1166,8 @@ static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
 
 static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
 {
-    if (vlan_tx_tag_present(skb))
-        packet->vlan_ctag = vlan_tx_tag_get(skb);
+    if (skb_vlan_tag_present(skb))
+        packet->vlan_ctag = skb_vlan_tag_get(skb);
 }
 
 static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
@@ -1249,9 +1248,9 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
         XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                    CSUM_ENABLE, 1);
 
-    if (vlan_tx_tag_present(skb)) {
+    if (skb_vlan_tag_present(skb)) {
         /* VLAN requires an extra descriptor if tag is different */
-        if (vlan_tx_tag_get(skb) != ring->tx.cur_vlan_ctag)
+        if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
             /* We can share with the TSO context descriptor */
             if (!context_desc) {
                 context_desc = 1;
@@ -1448,7 +1447,6 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
     struct xgbe_ring *ring;
     struct xgbe_packet_data *packet;
     struct netdev_queue *txq;
-    unsigned long flags;
     int ret;
 
     DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
@@ -1460,8 +1458,6 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
 
     ret = NETDEV_TX_OK;
 
-    spin_lock_irqsave(&ring->lock, flags);
-
     if (skb->len == 0) {
         netdev_err(netdev, "empty skb received from stack\n");
         dev_kfree_skb_any(skb);
@@ -1508,10 +1504,6 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
     ret = NETDEV_TX_OK;
 
 tx_netdev_return:
-    spin_unlock_irqrestore(&ring->lock, flags);
-
-    DBGPR("<--xgbe_xmit\n");
-
     return ret;
 }
 
@@ -1589,7 +1581,7 @@ static int xgbe_change_mtu(struct net_device *netdev, int mtu)
     pdata->rx_buf_size = ret;
     netdev->mtu = mtu;
 
-    xgbe_restart_dev(pdata, 0);
+    xgbe_restart_dev(pdata);
 
     DBGPR("<--xgbe_change_mtu\n");
 
@@ -1778,15 +1770,28 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
 static void xgbe_rx_refresh(struct xgbe_channel *channel)
 {
     struct xgbe_prv_data *pdata = channel->pdata;
+    struct xgbe_hw_if *hw_if = &pdata->hw_if;
     struct xgbe_desc_if *desc_if = &pdata->desc_if;
     struct xgbe_ring *ring = channel->rx_ring;
     struct xgbe_ring_data *rdata;
 
-    desc_if->realloc_rx_buffer(channel);
+    while (ring->dirty != ring->cur) {
+        rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
+
+        /* Reset rdata values */
+        desc_if->unmap_rdata(pdata, rdata);
+
+        if (desc_if->map_rx_buffer(pdata, ring, rdata))
+            break;
+
+        hw_if->rx_desc_reset(rdata);
+
+        ring->dirty++;
+    }
 
     /* Update the Rx Tail Pointer Register with address of
      * the last cleaned entry */
-    rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index - 1);
+    rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
     XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
               lower_32_bits(rdata->rdesc_dma));
 }
@@ -1826,7 +1831,6 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
     struct xgbe_ring_desc *rdesc;
     struct net_device *netdev = pdata->netdev;
     struct netdev_queue *txq;
-    unsigned long flags;
     int processed = 0;
     unsigned int tx_packets = 0, tx_bytes = 0;
 
@@ -1838,8 +1842,6 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
 
     txq = netdev_get_tx_queue(netdev, channel->queue_index);
 
-    spin_lock_irqsave(&ring->lock, flags);
-
     while ((processed < XGBE_TX_DESC_MAX_PROC) &&
            (ring->dirty != ring->cur)) {
         rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
@@ -1870,7 +1872,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
     }
 
     if (!processed)
-        goto unlock;
+        return 0;
 
     netdev_tx_completed_queue(txq, tx_packets, tx_bytes);
 
@@ -1882,9 +1884,6 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
 
     DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);
 
-unlock:
-    spin_unlock_irqrestore(&ring->lock, flags);
-
     return processed;
 }
 
@@ -1936,7 +1935,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
 read_again:
         rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
 
-        if (ring->dirty > (XGBE_RX_DESC_CNT >> 3))
+        if (xgbe_rx_dirty_desc(ring) > (XGBE_RX_DESC_CNT >> 3))
             xgbe_rx_refresh(channel);
 
         if (hw_if->dev_read(channel))
@@ -1944,7 +1943,6 @@ read_again:
 
         received++;
         ring->cur++;
-        ring->dirty++;
 
         incomplete = XGMAC_GET_BITS(packet->attributes,
                         RX_PACKET_ATTRIBUTES,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index dbd3850b8b0a..32dd65137051 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -123,7 +123,10 @@
 #include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of_net.h>
+#include <linux/of_address.h>
 #include <linux/clk.h>
+#include <linux/property.h>
+#include <linux/acpi.h>
 
 #include "xgbe.h"
 #include "xgbe-common.h"
@@ -148,6 +151,7 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
     pdata->pause_autoneg = 1;
     pdata->tx_pause = 1;
     pdata->rx_pause = 1;
+    pdata->phy_speed = SPEED_UNKNOWN;
     pdata->power_down = 0;
     pdata->default_autoneg = AUTONEG_ENABLE;
     pdata->default_speed = SPEED_10000;
@@ -161,6 +165,96 @@ static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
     xgbe_init_function_ptrs_desc(&pdata->desc_if);
 }
 
+#ifdef CONFIG_ACPI
+static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
+{
+    struct acpi_device *adev = pdata->adev;
+    struct device *dev = pdata->dev;
+    u32 property;
+    acpi_handle handle;
+    acpi_status status;
+    unsigned long long data;
+    int cca;
+    int ret;
+
+    /* Obtain the system clock setting */
+    ret = device_property_read_u32(dev, XGBE_ACPI_DMA_FREQ, &property);
+    if (ret) {
+        dev_err(dev, "unable to obtain %s property\n",
+            XGBE_ACPI_DMA_FREQ);
+        return ret;
+    }
+    pdata->sysclk_rate = property;
+
+    /* Obtain the PTP clock setting */
+    ret = device_property_read_u32(dev, XGBE_ACPI_PTP_FREQ, &property);
+    if (ret) {
+        dev_err(dev, "unable to obtain %s property\n",
+            XGBE_ACPI_PTP_FREQ);
+        return ret;
+    }
+    pdata->ptpclk_rate = property;
+
+    /* Retrieve the device cache coherency value */
+    handle = adev->handle;
+    do {
+        status = acpi_evaluate_integer(handle, "_CCA", NULL, &data);
+        if (!ACPI_FAILURE(status)) {
+            cca = data;
+            break;
+        }
+
+        status = acpi_get_parent(handle, &handle);
+    } while (!ACPI_FAILURE(status));
+
+    if (ACPI_FAILURE(status)) {
+        dev_err(dev, "error obtaining acpi coherency value\n");
+        return -EINVAL;
+    }
+    pdata->coherent = !!cca;
+
+    return 0;
+}
+#else   /* CONFIG_ACPI */
+static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
+{
+    return -EINVAL;
+}
+#endif  /* CONFIG_ACPI */
+
+#ifdef CONFIG_OF
+static int xgbe_of_support(struct xgbe_prv_data *pdata)
+{
+    struct device *dev = pdata->dev;
+
+    /* Obtain the system clock setting */
+    pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
+    if (IS_ERR(pdata->sysclk)) {
+        dev_err(dev, "dma devm_clk_get failed\n");
+        return PTR_ERR(pdata->sysclk);
+    }
+    pdata->sysclk_rate = clk_get_rate(pdata->sysclk);
+
+    /* Obtain the PTP clock setting */
+    pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
+    if (IS_ERR(pdata->ptpclk)) {
+        dev_err(dev, "ptp devm_clk_get failed\n");
+        return PTR_ERR(pdata->ptpclk);
+    }
+    pdata->ptpclk_rate = clk_get_rate(pdata->ptpclk);
+
+    /* Retrieve the device cache coherency value */
+    pdata->coherent = of_dma_is_coherent(dev->of_node);
+
+    return 0;
+}
+#else   /* CONFIG_OF */
+static int xgbe_of_support(struct xgbe_prv_data *pdata)
+{
+    return -EINVAL;
+}
+#endif  /*CONFIG_OF */
+
 static int xgbe_probe(struct platform_device *pdev)
 {
     struct xgbe_prv_data *pdata;
@@ -169,7 +263,7 @@ static int xgbe_probe(struct platform_device *pdev)
     struct net_device *netdev;
     struct device *dev = &pdev->dev;
     struct resource *res;
-    const u8 *mac_addr;
+    const char *phy_mode;
     unsigned int i;
     int ret;
 
@@ -186,6 +280,7 @@ static int xgbe_probe(struct platform_device *pdev)
     pdata = netdev_priv(netdev);
     pdata->netdev = netdev;
     pdata->pdev = pdev;
+    pdata->adev = ACPI_COMPANION(dev);
     pdata->dev = dev;
     platform_set_drvdata(pdev, netdev);
 
@@ -194,6 +289,9 @@ static int xgbe_probe(struct platform_device *pdev)
     mutex_init(&pdata->rss_mutex);
     spin_lock_init(&pdata->tstamp_lock);
 
+    /* Check if we should use ACPI or DT */
+    pdata->use_acpi = (!pdata->adev || acpi_disabled) ? 0 : 1;
+
     /* Set and validate the number of descriptors for a ring */
     BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
     pdata->tx_desc_count = XGBE_TX_DESC_CNT;
@@ -212,22 +310,6 @@ static int xgbe_probe(struct platform_device *pdev)
         goto err_io;
     }
 
-    /* Obtain the system clock setting */
-    pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
-    if (IS_ERR(pdata->sysclk)) {
-        dev_err(dev, "dma devm_clk_get failed\n");
-        ret = PTR_ERR(pdata->sysclk);
-        goto err_io;
-    }
-
-    /* Obtain the PTP clock setting */
-    pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
-    if (IS_ERR(pdata->ptpclk)) {
-        dev_err(dev, "ptp devm_clk_get failed\n");
-        ret = PTR_ERR(pdata->ptpclk);
-        goto err_io;
-    }
-
     /* Obtain the mmio areas for the device */
     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
     pdata->xgmac_regs = devm_ioremap_resource(dev, res);
@@ -247,16 +329,42 @@ static int xgbe_probe(struct platform_device *pdev)
     }
     DBGPR(" xpcs_regs = %p\n", pdata->xpcs_regs);
 
-    /* Set the DMA mask */
-    if (!dev->dma_mask)
-        dev->dma_mask = &dev->coherent_dma_mask;
-    ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
-    if (ret) {
-        dev_err(dev, "dma_set_mask_and_coherent failed\n");
+    /* Retrieve the MAC address */
+    ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
+                        pdata->mac_addr,
+                        sizeof(pdata->mac_addr));
+    if (ret || !is_valid_ether_addr(pdata->mac_addr)) {
+        dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY);
+        if (!ret)
+            ret = -EINVAL;
         goto err_io;
     }
 
-    if (of_property_read_bool(dev->of_node, "dma-coherent")) {
+    /* Retrieve the PHY mode - it must be "xgmii" */
+    ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY,
+                      &phy_mode);
+    if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) {
+        dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY);
+        if (!ret)
+            ret = -EINVAL;
+        goto err_io;
+    }
+    pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;
+
+    /* Check for per channel interrupt support */
+    if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY))
+        pdata->per_channel_irq = 1;
+
+    /* Obtain device settings unique to ACPI/OF */
+    if (pdata->use_acpi)
+        ret = xgbe_acpi_support(pdata);
+    else
+        ret = xgbe_of_support(pdata);
+    if (ret)
+        goto err_io;
+
+    /* Set the DMA coherency values */
+    if (pdata->coherent) {
         pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
         pdata->arcache = XGBE_DMA_OS_ARCACHE;
         pdata->awcache = XGBE_DMA_OS_AWCACHE;
@@ -266,10 +374,16 @@ static int xgbe_probe(struct platform_device *pdev)
         pdata->awcache = XGBE_DMA_SYS_AWCACHE;
     }
 
-    /* Check for per channel interrupt support */
-    if (of_property_read_bool(dev->of_node, XGBE_DMA_IRQS))
-        pdata->per_channel_irq = 1;
+    /* Set the DMA mask */
+    if (!dev->dma_mask)
+        dev->dma_mask = &dev->coherent_dma_mask;
+    ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+    if (ret) {
+        dev_err(dev, "dma_set_mask_and_coherent failed\n");
+        goto err_io;
+    }
 
+    /* Get the device interrupt */
     ret = platform_get_irq(pdev, 0);
     if (ret < 0) {
         dev_err(dev, "platform_get_irq 0 failed\n");
@@ -279,6 +393,7 @@ static int xgbe_probe(struct platform_device *pdev)
 
     netdev->irq = pdata->dev_irq;
     netdev->base_addr = (unsigned long)pdata->xgmac_regs;
+    memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
 
     /* Set all the function pointers */
     xgbe_init_all_fptrs(pdata);
@@ -291,23 +406,6 @@ static int xgbe_probe(struct platform_device *pdev)
     /* Populate the hardware features */
     xgbe_get_all_hw_features(pdata);
 
-    /* Retrieve the MAC address */
-    mac_addr = of_get_mac_address(dev->of_node);
-    if (!mac_addr) {
-        dev_err(dev, "invalid mac address for this device\n");
-        ret = -EINVAL;
-        goto err_io;
-    }
-    memcpy(netdev->dev_addr, mac_addr, netdev->addr_len);
-
-    /* Retrieve the PHY mode - it must be "xgmii" */
-    pdata->phy_mode = of_get_phy_mode(dev->of_node);
-    if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
-        dev_err(dev, "invalid phy-mode specified for this device\n");
-        ret = -EINVAL;
-        goto err_io;
-    }
-
     /* Set default configuration data */
     xgbe_default_config(pdata);
 
@@ -491,18 +589,35 @@ static int xgbe_resume(struct device *dev)
 }
 #endif /* CONFIG_PM */
 
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgbe_acpi_match[] = {
+    { "AMDI8001", 0 },
+    {},
+};
+
+MODULE_DEVICE_TABLE(acpi, xgbe_acpi_match);
+#endif
+
+#ifdef CONFIG_OF
 static const struct of_device_id xgbe_of_match[] = {
     { .compatible = "amd,xgbe-seattle-v1a", },
     {},
 };
 
 MODULE_DEVICE_TABLE(of, xgbe_of_match);
+#endif
+
 static SIMPLE_DEV_PM_OPS(xgbe_pm_ops, xgbe_suspend, xgbe_resume);
 
 static struct platform_driver xgbe_driver = {
     .driver = {
         .name = "amd-xgbe",
+#ifdef CONFIG_ACPI
+        .acpi_match_table = xgbe_acpi_match,
+#endif
+#ifdef CONFIG_OF
         .of_match_table = xgbe_of_match,
+#endif
         .pm = &xgbe_pm_ops,
     },
     .probe = xgbe_probe,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 363b210560f3..59e267f3f1b7 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -205,25 +205,16 @@ void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
 
 int xgbe_mdio_register(struct xgbe_prv_data *pdata)
 {
-    struct device_node *phy_node;
     struct mii_bus *mii;
     struct phy_device *phydev;
     int ret = 0;
 
     DBGPR("-->xgbe_mdio_register\n");
 
-    /* Retrieve the phy-handle */
-    phy_node = of_parse_phandle(pdata->dev->of_node, "phy-handle", 0);
-    if (!phy_node) {
-        dev_err(pdata->dev, "unable to parse phy-handle\n");
-        return -EINVAL;
-    }
-
     mii = mdiobus_alloc();
-    if (mii == NULL) {
+    if (!mii) {
         dev_err(pdata->dev, "mdiobus_alloc failed\n");
-        ret = -ENOMEM;
-        goto err_node_get;
+        return -ENOMEM;
     }
 
     /* Register on the MDIO bus (don't probe any PHYs) */
@@ -252,18 +243,19 @@ int xgbe_mdio_register(struct xgbe_prv_data *pdata)
     request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT,
                MDIO_ID_ARGS(phydev->c45_ids.device_ids[MDIO_MMD_PCS]));
 
-    of_node_get(phy_node);
-    phydev->dev.of_node = phy_node;
     ret = phy_device_register(phydev);
     if (ret) {
         dev_err(pdata->dev, "phy_device_register failed\n");
-        of_node_put(phy_node);
+        goto err_phy_device;
+    }
+    if (!phydev->dev.driver) {
+        dev_err(pdata->dev, "phy driver probe failed\n");
+        ret = -EIO;
         goto err_phy_device;
     }
 
     /* Add a reference to the PHY driver so it can't be unloaded */
-    pdata->phy_module = phydev->dev.driver ?
-                phydev->dev.driver->owner : NULL;
+    pdata->phy_module = phydev->dev.driver->owner;
     if (!try_module_get(pdata->phy_module)) {
         dev_err(pdata->dev, "try_module_get failed\n");
         ret = -EIO;
@@ -283,8 +275,6 @@ int xgbe_mdio_register(struct xgbe_prv_data *pdata)
283 275
284 pdata->phydev = phydev; 276 pdata->phydev = phydev;
285 277
286 of_node_put(phy_node);
287
288 DBGPHY_REGS(pdata); 278 DBGPHY_REGS(pdata);
289 279
290 DBGPR("<--xgbe_mdio_register\n"); 280 DBGPR("<--xgbe_mdio_register\n");
@@ -300,9 +290,6 @@ err_mdiobus_register:
300err_mdiobus_alloc: 290err_mdiobus_alloc:
301 mdiobus_free(mii); 291 mdiobus_free(mii);
302 292
303err_node_get:
304 of_node_put(phy_node);
305
306 return ret; 293 return ret;
307} 294}
308 295
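[Editor's note] With the DT phy-handle lookup gone, xgbe_mdio_register() above now treats an unbound PHY driver as a hard error before pinning the driver module. A hedged sketch of that check, with illustrative names only:

#include <linux/phy.h>
#include <linux/module.h>

static int example_pin_phy_driver(struct phy_device *phydev,
				  struct module **owner)
{
	/* Fail instead of silently tolerating a NULL driver pointer. */
	if (!phydev->dev.driver)
		return -EIO;

	*owner = phydev->dev.driver->owner;
	if (!try_module_get(*owner))
		return -EIO;	/* PHY driver module is going away */

	return 0;
}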
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
index a1bf9d1cdae1..f326178ef376 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
@@ -171,15 +171,9 @@ static int xgbe_adjtime(struct ptp_clock_info *info, s64 delta)
171 struct xgbe_prv_data, 171 struct xgbe_prv_data,
172 ptp_clock_info); 172 ptp_clock_info);
173 unsigned long flags; 173 unsigned long flags;
174 u64 nsec;
175 174
176 spin_lock_irqsave(&pdata->tstamp_lock, flags); 175 spin_lock_irqsave(&pdata->tstamp_lock, flags);
177 176 timecounter_adjtime(&pdata->tstamp_tc, delta);
178 nsec = timecounter_read(&pdata->tstamp_tc);
179
180 nsec += delta;
181 timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, nsec);
182
183 spin_unlock_irqrestore(&pdata->tstamp_lock, flags); 177 spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
184 178
185 return 0; 179 return 0;
@@ -239,7 +233,7 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
239 snprintf(info->name, sizeof(info->name), "%s", 233 snprintf(info->name, sizeof(info->name), "%s",
240 netdev_name(pdata->netdev)); 234 netdev_name(pdata->netdev));
241 info->owner = THIS_MODULE; 235 info->owner = THIS_MODULE;
242 info->max_adj = clk_get_rate(pdata->ptpclk); 236 info->max_adj = pdata->ptpclk_rate;
243 info->adjfreq = xgbe_adjfreq; 237 info->adjfreq = xgbe_adjfreq;
244 info->adjtime = xgbe_adjtime; 238 info->adjtime = xgbe_adjtime;
245 info->gettime = xgbe_gettime; 239 info->gettime = xgbe_gettime;
@@ -260,7 +254,7 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
260 */ 254 */
261 dividend = 50000000; 255 dividend = 50000000;
262 dividend <<= 32; 256 dividend <<= 32;
263 pdata->tstamp_addend = div_u64(dividend, clk_get_rate(pdata->ptpclk)); 257 pdata->tstamp_addend = div_u64(dividend, pdata->ptpclk_rate);
264 258
265 /* Setup the timecounter */ 259 /* Setup the timecounter */
266 cc->read = xgbe_cc_read; 260 cc->read = xgbe_cc_read;
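[Editor's note] The adjtime hunk above drops the open-coded read/add/re-init sequence in favour of the new timecounter_adjtime() helper. A minimal sketch of a PTP .adjtime callback using it; the struct and lock names here are hypothetical, not xgbe's.

#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <linux/spinlock.h>

struct example_priv {
	struct ptp_clock_info ptp_info;
	struct timecounter tstamp_tc;
	spinlock_t tstamp_lock;
};

static int example_adjtime(struct ptp_clock_info *info, s64 delta)
{
	struct example_priv *priv = container_of(info, struct example_priv,
						 ptp_info);
	unsigned long flags;

	spin_lock_irqsave(&priv->tstamp_lock, flags);
	/* One helper call replaces timecounter_read() + add + re-init. */
	timecounter_adjtime(&priv->tstamp_tc, delta);
	spin_unlock_irqrestore(&priv->tstamp_lock, flags);

	return 0;
}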
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index f9ec762ac3f0..13e8f95c077c 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -124,7 +124,7 @@
124#include <linux/if_vlan.h> 124#include <linux/if_vlan.h>
125#include <linux/bitops.h> 125#include <linux/bitops.h>
126#include <linux/ptp_clock_kernel.h> 126#include <linux/ptp_clock_kernel.h>
127#include <linux/clocksource.h> 127#include <linux/timecounter.h>
128#include <linux/net_tstamp.h> 128#include <linux/net_tstamp.h>
129#include <net/dcbnl.h> 129#include <net/dcbnl.h>
130 130
@@ -182,10 +182,18 @@
182#define XGBE_PHY_NAME "amd_xgbe_phy" 182#define XGBE_PHY_NAME "amd_xgbe_phy"
183#define XGBE_PRTAD 0 183#define XGBE_PRTAD 0
184 184
185/* Common property names */
186#define XGBE_MAC_ADDR_PROPERTY "mac-address"
187#define XGBE_PHY_MODE_PROPERTY "phy-mode"
188#define XGBE_DMA_IRQS_PROPERTY "amd,per-channel-interrupt"
189
185/* Device-tree clock names */ 190/* Device-tree clock names */
186#define XGBE_DMA_CLOCK "dma_clk" 191#define XGBE_DMA_CLOCK "dma_clk"
187#define XGBE_PTP_CLOCK "ptp_clk" 192#define XGBE_PTP_CLOCK "ptp_clk"
188#define XGBE_DMA_IRQS "amd,per-channel-interrupt" 193
194/* ACPI property names */
195#define XGBE_ACPI_DMA_FREQ "amd,dma-freq"
196#define XGBE_ACPI_PTP_FREQ "amd,ptp-freq"
189 197
190/* Timestamp support - values based on 50MHz PTP clock 198/* Timestamp support - values based on 50MHz PTP clock
191 * 50MHz => 20 nsec 199 * 50MHz => 20 nsec
@@ -361,8 +369,7 @@ struct xgbe_ring {
361 * cur - Tx: index of descriptor to be used for current transfer 369 * cur - Tx: index of descriptor to be used for current transfer
362 * Rx: index of descriptor to check for packet availability 370 * Rx: index of descriptor to check for packet availability
363 * dirty - Tx: index of descriptor to check for transfer complete 371 * dirty - Tx: index of descriptor to check for transfer complete
364 * Rx: count of descriptors in which a packet has been received 372 * Rx: index of descriptor to check for buffer reallocation
365 * (used with skb_realloc_index to refresh the ring)
366 */ 373 */
367 unsigned int cur; 374 unsigned int cur;
368 unsigned int dirty; 375 unsigned int dirty;
@@ -377,11 +384,6 @@ struct xgbe_ring {
377 unsigned short cur_mss; 384 unsigned short cur_mss;
378 unsigned short cur_vlan_ctag; 385 unsigned short cur_vlan_ctag;
379 } tx; 386 } tx;
380
381 struct {
382 unsigned int realloc_index;
383 unsigned int realloc_threshold;
384 } rx;
385 }; 387 };
386} ____cacheline_aligned; 388} ____cacheline_aligned;
387 389
@@ -596,7 +598,8 @@ struct xgbe_desc_if {
596 int (*alloc_ring_resources)(struct xgbe_prv_data *); 598 int (*alloc_ring_resources)(struct xgbe_prv_data *);
597 void (*free_ring_resources)(struct xgbe_prv_data *); 599 void (*free_ring_resources)(struct xgbe_prv_data *);
598 int (*map_tx_skb)(struct xgbe_channel *, struct sk_buff *); 600 int (*map_tx_skb)(struct xgbe_channel *, struct sk_buff *);
599 void (*realloc_rx_buffer)(struct xgbe_channel *); 601 int (*map_rx_buffer)(struct xgbe_prv_data *, struct xgbe_ring *,
602 struct xgbe_ring_data *);
600 void (*unmap_rdata)(struct xgbe_prv_data *, struct xgbe_ring_data *); 603 void (*unmap_rdata)(struct xgbe_prv_data *, struct xgbe_ring_data *);
601 void (*wrapper_tx_desc_init)(struct xgbe_prv_data *); 604 void (*wrapper_tx_desc_init)(struct xgbe_prv_data *);
602 void (*wrapper_rx_desc_init)(struct xgbe_prv_data *); 605 void (*wrapper_rx_desc_init)(struct xgbe_prv_data *);
@@ -650,8 +653,12 @@ struct xgbe_hw_features {
650struct xgbe_prv_data { 653struct xgbe_prv_data {
651 struct net_device *netdev; 654 struct net_device *netdev;
652 struct platform_device *pdev; 655 struct platform_device *pdev;
656 struct acpi_device *adev;
653 struct device *dev; 657 struct device *dev;
654 658
659 /* ACPI or DT flag */
660 unsigned int use_acpi;
661
655 /* XGMAC/XPCS related mmio registers */ 662 /* XGMAC/XPCS related mmio registers */
656 void __iomem *xgmac_regs; /* XGMAC CSRs */ 663 void __iomem *xgmac_regs; /* XGMAC CSRs */
657 void __iomem *xpcs_regs; /* XPCS MMD registers */ 664 void __iomem *xpcs_regs; /* XPCS MMD registers */
@@ -672,6 +679,7 @@ struct xgbe_prv_data {
672 struct xgbe_desc_if desc_if; 679 struct xgbe_desc_if desc_if;
673 680
674 /* AXI DMA settings */ 681 /* AXI DMA settings */
682 unsigned int coherent;
675 unsigned int axdomain; 683 unsigned int axdomain;
676 unsigned int arcache; 684 unsigned int arcache;
677 unsigned int awcache; 685 unsigned int awcache;
@@ -739,6 +747,7 @@ struct xgbe_prv_data {
739 unsigned int phy_rx_pause; 747 unsigned int phy_rx_pause;
740 748
741 /* Netdev related settings */ 749 /* Netdev related settings */
750 unsigned char mac_addr[ETH_ALEN];
742 netdev_features_t netdev_features; 751 netdev_features_t netdev_features;
743 struct napi_struct napi; 752 struct napi_struct napi;
744 struct xgbe_mmc_stats mmc_stats; 753 struct xgbe_mmc_stats mmc_stats;
@@ -748,7 +757,9 @@ struct xgbe_prv_data {
748 757
749 /* Device clocks */ 758 /* Device clocks */
750 struct clk *sysclk; 759 struct clk *sysclk;
760 unsigned long sysclk_rate;
751 struct clk *ptpclk; 761 struct clk *ptpclk;
762 unsigned long ptpclk_rate;
752 763
753 /* Timestamp support */ 764 /* Timestamp support */
754 spinlock_t tstamp_lock; 765 spinlock_t tstamp_lock;
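[Editor's note] xgbe.h now caches sysclk_rate/ptpclk_rate instead of calling clk_get_rate() at use sites, which lets the ACPI path supply the rates from _DSD properties (XGBE_ACPI_PTP_FREQ above) where no clock provider exists. A hedged sketch of how such a rate might be populated; the helper and its arguments are illustrative, not xgbe code.

#include <linux/clk.h>
#include <linux/property.h>

static int example_get_ptp_rate(struct device *dev, struct clk *ptpclk,
				bool use_acpi, unsigned long *rate)
{
	u32 freq;
	int ret;

	if (use_acpi) {
		/* Property name follows XGBE_ACPI_PTP_FREQ above. */
		ret = device_property_read_u32(dev, "amd,ptp-freq", &freq);
		if (ret)
			return ret;
		*rate = freq;
	} else {
		*rate = clk_get_rate(ptpclk);
	}

	return 0;
}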
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 7ba83ffb08ac..869d97fcf781 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -593,10 +593,12 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
593 if (!xgene_ring_mgr_init(pdata)) 593 if (!xgene_ring_mgr_init(pdata))
594 return -ENODEV; 594 return -ENODEV;
595 595
596 clk_prepare_enable(pdata->clk); 596 if (!efi_enabled(EFI_BOOT)) {
597 clk_disable_unprepare(pdata->clk); 597 clk_prepare_enable(pdata->clk);
598 clk_prepare_enable(pdata->clk); 598 clk_disable_unprepare(pdata->clk);
599 xgene_enet_ecc_init(pdata); 599 clk_prepare_enable(pdata->clk);
600 xgene_enet_ecc_init(pdata);
601 }
600 xgene_enet_config_ring_if_assoc(pdata); 602 xgene_enet_config_ring_if_assoc(pdata);
601 603
602 /* Enable auto-incr for scanning */ 604 /* Enable auto-incr for scanning */
@@ -663,15 +665,20 @@ static int xgene_enet_phy_connect(struct net_device *ndev)
663 struct phy_device *phy_dev; 665 struct phy_device *phy_dev;
664 struct device *dev = &pdata->pdev->dev; 666 struct device *dev = &pdata->pdev->dev;
665 667
666 phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0); 668 if (dev->of_node) {
667 if (!phy_np) { 669 phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0);
668 netdev_dbg(ndev, "No phy-handle found\n"); 670 if (!phy_np) {
669 return -ENODEV; 671 netdev_dbg(ndev, "No phy-handle found in DT\n");
672 return -ENODEV;
673 }
674 pdata->phy_dev = of_phy_find_device(phy_np);
670 } 675 }
671 676
672 phy_dev = of_phy_connect(ndev, phy_np, &xgene_enet_adjust_link, 677 phy_dev = pdata->phy_dev;
673 0, pdata->phy_mode); 678
674 if (!phy_dev) { 679 if (!phy_dev ||
680 phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
681 pdata->phy_mode)) {
675 netdev_err(ndev, "Could not connect to PHY\n"); 682 netdev_err(ndev, "Could not connect to PHY\n");
676 return -ENODEV; 683 return -ENODEV;
677 } 684 }
@@ -681,32 +688,71 @@ static int xgene_enet_phy_connect(struct net_device *ndev)
681 ~SUPPORTED_100baseT_Half & 688 ~SUPPORTED_100baseT_Half &
682 ~SUPPORTED_1000baseT_Half; 689 ~SUPPORTED_1000baseT_Half;
683 phy_dev->advertising = phy_dev->supported; 690 phy_dev->advertising = phy_dev->supported;
684 pdata->phy_dev = phy_dev;
685 691
686 return 0; 692 return 0;
687} 693}
688 694
689int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata) 695static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
696 struct mii_bus *mdio)
690{ 697{
691 struct net_device *ndev = pdata->ndev;
692 struct device *dev = &pdata->pdev->dev; 698 struct device *dev = &pdata->pdev->dev;
699 struct net_device *ndev = pdata->ndev;
700 struct phy_device *phy;
693 struct device_node *child_np; 701 struct device_node *child_np;
694 struct device_node *mdio_np = NULL; 702 struct device_node *mdio_np = NULL;
695 struct mii_bus *mdio_bus;
696 int ret; 703 int ret;
704 u32 phy_id;
705
706 if (dev->of_node) {
707 for_each_child_of_node(dev->of_node, child_np) {
708 if (of_device_is_compatible(child_np,
709 "apm,xgene-mdio")) {
710 mdio_np = child_np;
711 break;
712 }
713 }
697 714
698 for_each_child_of_node(dev->of_node, child_np) { 715 if (!mdio_np) {
699 if (of_device_is_compatible(child_np, "apm,xgene-mdio")) { 716 netdev_dbg(ndev, "No mdio node in the dts\n");
700 mdio_np = child_np; 717 return -ENXIO;
701 break;
702 } 718 }
703 }
704 719
705 if (!mdio_np) { 720 return of_mdiobus_register(mdio, mdio_np);
706 netdev_dbg(ndev, "No mdio node in the dts\n");
707 return -ENXIO;
708 } 721 }
709 722
723 /* Mask out all PHYs from auto probing. */
724 mdio->phy_mask = ~0;
725
726 /* Register the MDIO bus */
727 ret = mdiobus_register(mdio);
728 if (ret)
729 return ret;
730
731 ret = device_property_read_u32(dev, "phy-channel", &phy_id);
732 if (ret)
733 ret = device_property_read_u32(dev, "phy-addr", &phy_id);
734 if (ret)
735 return -EINVAL;
736
737 phy = get_phy_device(mdio, phy_id, true);
738 if (!phy || IS_ERR(phy))
739 return -EIO;
740
741 ret = phy_device_register(phy);
742 if (ret)
743 phy_device_free(phy);
744 else
745 pdata->phy_dev = phy;
746
747 return ret;
748}
749
750int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
751{
752 struct net_device *ndev = pdata->ndev;
753 struct mii_bus *mdio_bus;
754 int ret;
755
710 mdio_bus = mdiobus_alloc(); 756 mdio_bus = mdiobus_alloc();
711 if (!mdio_bus) 757 if (!mdio_bus)
712 return -ENOMEM; 758 return -ENOMEM;
@@ -720,7 +766,7 @@ int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
720 mdio_bus->priv = pdata; 766 mdio_bus->priv = pdata;
721 mdio_bus->parent = &ndev->dev; 767 mdio_bus->parent = &ndev->dev;
722 768
723 ret = of_mdiobus_register(mdio_bus, mdio_np); 769 ret = xgene_mdiobus_register(pdata, mdio_bus);
724 if (ret) { 770 if (ret) {
725 netdev_err(ndev, "Failed to register MDIO bus\n"); 771 netdev_err(ndev, "Failed to register MDIO bus\n");
726 mdiobus_free(mdio_bus); 772 mdiobus_free(mdio_bus);
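[Editor's note] The xgene_enet_hw.c hunk above adds a non-DT path: auto-probing is masked off, the bus is registered, and a single PHY is created at an address read through the generic device-property API. A hedged sketch of that flow, with hypothetical names and the clause-45 flag mirroring the hunk:

#include <linux/phy.h>
#include <linux/property.h>
#include <linux/err.h>

static struct phy_device *example_register_phy(struct device *dev,
					       struct mii_bus *bus)
{
	struct phy_device *phy;
	u32 addr;
	int ret;

	bus->phy_mask = ~0;		/* do not auto-probe any address */

	ret = mdiobus_register(bus);
	if (ret)
		return ERR_PTR(ret);

	/* Works for both ACPI _DSD and DT properties. */
	if (device_property_read_u32(dev, "phy-addr", &addr))
		return ERR_PTR(-EINVAL);

	phy = get_phy_device(bus, addr, true);	/* third arg: clause-45 */
	if (!phy || IS_ERR(phy))
		return ERR_PTR(-EIO);

	ret = phy_device_register(phy);
	if (ret) {
		phy_device_free(phy);
		return ERR_PTR(ret);
	}

	return phy;
}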
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 793f3b73eeff..44b15373d6b3 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -24,6 +24,10 @@
24#include "xgene_enet_sgmac.h" 24#include "xgene_enet_sgmac.h"
25#include "xgene_enet_xgmac.h" 25#include "xgene_enet_xgmac.h"
26 26
27#define RES_ENET_CSR 0
28#define RES_RING_CSR 1
29#define RES_RING_CMD 2
30
27static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool) 31static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
28{ 32{
29 struct xgene_enet_raw_desc16 *raw_desc; 33 struct xgene_enet_raw_desc16 *raw_desc;
@@ -748,6 +752,41 @@ static const struct net_device_ops xgene_ndev_ops = {
748 .ndo_set_mac_address = xgene_enet_set_mac_address, 752 .ndo_set_mac_address = xgene_enet_set_mac_address,
749}; 753};
750 754
755static int xgene_get_mac_address(struct device *dev,
756 unsigned char *addr)
757{
758 int ret;
759
760 ret = device_property_read_u8_array(dev, "local-mac-address", addr, 6);
761 if (ret)
762 ret = device_property_read_u8_array(dev, "mac-address",
763 addr, 6);
764 if (ret)
765 return -ENODEV;
766
767 return ETH_ALEN;
768}
769
770static int xgene_get_phy_mode(struct device *dev)
771{
772 int i, ret;
773 char *modestr;
774
775 ret = device_property_read_string(dev, "phy-connection-type",
776 (const char **)&modestr);
777 if (ret)
778 ret = device_property_read_string(dev, "phy-mode",
779 (const char **)&modestr);
780 if (ret)
781 return -ENODEV;
782
783 for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) {
784 if (!strcasecmp(modestr, phy_modes(i)))
785 return i;
786 }
787 return -ENODEV;
788}
789
751static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) 790static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
752{ 791{
753 struct platform_device *pdev; 792 struct platform_device *pdev;
@@ -755,32 +794,45 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
755 struct device *dev; 794 struct device *dev;
756 struct resource *res; 795 struct resource *res;
757 void __iomem *base_addr; 796 void __iomem *base_addr;
758 const char *mac;
759 int ret; 797 int ret;
760 798
761 pdev = pdata->pdev; 799 pdev = pdata->pdev;
762 dev = &pdev->dev; 800 dev = &pdev->dev;
763 ndev = pdata->ndev; 801 ndev = pdata->ndev;
764 802
765 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "enet_csr"); 803 res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
766 pdata->base_addr = devm_ioremap_resource(dev, res); 804 if (!res) {
767 if (IS_ERR(pdata->base_addr)) { 805 dev_err(dev, "Resource enet_csr not defined\n");
806 return -ENODEV;
807 }
808 pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
809 if (!pdata->base_addr) {
768 dev_err(dev, "Unable to retrieve ENET Port CSR region\n"); 810 dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
769 return PTR_ERR(pdata->base_addr); 811 return -ENOMEM;
770 } 812 }
771 813
772 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_csr"); 814 res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
773 pdata->ring_csr_addr = devm_ioremap_resource(dev, res); 815 if (!res) {
774 if (IS_ERR(pdata->ring_csr_addr)) { 816 dev_err(dev, "Resource ring_csr not defined\n");
817 return -ENODEV;
818 }
819 pdata->ring_csr_addr = devm_ioremap(dev, res->start,
820 resource_size(res));
821 if (!pdata->ring_csr_addr) {
775 dev_err(dev, "Unable to retrieve ENET Ring CSR region\n"); 822 dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
776 return PTR_ERR(pdata->ring_csr_addr); 823 return -ENOMEM;
777 } 824 }
778 825
779 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_cmd"); 826 res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
780 pdata->ring_cmd_addr = devm_ioremap_resource(dev, res); 827 if (!res) {
781 if (IS_ERR(pdata->ring_cmd_addr)) { 828 dev_err(dev, "Resource ring_cmd not defined\n");
829 return -ENODEV;
830 }
831 pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
832 resource_size(res));
833 if (!pdata->ring_cmd_addr) {
782 dev_err(dev, "Unable to retrieve ENET Ring command region\n"); 834 dev_err(dev, "Unable to retrieve ENET Ring command region\n");
783 return PTR_ERR(pdata->ring_cmd_addr); 835 return -ENOMEM;
784 } 836 }
785 837
786 ret = platform_get_irq(pdev, 0); 838 ret = platform_get_irq(pdev, 0);
@@ -791,14 +843,12 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
791 } 843 }
792 pdata->rx_irq = ret; 844 pdata->rx_irq = ret;
793 845
794 mac = of_get_mac_address(dev->of_node); 846 if (xgene_get_mac_address(dev, ndev->dev_addr) != ETH_ALEN)
795 if (mac)
796 memcpy(ndev->dev_addr, mac, ndev->addr_len);
797 else
798 eth_hw_addr_random(ndev); 847 eth_hw_addr_random(ndev);
848
799 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); 849 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
800 850
801 pdata->phy_mode = of_get_phy_mode(pdev->dev.of_node); 851 pdata->phy_mode = xgene_get_phy_mode(dev);
802 if (pdata->phy_mode < 0) { 852 if (pdata->phy_mode < 0) {
803 dev_err(dev, "Unable to get phy-connection-type\n"); 853 dev_err(dev, "Unable to get phy-connection-type\n");
804 return pdata->phy_mode; 854 return pdata->phy_mode;
@@ -811,11 +861,9 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
811 } 861 }
812 862
813 pdata->clk = devm_clk_get(&pdev->dev, NULL); 863 pdata->clk = devm_clk_get(&pdev->dev, NULL);
814 ret = IS_ERR(pdata->clk);
815 if (IS_ERR(pdata->clk)) { 864 if (IS_ERR(pdata->clk)) {
816 dev_err(&pdev->dev, "can't get clock\n"); 865 /* Firmware may have set up the clock already. */
817 ret = PTR_ERR(pdata->clk); 866 pdata->clk = NULL;
818 return ret;
819 } 867 }
820 868
821 base_addr = pdata->base_addr; 869 base_addr = pdata->base_addr;
@@ -926,7 +974,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
926 goto err; 974 goto err;
927 } 975 }
928 976
929 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 977 ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
930 if (ret) { 978 if (ret) {
931 netdev_err(ndev, "No usable DMA configuration\n"); 979 netdev_err(ndev, "No usable DMA configuration\n");
932 goto err; 980 goto err;
@@ -974,17 +1022,26 @@ static int xgene_enet_remove(struct platform_device *pdev)
974 return 0; 1022 return 0;
975} 1023}
976 1024
977static struct of_device_id xgene_enet_match[] = { 1025#ifdef CONFIG_ACPI
1026static const struct acpi_device_id xgene_enet_acpi_match[] = {
1027 { "APMC0D05", },
1028 { }
1029};
1030MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
1031#endif
1032
1033static struct of_device_id xgene_enet_of_match[] = {
978 {.compatible = "apm,xgene-enet",}, 1034 {.compatible = "apm,xgene-enet",},
979 {}, 1035 {},
980}; 1036};
981 1037
982MODULE_DEVICE_TABLE(of, xgene_enet_match); 1038MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
983 1039
984static struct platform_driver xgene_enet_driver = { 1040static struct platform_driver xgene_enet_driver = {
985 .driver = { 1041 .driver = {
986 .name = "xgene-enet", 1042 .name = "xgene-enet",
987 .of_match_table = xgene_enet_match, 1043 .of_match_table = of_match_ptr(xgene_enet_of_match),
1044 .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
988 }, 1045 },
989 .probe = xgene_enet_probe, 1046 .probe = xgene_enet_probe,
990 .remove = xgene_enet_remove, 1047 .remove = xgene_enet_remove,
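[Editor's note] Besides the property-based MAC/phy-mode helpers, the probe path above switches from dma_set_mask_and_coherent() to dma_coerce_mask_and_coherent(). The latter first points dev->dma_mask at dev->coherent_dma_mask before setting both masks, which matters for platform devices (ACPI-enumerated ones in particular) that arrive without a dma_mask pointer wired up. A minimal sketch:

#include <linux/dma-mapping.h>

static int example_setup_dma(struct device *dev)
{
	/* Forces dev->dma_mask = &dev->coherent_dma_mask, then sets both. */
	return dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
}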
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index f9958fae6ffd..c2d465c3db66 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -22,7 +22,10 @@
22#ifndef __XGENE_ENET_MAIN_H__ 22#ifndef __XGENE_ENET_MAIN_H__
23#define __XGENE_ENET_MAIN_H__ 23#define __XGENE_ENET_MAIN_H__
24 24
25#include <linux/acpi.h>
25#include <linux/clk.h> 26#include <linux/clk.h>
27#include <linux/efi.h>
28#include <linux/io.h>
26#include <linux/of_platform.h> 29#include <linux/of_platform.h>
27#include <linux/of_net.h> 30#include <linux/of_net.h>
28#include <linux/of_mdio.h> 31#include <linux/of_mdio.h>
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index c9946c6c119e..587f63e87588 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2235,8 +2235,8 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
2235 return NETDEV_TX_OK; 2235 return NETDEV_TX_OK;
2236 } 2236 }
2237 2237
2238 if (unlikely(vlan_tx_tag_present(skb))) { 2238 if (unlikely(skb_vlan_tag_present(skb))) {
2239 u16 vlan = vlan_tx_tag_get(skb); 2239 u16 vlan = skb_vlan_tag_get(skb);
2240 __le16 tag; 2240 __le16 tag;
2241 2241
2242 vlan = cpu_to_le16(vlan); 2242 vlan = cpu_to_le16(vlan);
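[Editor's note] This and the following Atheros/Broadcom/Brocade hunks are the same mechanical rename: vlan_tx_tag_present()/vlan_tx_tag_get() become skb_vlan_tag_present()/skb_vlan_tag_get(). A minimal sketch of the new helpers in a transmit path; the hardware-specific tag encoding is omitted.

#include <linux/if_vlan.h>

static u16 example_tx_vlan_tag(const struct sk_buff *skb)
{
	/* Renamed helpers; behaviour is unchanged, they still read the
	 * tag carried in skb->vlan_tci. */
	if (skb_vlan_tag_present(skb))
		return skb_vlan_tag_get(skb);

	return 0;
}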
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 2326579f9454..59a03a193e83 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1892,8 +1892,8 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
1892 1892
1893 tpd = atl1e_get_tpd(adapter); 1893 tpd = atl1e_get_tpd(adapter);
1894 1894
1895 if (vlan_tx_tag_present(skb)) { 1895 if (skb_vlan_tag_present(skb)) {
1896 u16 vlan_tag = vlan_tx_tag_get(skb); 1896 u16 vlan_tag = skb_vlan_tag_get(skb);
1897 u16 atl1e_vlan_tag; 1897 u16 atl1e_vlan_tag;
1898 1898
1899 tpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT; 1899 tpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
@@ -2373,9 +2373,8 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2373 2373
2374 netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64); 2374 netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64);
2375 2375
2376 init_timer(&adapter->phy_config_timer); 2376 setup_timer(&adapter->phy_config_timer, atl1e_phy_config,
2377 adapter->phy_config_timer.function = atl1e_phy_config; 2377 (unsigned long)adapter);
2378 adapter->phy_config_timer.data = (unsigned long) adapter;
2379 2378
2380 /* get user settings */ 2379 /* get user settings */
2381 atl1e_check_options(adapter); 2380 atl1e_check_options(adapter);
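[Editor's note] The timer conversions in atl1e (and atl2 below) collapse the three-line init_timer()/function/data assignment into a single setup_timer() call. A minimal sketch of the pre-4.15 timer API pattern; the adapter struct and callback are illustrative.

#include <linux/timer.h>

struct example_adapter {
	struct timer_list phy_config_timer;
};

static void example_phy_config(unsigned long data)
{
	struct example_adapter *adapter = (struct example_adapter *)data;

	/* ... re-run PHY configuration for @adapter ... */
	(void)adapter;
}

static void example_init_timers(struct example_adapter *adapter)
{
	setup_timer(&adapter->phy_config_timer, example_phy_config,
		    (unsigned long)adapter);
}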
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 2c8f398aeda9..eca1d113fee1 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2415,8 +2415,8 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
2415 (u16) atomic_read(&tpd_ring->next_to_use)); 2415 (u16) atomic_read(&tpd_ring->next_to_use));
2416 memset(ptpd, 0, sizeof(struct tx_packet_desc)); 2416 memset(ptpd, 0, sizeof(struct tx_packet_desc));
2417 2417
2418 if (vlan_tx_tag_present(skb)) { 2418 if (skb_vlan_tag_present(skb)) {
2419 vlan_tag = vlan_tx_tag_get(skb); 2419 vlan_tag = skb_vlan_tag_get(skb);
2420 vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) | 2420 vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) |
2421 ((vlan_tag >> 9) & 0x8); 2421 ((vlan_tag >> 9) & 0x8);
2422 ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT; 2422 ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 84a09e8ddd9c..46a535318c7a 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -887,8 +887,8 @@ static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb,
887 offset = ((u32)(skb->len-copy_len + 3) & ~3); 887 offset = ((u32)(skb->len-copy_len + 3) & ~3);
888 } 888 }
889#ifdef NETIF_F_HW_VLAN_CTAG_TX 889#ifdef NETIF_F_HW_VLAN_CTAG_TX
890 if (vlan_tx_tag_present(skb)) { 890 if (skb_vlan_tag_present(skb)) {
891 u16 vlan_tag = vlan_tx_tag_get(skb); 891 u16 vlan_tag = skb_vlan_tag_get(skb);
892 vlan_tag = (vlan_tag << 4) | 892 vlan_tag = (vlan_tag << 4) |
893 (vlan_tag >> 13) | 893 (vlan_tag >> 13) |
894 ((vlan_tag >> 9) & 0x8); 894 ((vlan_tag >> 9) & 0x8);
@@ -1436,13 +1436,11 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1436 1436
1437 atl2_check_options(adapter); 1437 atl2_check_options(adapter);
1438 1438
1439 init_timer(&adapter->watchdog_timer); 1439 setup_timer(&adapter->watchdog_timer, atl2_watchdog,
1440 adapter->watchdog_timer.function = atl2_watchdog; 1440 (unsigned long)adapter);
1441 adapter->watchdog_timer.data = (unsigned long) adapter;
1442 1441
1443 init_timer(&adapter->phy_config_timer); 1442 setup_timer(&adapter->phy_config_timer, atl2_phy_config,
1444 adapter->phy_config_timer.function = atl2_phy_config; 1443 (unsigned long)adapter);
1445 adapter->phy_config_timer.data = (unsigned long) adapter;
1446 1444
1447 INIT_WORK(&adapter->reset_task, atl2_reset_task); 1445 INIT_WORK(&adapter->reset_task, atl2_reset_task);
1448 INIT_WORK(&adapter->link_chg_task, atl2_link_chg_task); 1446 INIT_WORK(&adapter->link_chg_task, atl2_link_chg_task);
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 823d01c5684c..02bf0b86995b 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -6597,9 +6597,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6597 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM; 6597 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6598 } 6598 }
6599 6599
6600 if (vlan_tx_tag_present(skb)) { 6600 if (skb_vlan_tag_present(skb)) {
6601 vlan_tag_flags |= 6601 vlan_tag_flags |=
6602 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16)); 6602 (TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16));
6603 } 6603 }
6604 6604
6605 if ((mss = skb_shinfo(skb)->gso_size)) { 6605 if ((mss = skb_shinfo(skb)->gso_size)) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index c3a6072134f5..756053c028be 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -22,7 +22,7 @@
22 22
23#include <linux/ptp_clock_kernel.h> 23#include <linux/ptp_clock_kernel.h>
24#include <linux/net_tstamp.h> 24#include <linux/net_tstamp.h>
25#include <linux/clocksource.h> 25#include <linux/timecounter.h>
26 26
27/* compilation time flags */ 27/* compilation time flags */
28 28
@@ -1138,12 +1138,8 @@ struct bnx2x_port {
1138 u32 link_config[LINK_CONFIG_SIZE]; 1138 u32 link_config[LINK_CONFIG_SIZE];
1139 1139
1140 u32 supported[LINK_CONFIG_SIZE]; 1140 u32 supported[LINK_CONFIG_SIZE];
1141/* link settings - missing defines */
1142#define SUPPORTED_2500baseX_Full (1 << 15)
1143 1141
1144 u32 advertising[LINK_CONFIG_SIZE]; 1142 u32 advertising[LINK_CONFIG_SIZE];
1145/* link settings - missing defines */
1146#define ADVERTISED_2500baseX_Full (1 << 15)
1147 1143
1148 u32 phy_addr; 1144 u32 phy_addr;
1149 1145
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e468ed3f210f..0a9faa134a9a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3865,9 +3865,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3865 "sending pkt %u @%p next_idx %u bd %u @%p\n", 3865 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3866 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd); 3866 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3867 3867
3868 if (vlan_tx_tag_present(skb)) { 3868 if (skb_vlan_tag_present(skb)) {
3869 tx_start_bd->vlan_or_ethertype = 3869 tx_start_bd->vlan_or_ethertype =
3870 cpu_to_le16(vlan_tx_tag_get(skb)); 3870 cpu_to_le16(skb_vlan_tag_get(skb));
3871 tx_start_bd->bd_flags.as_bitfield |= 3871 tx_start_bd->bd_flags.as_bitfield |=
3872 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); 3872 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3873 } else { 3873 } else {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 72eef9fc883e..7155e1d2c208 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -9169,7 +9169,7 @@ static void bnx2x_disable_ptp(struct bnx2x *bp)
9169} 9169}
9170 9170
9171/* Called during unload, to stop PTP-related stuff */ 9171/* Called during unload, to stop PTP-related stuff */
9172void bnx2x_stop_ptp(struct bnx2x *bp) 9172static void bnx2x_stop_ptp(struct bnx2x *bp)
9173{ 9173{
9174 /* Cancel PTP work queue. Should be done after the Tx queues are 9174 /* Cancel PTP work queue. Should be done after the Tx queues are
9175 * drained to prevent additional scheduling. 9175 * drained to prevent additional scheduling.
@@ -13267,14 +13267,10 @@ static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
13267static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) 13267static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
13268{ 13268{
13269 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); 13269 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13270 u64 now;
13271 13270
13272 DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta); 13271 DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
13273 13272
13274 now = timecounter_read(&bp->timecounter); 13273 timecounter_adjtime(&bp->timecounter, delta);
13275 now += delta;
13276 /* Re-init the timecounter */
13277 timecounter_init(&bp->timecounter, &bp->cyclecounter, now);
13278 13274
13279 return 0; 13275 return 0;
13280} 13276}
@@ -13322,7 +13318,7 @@ static int bnx2x_ptp_enable(struct ptp_clock_info *ptp,
13322 return -ENOTSUPP; 13318 return -ENOTSUPP;
13323} 13319}
13324 13320
13325void bnx2x_register_phc(struct bnx2x *bp) 13321static void bnx2x_register_phc(struct bnx2x *bp)
13326{ 13322{
13327 /* Fill the ptp_clock_info struct and register PTP clock*/ 13323 /* Fill the ptp_clock_info struct and register PTP clock*/
13328 bp->ptp_clock_info.owner = THIS_MODULE; 13324 bp->ptp_clock_info.owner = THIS_MODULE;
@@ -14614,7 +14610,7 @@ static void bnx2x_init_cyclecounter(struct bnx2x *bp)
14614{ 14610{
14615 memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter)); 14611 memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter));
14616 bp->cyclecounter.read = bnx2x_cyclecounter_read; 14612 bp->cyclecounter.read = bnx2x_cyclecounter_read;
14617 bp->cyclecounter.mask = CLOCKSOURCE_MASK(64); 14613 bp->cyclecounter.mask = CYCLECOUNTER_MASK(64);
14618 bp->cyclecounter.shift = 1; 14614 bp->cyclecounter.shift = 1;
14619 bp->cyclecounter.mult = 1; 14615 bp->cyclecounter.mult = 1;
14620} 14616}
@@ -14639,7 +14635,7 @@ static int bnx2x_send_reset_timesync_ramrod(struct bnx2x *bp)
14639 return bnx2x_func_state_change(bp, &func_params); 14635 return bnx2x_func_state_change(bp, &func_params);
14640} 14636}
14641 14637
14642int bnx2x_enable_ptp_packets(struct bnx2x *bp) 14638static int bnx2x_enable_ptp_packets(struct bnx2x *bp)
14643{ 14639{
14644 struct bnx2x_queue_state_params q_params; 14640 struct bnx2x_queue_state_params q_params;
14645 int rc, i; 14641 int rc, i;
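[Editor's note] bnx2x follows the same timecounter cleanup as xgbe: adjtime uses timecounter_adjtime(), and the cyclecounter mask macro moves with the header split from <linux/clocksource.h> to <linux/timecounter.h> (CLOCKSOURCE_MASK -> CYCLECOUNTER_MASK). A hedged sketch of the cyclecounter/timecounter setup; the read callback is a stub and the mult/shift values simply mirror the hunk above.

#include <linux/timecounter.h>
#include <linux/ktime.h>
#include <linux/types.h>

static cycle_t example_cc_read(const struct cyclecounter *cc)
{
	/* Would read a free-running hardware counter here. */
	return 0;
}

static void example_init_timecounter(struct cyclecounter *cc,
				     struct timecounter *tc)
{
	cc->read = example_cc_read;
	cc->mask = CYCLECOUNTER_MASK(64);
	cc->mult = 1;
	cc->shift = 1;

	timecounter_init(tc, cc, ktime_to_ns(ktime_get_real()));
}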
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 96bf01ba32dd..615a6dbde047 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -8008,9 +8008,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
8008 !mss && skb->len > VLAN_ETH_FRAME_LEN) 8008 !mss && skb->len > VLAN_ETH_FRAME_LEN)
8009 base_flags |= TXD_FLAG_JMB_PKT; 8009 base_flags |= TXD_FLAG_JMB_PKT;
8010 8010
8011 if (vlan_tx_tag_present(skb)) { 8011 if (skb_vlan_tag_present(skb)) {
8012 base_flags |= TXD_FLAG_VLAN; 8012 base_flags |= TXD_FLAG_VLAN;
8013 vlan = vlan_tx_tag_get(skb); 8013 vlan = skb_vlan_tag_get(skb);
8014 } 8014 }
8015 8015
8016 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) && 8016 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
@@ -11573,11 +11573,7 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11573 tg3_flag_set(tp, INIT_COMPLETE); 11573 tg3_flag_set(tp, INIT_COMPLETE);
11574 tg3_enable_ints(tp); 11574 tg3_enable_ints(tp);
11575 11575
11576 if (init) 11576 tg3_ptp_resume(tp);
11577 tg3_ptp_init(tp);
11578 else
11579 tg3_ptp_resume(tp);
11580
11581 11577
11582 tg3_full_unlock(tp); 11578 tg3_full_unlock(tp);
11583 11579
@@ -11698,13 +11694,6 @@ static int tg3_open(struct net_device *dev)
11698 pci_set_power_state(tp->pdev, PCI_D3hot); 11694 pci_set_power_state(tp->pdev, PCI_D3hot);
11699 } 11695 }
11700 11696
11701 if (tg3_flag(tp, PTP_CAPABLE)) {
11702 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11703 &tp->pdev->dev);
11704 if (IS_ERR(tp->ptp_clock))
11705 tp->ptp_clock = NULL;
11706 }
11707
11708 return err; 11697 return err;
11709} 11698}
11710 11699
@@ -11718,8 +11707,6 @@ static int tg3_close(struct net_device *dev)
11718 return -EAGAIN; 11707 return -EAGAIN;
11719 } 11708 }
11720 11709
11721 tg3_ptp_fini(tp);
11722
11723 tg3_stop(tp); 11710 tg3_stop(tp);
11724 11711
11725 /* Clear stats across close / open calls */ 11712 /* Clear stats across close / open calls */
@@ -17897,6 +17884,14 @@ static int tg3_init_one(struct pci_dev *pdev,
17897 goto err_out_apeunmap; 17884 goto err_out_apeunmap;
17898 } 17885 }
17899 17886
17887 if (tg3_flag(tp, PTP_CAPABLE)) {
17888 tg3_ptp_init(tp);
17889 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17890 &tp->pdev->dev);
17891 if (IS_ERR(tp->ptp_clock))
17892 tp->ptp_clock = NULL;
17893 }
17894
17900 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n", 17895 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17901 tp->board_part_number, 17896 tp->board_part_number,
17902 tg3_chip_rev_id(tp), 17897 tg3_chip_rev_id(tp),
@@ -17972,6 +17967,8 @@ static void tg3_remove_one(struct pci_dev *pdev)
17972 if (dev) { 17967 if (dev) {
17973 struct tg3 *tp = netdev_priv(dev); 17968 struct tg3 *tp = netdev_priv(dev);
17974 17969
17970 tg3_ptp_fini(tp);
17971
17975 release_firmware(tp->fw); 17972 release_firmware(tp->fw);
17976 17973
17977 tg3_reset_task_cancel(tp); 17974 tg3_reset_task_cancel(tp);
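[Editor's note] The tg3 hunks above move PHC registration from open() to tg3_init_one() and the teardown from close() to tg3_remove_one(), so the PTP clock is created once per device rather than on every ifup and remains available while the interface is down. A hedged sketch of that probe/remove pairing with illustrative names:

#include <linux/ptp_clock_kernel.h>
#include <linux/err.h>

struct example_nic {
	struct ptp_clock_info ptp_info;
	struct ptp_clock *ptp_clock;
	struct device *dev;
};

static void example_probe_ptp(struct example_nic *nic)
{
	nic->ptp_clock = ptp_clock_register(&nic->ptp_info, nic->dev);
	if (IS_ERR(nic->ptp_clock))
		nic->ptp_clock = NULL;	/* run without a PHC */
}

static void example_remove_ptp(struct example_nic *nic)
{
	if (nic->ptp_clock)
		ptp_clock_unregister(nic->ptp_clock);
}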
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 323721838cf9..7714d7790089 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -2824,8 +2824,8 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2824 u32 gso_size; 2824 u32 gso_size;
2825 u16 vlan_tag = 0; 2825 u16 vlan_tag = 0;
2826 2826
2827 if (vlan_tx_tag_present(skb)) { 2827 if (skb_vlan_tag_present(skb)) {
2828 vlan_tag = (u16)vlan_tx_tag_get(skb); 2828 vlan_tag = (u16)skb_vlan_tag_get(skb);
2829 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN); 2829 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2830 } 2830 }
2831 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) { 2831 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 3767271c7667..ad76b8e35a00 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -1691,7 +1691,7 @@ static int hash_get_index(__u8 *addr)
1691 1691
1692 for (j = 0; j < 6; j++) { 1692 for (j = 0; j < 6; j++) {
1693 for (i = 0, bitval = 0; i < 8; i++) 1693 for (i = 0, bitval = 0; i < 8; i++)
1694 bitval ^= hash_bit_value(i*6 + j, addr); 1694 bitval ^= hash_bit_value(i * 6 + j, addr);
1695 1695
1696 hash_index |= (bitval << j); 1696 hash_index |= (bitval << j);
1697 } 1697 }
@@ -1827,12 +1827,23 @@ static int macb_close(struct net_device *dev)
1827 1827
1828static void gem_update_stats(struct macb *bp) 1828static void gem_update_stats(struct macb *bp)
1829{ 1829{
1830 u32 __iomem *reg = bp->regs + GEM_OTX; 1830 int i;
1831 u32 *p = &bp->hw_stats.gem.tx_octets_31_0; 1831 u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
1832 u32 *end = &bp->hw_stats.gem.rx_udp_checksum_errors + 1;
1833 1832
1834 for (; p < end; p++, reg++) 1833 for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
1835 *p += __raw_readl(reg); 1834 u32 offset = gem_statistics[i].offset;
1835 u64 val = __raw_readl(bp->regs + offset);
1836
1837 bp->ethtool_stats[i] += val;
1838 *p += val;
1839
1840 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
1841 /* Add GEM_OCTTXH, GEM_OCTRXH */
1842 val = __raw_readl(bp->regs + offset + 4);
1843 bp->ethtool_stats[i] += ((u64)val) << 32;
1844 *(++p) += val;
1845 }
1846 }
1836} 1847}
1837 1848
1838static struct net_device_stats *gem_get_stats(struct macb *bp) 1849static struct net_device_stats *gem_get_stats(struct macb *bp)
@@ -1873,6 +1884,39 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
1873 return nstat; 1884 return nstat;
1874} 1885}
1875 1886
1887static void gem_get_ethtool_stats(struct net_device *dev,
1888 struct ethtool_stats *stats, u64 *data)
1889{
1890 struct macb *bp;
1891
1892 bp = netdev_priv(dev);
1893 gem_update_stats(bp);
1894 memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN);
1895}
1896
1897static int gem_get_sset_count(struct net_device *dev, int sset)
1898{
1899 switch (sset) {
1900 case ETH_SS_STATS:
1901 return GEM_STATS_LEN;
1902 default:
1903 return -EOPNOTSUPP;
1904 }
1905}
1906
1907static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
1908{
1909 int i;
1910
1911 switch (sset) {
1912 case ETH_SS_STATS:
1913 for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
1914 memcpy(p, gem_statistics[i].stat_string,
1915 ETH_GSTRING_LEN);
1916 break;
1917 }
1918}
1919
1876struct net_device_stats *macb_get_stats(struct net_device *dev) 1920struct net_device_stats *macb_get_stats(struct net_device *dev)
1877{ 1921{
1878 struct macb *bp = netdev_priv(dev); 1922 struct macb *bp = netdev_priv(dev);
@@ -1991,6 +2035,18 @@ const struct ethtool_ops macb_ethtool_ops = {
1991}; 2035};
1992EXPORT_SYMBOL_GPL(macb_ethtool_ops); 2036EXPORT_SYMBOL_GPL(macb_ethtool_ops);
1993 2037
2038static const struct ethtool_ops gem_ethtool_ops = {
2039 .get_settings = macb_get_settings,
2040 .set_settings = macb_set_settings,
2041 .get_regs_len = macb_get_regs_len,
2042 .get_regs = macb_get_regs,
2043 .get_link = ethtool_op_get_link,
2044 .get_ts_info = ethtool_op_get_ts_info,
2045 .get_ethtool_stats = gem_get_ethtool_stats,
2046 .get_strings = gem_get_ethtool_strings,
2047 .get_sset_count = gem_get_sset_count,
2048};
2049
1994int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 2050int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1995{ 2051{
1996 struct macb *bp = netdev_priv(dev); 2052 struct macb *bp = netdev_priv(dev);
@@ -2148,7 +2204,7 @@ static void macb_probe_queues(void __iomem *mem,
2148 (*num_queues)++; 2204 (*num_queues)++;
2149} 2205}
2150 2206
2151static int __init macb_probe(struct platform_device *pdev) 2207static int macb_probe(struct platform_device *pdev)
2152{ 2208{
2153 struct macb_platform_data *pdata; 2209 struct macb_platform_data *pdata;
2154 struct resource *regs; 2210 struct resource *regs;
@@ -2278,7 +2334,6 @@ static int __init macb_probe(struct platform_device *pdev)
2278 2334
2279 dev->netdev_ops = &macb_netdev_ops; 2335 dev->netdev_ops = &macb_netdev_ops;
2280 netif_napi_add(dev, &bp->napi, macb_poll, 64); 2336 netif_napi_add(dev, &bp->napi, macb_poll, 64);
2281 dev->ethtool_ops = &macb_ethtool_ops;
2282 2337
2283 dev->base_addr = regs->start; 2338 dev->base_addr = regs->start;
2284 2339
@@ -2292,12 +2347,14 @@ static int __init macb_probe(struct platform_device *pdev)
2292 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers; 2347 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
2293 bp->macbgem_ops.mog_init_rings = gem_init_rings; 2348 bp->macbgem_ops.mog_init_rings = gem_init_rings;
2294 bp->macbgem_ops.mog_rx = gem_rx; 2349 bp->macbgem_ops.mog_rx = gem_rx;
2350 dev->ethtool_ops = &gem_ethtool_ops;
2295 } else { 2351 } else {
2296 bp->max_tx_length = MACB_MAX_TX_LEN; 2352 bp->max_tx_length = MACB_MAX_TX_LEN;
2297 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers; 2353 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
2298 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers; 2354 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
2299 bp->macbgem_ops.mog_init_rings = macb_init_rings; 2355 bp->macbgem_ops.mog_init_rings = macb_init_rings;
2300 bp->macbgem_ops.mog_rx = macb_rx; 2356 bp->macbgem_ops.mog_rx = macb_rx;
2357 dev->ethtool_ops = &macb_ethtool_ops;
2301 } 2358 }
2302 2359
2303 /* Set features */ 2360 /* Set features */
@@ -2386,7 +2443,7 @@ err_out:
2386 return err; 2443 return err;
2387} 2444}
2388 2445
2389static int __exit macb_remove(struct platform_device *pdev) 2446static int macb_remove(struct platform_device *pdev)
2390{ 2447{
2391 struct net_device *dev; 2448 struct net_device *dev;
2392 struct macb *bp; 2449 struct macb *bp;
@@ -2411,8 +2468,7 @@ static int __exit macb_remove(struct platform_device *pdev)
2411 return 0; 2468 return 0;
2412} 2469}
2413 2470
2414#ifdef CONFIG_PM 2471static int __maybe_unused macb_suspend(struct device *dev)
2415static int macb_suspend(struct device *dev)
2416{ 2472{
2417 struct platform_device *pdev = to_platform_device(dev); 2473 struct platform_device *pdev = to_platform_device(dev);
2418 struct net_device *netdev = platform_get_drvdata(pdev); 2474 struct net_device *netdev = platform_get_drvdata(pdev);
@@ -2429,7 +2485,7 @@ static int macb_suspend(struct device *dev)
2429 return 0; 2485 return 0;
2430} 2486}
2431 2487
2432static int macb_resume(struct device *dev) 2488static int __maybe_unused macb_resume(struct device *dev)
2433{ 2489{
2434 struct platform_device *pdev = to_platform_device(dev); 2490 struct platform_device *pdev = to_platform_device(dev);
2435 struct net_device *netdev = platform_get_drvdata(pdev); 2491 struct net_device *netdev = platform_get_drvdata(pdev);
@@ -2444,12 +2500,12 @@ static int macb_resume(struct device *dev)
2444 2500
2445 return 0; 2501 return 0;
2446} 2502}
2447#endif
2448 2503
2449static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume); 2504static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);
2450 2505
2451static struct platform_driver macb_driver = { 2506static struct platform_driver macb_driver = {
2452 .remove = __exit_p(macb_remove), 2507 .probe = macb_probe,
2508 .remove = macb_remove,
2453 .driver = { 2509 .driver = {
2454 .name = "macb", 2510 .name = "macb",
2455 .of_match_table = of_match_ptr(macb_dt_ids), 2511 .of_match_table = of_match_ptr(macb_dt_ids),
@@ -2457,7 +2513,7 @@ static struct platform_driver macb_driver = {
2457 }, 2513 },
2458}; 2514};
2459 2515
2460module_platform_driver_probe(macb_driver, macb_probe); 2516module_platform_driver(macb_driver);
2461 2517
2462MODULE_LICENSE("GPL"); 2518MODULE_LICENSE("GPL");
2463MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver"); 2519MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
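[Editor's note] gem_update_stats() and the new gem ethtool ops above iterate a gem_statistics[] table of GEM_STATS_LEN entries; the table itself is defined in macb.h and is not visible in this hunk, so the layout below is an assumption sketched purely for illustration (names, struct and sample offsets are guesses keyed to the register defines that follow).

#include <linux/ethtool.h>
#include <linux/kernel.h>

struct gem_statistic_example {
	char stat_string[ETH_GSTRING_LEN];	/* name shown by ethtool -S */
	unsigned int offset;			/* GEM statistics register */
};

static const struct gem_statistic_example gem_statistics_example[] = {
	{ "tx_octets", 0x0100 },	/* GEM_OCTTXL, 64-bit with OCTTXH */
	{ "tx_frames", 0x0108 },	/* GEM_TXCNT */
	{ "rx_octets", 0x0150 },	/* GEM_OCTRXL, 64-bit with OCTRXH */
	{ "rx_frames", 0x0158 },	/* GEM_RXCNT */
};

#define GEM_STATS_LEN_EXAMPLE ARRAY_SIZE(gem_statistics_example)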
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 084191b6fad2..31dc080f2437 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -15,263 +15,309 @@
15#define MACB_MAX_QUEUES 8 15#define MACB_MAX_QUEUES 8
16 16
17/* MACB register offsets */ 17/* MACB register offsets */
18#define MACB_NCR 0x0000 18#define MACB_NCR 0x0000 /* Network Control */
19#define MACB_NCFGR 0x0004 19#define MACB_NCFGR 0x0004 /* Network Config */
20#define MACB_NSR 0x0008 20#define MACB_NSR 0x0008 /* Network Status */
21#define MACB_TAR 0x000c /* AT91RM9200 only */ 21#define MACB_TAR 0x000c /* AT91RM9200 only */
22#define MACB_TCR 0x0010 /* AT91RM9200 only */ 22#define MACB_TCR 0x0010 /* AT91RM9200 only */
23#define MACB_TSR 0x0014 23#define MACB_TSR 0x0014 /* Transmit Status */
24#define MACB_RBQP 0x0018 24#define MACB_RBQP 0x0018 /* RX Q Base Address */
25#define MACB_TBQP 0x001c 25#define MACB_TBQP 0x001c /* TX Q Base Address */
26#define MACB_RSR 0x0020 26#define MACB_RSR 0x0020 /* Receive Status */
27#define MACB_ISR 0x0024 27#define MACB_ISR 0x0024 /* Interrupt Status */
28#define MACB_IER 0x0028 28#define MACB_IER 0x0028 /* Interrupt Enable */
29#define MACB_IDR 0x002c 29#define MACB_IDR 0x002c /* Interrupt Disable */
30#define MACB_IMR 0x0030 30#define MACB_IMR 0x0030 /* Interrupt Mask */
31#define MACB_MAN 0x0034 31#define MACB_MAN 0x0034 /* PHY Maintenance */
32#define MACB_PTR 0x0038 32#define MACB_PTR 0x0038
33#define MACB_PFR 0x003c 33#define MACB_PFR 0x003c
34#define MACB_FTO 0x0040 34#define MACB_FTO 0x0040
35#define MACB_SCF 0x0044 35#define MACB_SCF 0x0044
36#define MACB_MCF 0x0048 36#define MACB_MCF 0x0048
37#define MACB_FRO 0x004c 37#define MACB_FRO 0x004c
38#define MACB_FCSE 0x0050 38#define MACB_FCSE 0x0050
39#define MACB_ALE 0x0054 39#define MACB_ALE 0x0054
40#define MACB_DTF 0x0058 40#define MACB_DTF 0x0058
41#define MACB_LCOL 0x005c 41#define MACB_LCOL 0x005c
42#define MACB_EXCOL 0x0060 42#define MACB_EXCOL 0x0060
43#define MACB_TUND 0x0064 43#define MACB_TUND 0x0064
44#define MACB_CSE 0x0068 44#define MACB_CSE 0x0068
45#define MACB_RRE 0x006c 45#define MACB_RRE 0x006c
46#define MACB_ROVR 0x0070 46#define MACB_ROVR 0x0070
47#define MACB_RSE 0x0074 47#define MACB_RSE 0x0074
48#define MACB_ELE 0x0078 48#define MACB_ELE 0x0078
49#define MACB_RJA 0x007c 49#define MACB_RJA 0x007c
50#define MACB_USF 0x0080 50#define MACB_USF 0x0080
51#define MACB_STE 0x0084 51#define MACB_STE 0x0084
52#define MACB_RLE 0x0088 52#define MACB_RLE 0x0088
53#define MACB_TPF 0x008c 53#define MACB_TPF 0x008c
54#define MACB_HRB 0x0090 54#define MACB_HRB 0x0090
55#define MACB_HRT 0x0094 55#define MACB_HRT 0x0094
56#define MACB_SA1B 0x0098 56#define MACB_SA1B 0x0098
57#define MACB_SA1T 0x009c 57#define MACB_SA1T 0x009c
58#define MACB_SA2B 0x00a0 58#define MACB_SA2B 0x00a0
59#define MACB_SA2T 0x00a4 59#define MACB_SA2T 0x00a4
60#define MACB_SA3B 0x00a8 60#define MACB_SA3B 0x00a8
61#define MACB_SA3T 0x00ac 61#define MACB_SA3T 0x00ac
62#define MACB_SA4B 0x00b0 62#define MACB_SA4B 0x00b0
63#define MACB_SA4T 0x00b4 63#define MACB_SA4T 0x00b4
64#define MACB_TID 0x00b8 64#define MACB_TID 0x00b8
65#define MACB_TPQ 0x00bc 65#define MACB_TPQ 0x00bc
66#define MACB_USRIO 0x00c0 66#define MACB_USRIO 0x00c0
67#define MACB_WOL 0x00c4 67#define MACB_WOL 0x00c4
68#define MACB_MID 0x00fc 68#define MACB_MID 0x00fc
69 69
70/* GEM register offsets. */ 70/* GEM register offsets. */
71#define GEM_NCFGR 0x0004 71#define GEM_NCFGR 0x0004 /* Network Config */
72#define GEM_USRIO 0x000c 72#define GEM_USRIO 0x000c /* User IO */
73#define GEM_DMACFG 0x0010 73#define GEM_DMACFG 0x0010 /* DMA Configuration */
74#define GEM_HRB 0x0080 74#define GEM_HRB 0x0080 /* Hash Bottom */
75#define GEM_HRT 0x0084 75#define GEM_HRT 0x0084 /* Hash Top */
76#define GEM_SA1B 0x0088 76#define GEM_SA1B 0x0088 /* Specific1 Bottom */
77#define GEM_SA1T 0x008C 77#define GEM_SA1T 0x008C /* Specific1 Top */
78#define GEM_SA2B 0x0090 78#define GEM_SA2B 0x0090 /* Specific2 Bottom */
79#define GEM_SA2T 0x0094 79#define GEM_SA2T 0x0094 /* Specific2 Top */
80#define GEM_SA3B 0x0098 80#define GEM_SA3B 0x0098 /* Specific3 Bottom */
81#define GEM_SA3T 0x009C 81#define GEM_SA3T 0x009C /* Specific3 Top */
82#define GEM_SA4B 0x00A0 82#define GEM_SA4B 0x00A0 /* Specific4 Bottom */
83#define GEM_SA4T 0x00A4 83#define GEM_SA4T 0x00A4 /* Specific4 Top */
84#define GEM_OTX 0x0100 84#define GEM_OTX 0x0100 /* Octets transmitted */
85#define GEM_DCFG1 0x0280 85#define GEM_OCTTXL 0x0100 /* Octets transmitted [31:0] */
86#define GEM_DCFG2 0x0284 86#define GEM_OCTTXH 0x0104 /* Octets transmitted [47:32] */
87#define GEM_DCFG3 0x0288 87#define GEM_TXCNT 0x0108 /* Frames Transmitted counter */
88#define GEM_DCFG4 0x028c 88#define GEM_TXBCCNT 0x010c /* Broadcast Frames counter */
89#define GEM_DCFG5 0x0290 89#define GEM_TXMCCNT 0x0110 /* Multicast Frames counter */
90#define GEM_DCFG6 0x0294 90#define GEM_TXPAUSECNT 0x0114 /* Pause Frames Transmitted Counter */
91#define GEM_DCFG7 0x0298 91#define GEM_TX64CNT 0x0118 /* 64 byte Frames TX counter */
92 92#define GEM_TX65CNT 0x011c /* 65-127 byte Frames TX counter */
93#define GEM_ISR(hw_q) (0x0400 + ((hw_q) << 2)) 93#define GEM_TX128CNT 0x0120 /* 128-255 byte Frames TX counter */
94#define GEM_TBQP(hw_q) (0x0440 + ((hw_q) << 2)) 94#define GEM_TX256CNT 0x0124 /* 256-511 byte Frames TX counter */
95#define GEM_RBQP(hw_q) (0x0480 + ((hw_q) << 2)) 95#define GEM_TX512CNT 0x0128 /* 512-1023 byte Frames TX counter */
96#define GEM_IER(hw_q) (0x0600 + ((hw_q) << 2)) 96#define GEM_TX1024CNT 0x012c /* 1024-1518 byte Frames TX counter */
97#define GEM_IDR(hw_q) (0x0620 + ((hw_q) << 2)) 97#define GEM_TX1519CNT 0x0130 /* 1519+ byte Frames TX counter */
98#define GEM_IMR(hw_q) (0x0640 + ((hw_q) << 2)) 98#define GEM_TXURUNCNT 0x0134 /* TX under run error counter */
99#define GEM_SNGLCOLLCNT 0x0138 /* Single Collision Frame Counter */
100#define GEM_MULTICOLLCNT 0x013c /* Multiple Collision Frame Counter */
101#define GEM_EXCESSCOLLCNT 0x0140 /* Excessive Collision Frame Counter */
102#define GEM_LATECOLLCNT 0x0144 /* Late Collision Frame Counter */
103#define GEM_TXDEFERCNT 0x0148 /* Deferred Transmission Frame Counter */
104#define GEM_TXCSENSECNT 0x014c /* Carrier Sense Error Counter */
105#define GEM_ORX 0x0150 /* Octets received */
106#define GEM_OCTRXL 0x0150 /* Octets received [31:0] */
107#define GEM_OCTRXH 0x0154 /* Octets received [47:32] */
108#define GEM_RXCNT 0x0158 /* Frames Received Counter */
109#define GEM_RXBROADCNT 0x015c /* Broadcast Frames Received Counter */
110#define GEM_RXMULTICNT 0x0160 /* Multicast Frames Received Counter */
111#define GEM_RXPAUSECNT 0x0164 /* Pause Frames Received Counter */
112#define GEM_RX64CNT 0x0168 /* 64 byte Frames RX Counter */
113#define GEM_RX65CNT 0x016c /* 65-127 byte Frames RX Counter */
114#define GEM_RX128CNT 0x0170 /* 128-255 byte Frames RX Counter */
115#define GEM_RX256CNT 0x0174 /* 256-511 byte Frames RX Counter */
116#define GEM_RX512CNT 0x0178 /* 512-1023 byte Frames RX Counter */
117#define GEM_RX1024CNT 0x017c /* 1024-1518 byte Frames RX Counter */
118#define GEM_RX1519CNT 0x0180 /* 1519+ byte Frames RX Counter */
119#define GEM_RXUNDRCNT 0x0184 /* Undersize Frames Received Counter */
120#define GEM_RXOVRCNT 0x0188 /* Oversize Frames Received Counter */
121#define GEM_RXJABCNT 0x018c /* Jabbers Received Counter */
122#define GEM_RXFCSCNT 0x0190 /* Frame Check Sequence Error Counter */
123#define GEM_RXLENGTHCNT 0x0194 /* Length Field Error Counter */
124#define GEM_RXSYMBCNT 0x0198 /* Symbol Error Counter */
125#define GEM_RXALIGNCNT 0x019c /* Alignment Error Counter */
126#define GEM_RXRESERRCNT 0x01a0 /* Receive Resource Error Counter */
127#define GEM_RXORCNT 0x01a4 /* Receive Overrun Counter */
128#define GEM_RXIPCCNT 0x01a8 /* IP header Checksum Error Counter */
129#define GEM_RXTCPCCNT 0x01ac /* TCP Checksum Error Counter */
130#define GEM_RXUDPCCNT 0x01b0 /* UDP Checksum Error Counter */
131#define GEM_DCFG1 0x0280 /* Design Config 1 */
132#define GEM_DCFG2 0x0284 /* Design Config 2 */
133#define GEM_DCFG3 0x0288 /* Design Config 3 */
134#define GEM_DCFG4 0x028c /* Design Config 4 */
135#define GEM_DCFG5 0x0290 /* Design Config 5 */
136#define GEM_DCFG6 0x0294 /* Design Config 6 */
137#define GEM_DCFG7 0x0298 /* Design Config 7 */
138
139#define GEM_ISR(hw_q) (0x0400 + ((hw_q) << 2))
140#define GEM_TBQP(hw_q) (0x0440 + ((hw_q) << 2))
141#define GEM_RBQP(hw_q) (0x0480 + ((hw_q) << 2))
142#define GEM_IER(hw_q) (0x0600 + ((hw_q) << 2))
143#define GEM_IDR(hw_q) (0x0620 + ((hw_q) << 2))
144#define GEM_IMR(hw_q) (0x0640 + ((hw_q) << 2))
99 145
100/* Bitfields in NCR */ 146/* Bitfields in NCR */
101#define MACB_LB_OFFSET 0 147#define MACB_LB_OFFSET 0 /* reserved */
102#define MACB_LB_SIZE 1 148#define MACB_LB_SIZE 1
103#define MACB_LLB_OFFSET 1 149#define MACB_LLB_OFFSET 1 /* Loop back local */
104#define MACB_LLB_SIZE 1 150#define MACB_LLB_SIZE 1
105#define MACB_RE_OFFSET 2 151#define MACB_RE_OFFSET 2 /* Receive enable */
106#define MACB_RE_SIZE 1 152#define MACB_RE_SIZE 1
107#define MACB_TE_OFFSET 3 153#define MACB_TE_OFFSET 3 /* Transmit enable */
108#define MACB_TE_SIZE 1 154#define MACB_TE_SIZE 1
109#define MACB_MPE_OFFSET 4 155#define MACB_MPE_OFFSET 4 /* Management port enable */
110#define MACB_MPE_SIZE 1 156#define MACB_MPE_SIZE 1
111#define MACB_CLRSTAT_OFFSET 5 157#define MACB_CLRSTAT_OFFSET 5 /* Clear stats regs */
112#define MACB_CLRSTAT_SIZE 1 158#define MACB_CLRSTAT_SIZE 1
113#define MACB_INCSTAT_OFFSET 6 159#define MACB_INCSTAT_OFFSET 6 /* Incremental stats regs */
114#define MACB_INCSTAT_SIZE 1 160#define MACB_INCSTAT_SIZE 1
115#define MACB_WESTAT_OFFSET 7 161#define MACB_WESTAT_OFFSET 7 /* Write enable stats regs */
116#define MACB_WESTAT_SIZE 1 162#define MACB_WESTAT_SIZE 1
117#define MACB_BP_OFFSET 8 163#define MACB_BP_OFFSET 8 /* Back pressure */
118#define MACB_BP_SIZE 1 164#define MACB_BP_SIZE 1
119#define MACB_TSTART_OFFSET 9 165#define MACB_TSTART_OFFSET 9 /* Start transmission */
120#define MACB_TSTART_SIZE 1 166#define MACB_TSTART_SIZE 1
121#define MACB_THALT_OFFSET 10 167#define MACB_THALT_OFFSET 10 /* Transmit halt */
122#define MACB_THALT_SIZE 1 168#define MACB_THALT_SIZE 1
123#define MACB_NCR_TPF_OFFSET 11 169#define MACB_NCR_TPF_OFFSET 11 /* Transmit pause frame */
124#define MACB_NCR_TPF_SIZE 1 170#define MACB_NCR_TPF_SIZE 1
125#define MACB_TZQ_OFFSET 12 171#define MACB_TZQ_OFFSET 12 /* Transmit zero quantum pause frame */
126#define MACB_TZQ_SIZE 1 172#define MACB_TZQ_SIZE 1
127 173
128/* Bitfields in NCFGR */ 174/* Bitfields in NCFGR */
129#define MACB_SPD_OFFSET 0 175#define MACB_SPD_OFFSET 0 /* Speed */
130#define MACB_SPD_SIZE 1 176#define MACB_SPD_SIZE 1
131#define MACB_FD_OFFSET 1 177#define MACB_FD_OFFSET 1 /* Full duplex */
132#define MACB_FD_SIZE 1 178#define MACB_FD_SIZE 1
133#define MACB_BIT_RATE_OFFSET 2 179#define MACB_BIT_RATE_OFFSET 2 /* Discard non-VLAN frames */
134#define MACB_BIT_RATE_SIZE 1 180#define MACB_BIT_RATE_SIZE 1
135#define MACB_JFRAME_OFFSET 3 181#define MACB_JFRAME_OFFSET 3 /* reserved */
136#define MACB_JFRAME_SIZE 1 182#define MACB_JFRAME_SIZE 1
137#define MACB_CAF_OFFSET 4 183#define MACB_CAF_OFFSET 4 /* Copy all frames */
138#define MACB_CAF_SIZE 1 184#define MACB_CAF_SIZE 1
139#define MACB_NBC_OFFSET 5 185#define MACB_NBC_OFFSET 5 /* No broadcast */
140#define MACB_NBC_SIZE 1 186#define MACB_NBC_SIZE 1
141#define MACB_NCFGR_MTI_OFFSET 6 187#define MACB_NCFGR_MTI_OFFSET 6 /* Multicast hash enable */
142#define MACB_NCFGR_MTI_SIZE 1 188#define MACB_NCFGR_MTI_SIZE 1
143#define MACB_UNI_OFFSET 7 189#define MACB_UNI_OFFSET 7 /* Unicast hash enable */
144#define MACB_UNI_SIZE 1 190#define MACB_UNI_SIZE 1
145#define MACB_BIG_OFFSET 8 191#define MACB_BIG_OFFSET 8 /* Receive 1536 byte frames */
146#define MACB_BIG_SIZE 1 192#define MACB_BIG_SIZE 1
147#define MACB_EAE_OFFSET 9 193#define MACB_EAE_OFFSET 9 /* External address match enable */
148#define MACB_EAE_SIZE 1 194#define MACB_EAE_SIZE 1
149#define MACB_CLK_OFFSET 10 195#define MACB_CLK_OFFSET 10
150#define MACB_CLK_SIZE 2 196#define MACB_CLK_SIZE 2
151#define MACB_RTY_OFFSET 12 197#define MACB_RTY_OFFSET 12 /* Retry test */
152#define MACB_RTY_SIZE 1 198#define MACB_RTY_SIZE 1
153#define MACB_PAE_OFFSET 13 199#define MACB_PAE_OFFSET 13 /* Pause enable */
154#define MACB_PAE_SIZE 1 200#define MACB_PAE_SIZE 1
155#define MACB_RM9200_RMII_OFFSET 13 /* AT91RM9200 only */ 201#define MACB_RM9200_RMII_OFFSET 13 /* AT91RM9200 only */
156#define MACB_RM9200_RMII_SIZE 1 /* AT91RM9200 only */ 202#define MACB_RM9200_RMII_SIZE 1 /* AT91RM9200 only */
157#define MACB_RBOF_OFFSET 14 203#define MACB_RBOF_OFFSET 14 /* Receive buffer offset */
158#define MACB_RBOF_SIZE 2 204#define MACB_RBOF_SIZE 2
159#define MACB_RLCE_OFFSET 16 205#define MACB_RLCE_OFFSET 16 /* Length field error frame discard */
160#define MACB_RLCE_SIZE 1 206#define MACB_RLCE_SIZE 1
161#define MACB_DRFCS_OFFSET 17 207#define MACB_DRFCS_OFFSET 17 /* FCS remove */
162#define MACB_DRFCS_SIZE 1 208#define MACB_DRFCS_SIZE 1
163#define MACB_EFRHD_OFFSET 18 209#define MACB_EFRHD_OFFSET 18
164#define MACB_EFRHD_SIZE 1 210#define MACB_EFRHD_SIZE 1
165#define MACB_IRXFCS_OFFSET 19 211#define MACB_IRXFCS_OFFSET 19
166#define MACB_IRXFCS_SIZE 1 212#define MACB_IRXFCS_SIZE 1
167 213
168/* GEM specific NCFGR bitfields. */ 214/* GEM specific NCFGR bitfields. */
169#define GEM_GBE_OFFSET 10 215#define GEM_GBE_OFFSET 10 /* Gigabit mode enable */
170#define GEM_GBE_SIZE 1 216#define GEM_GBE_SIZE 1
171#define GEM_CLK_OFFSET 18 217#define GEM_CLK_OFFSET 18 /* MDC clock division */
172#define GEM_CLK_SIZE 3 218#define GEM_CLK_SIZE 3
173#define GEM_DBW_OFFSET 21 219#define GEM_DBW_OFFSET 21 /* Data bus width */
174#define GEM_DBW_SIZE 2 220#define GEM_DBW_SIZE 2
175#define GEM_RXCOEN_OFFSET 24 221#define GEM_RXCOEN_OFFSET 24
176#define GEM_RXCOEN_SIZE 1 222#define GEM_RXCOEN_SIZE 1
177 223
178/* Constants for data bus width. */ 224/* Constants for data bus width. */
179#define GEM_DBW32 0 225#define GEM_DBW32 0 /* 32 bit AMBA AHB data bus width */
180#define GEM_DBW64 1 226#define GEM_DBW64 1 /* 64 bit AMBA AHB data bus width */
181#define GEM_DBW128 2 227#define GEM_DBW128 2 /* 128 bit AMBA AHB data bus width */
182 228
183/* Bitfields in DMACFG. */ 229/* Bitfields in DMACFG. */
184#define GEM_FBLDO_OFFSET 0 230#define GEM_FBLDO_OFFSET 0 /* fixed burst length for DMA */
185#define GEM_FBLDO_SIZE 5 231#define GEM_FBLDO_SIZE 5
186#define GEM_ENDIA_OFFSET 7 232#define GEM_ENDIA_OFFSET 7 /* endian swap mode for packet data access */
187#define GEM_ENDIA_SIZE 1 233#define GEM_ENDIA_SIZE 1
188#define GEM_RXBMS_OFFSET 8 234#define GEM_RXBMS_OFFSET 8 /* RX packet buffer memory size select */
189#define GEM_RXBMS_SIZE 2 235#define GEM_RXBMS_SIZE 2
190#define GEM_TXPBMS_OFFSET 10 236#define GEM_TXPBMS_OFFSET 10 /* TX packet buffer memory size select */
191#define GEM_TXPBMS_SIZE 1 237#define GEM_TXPBMS_SIZE 1
192#define GEM_TXCOEN_OFFSET 11 238#define GEM_TXCOEN_OFFSET 11 /* TX IP/TCP/UDP checksum gen offload */
193#define GEM_TXCOEN_SIZE 1 239#define GEM_TXCOEN_SIZE 1
194#define GEM_RXBS_OFFSET 16 240#define GEM_RXBS_OFFSET 16 /* DMA receive buffer size */
195#define GEM_RXBS_SIZE 8 241#define GEM_RXBS_SIZE 8
196#define GEM_DDRP_OFFSET 24 242#define GEM_DDRP_OFFSET 24 /* disc_when_no_ahb */
197#define GEM_DDRP_SIZE 1 243#define GEM_DDRP_SIZE 1
198 244
199 245
200/* Bitfields in NSR */ 246/* Bitfields in NSR */
201#define MACB_NSR_LINK_OFFSET 0 247#define MACB_NSR_LINK_OFFSET 0 /* pcs_link_state */
202#define MACB_NSR_LINK_SIZE 1 248#define MACB_NSR_LINK_SIZE 1
203#define MACB_MDIO_OFFSET 1 249#define MACB_MDIO_OFFSET 1 /* status of the mdio_in pin */
204#define MACB_MDIO_SIZE 1 250#define MACB_MDIO_SIZE 1
205#define MACB_IDLE_OFFSET 2 251#define MACB_IDLE_OFFSET 2 /* The PHY management logic is idle */
206#define MACB_IDLE_SIZE 1 252#define MACB_IDLE_SIZE 1
207 253
208/* Bitfields in TSR */ 254/* Bitfields in TSR */
209#define MACB_UBR_OFFSET 0 255#define MACB_UBR_OFFSET 0 /* Used bit read */
210#define MACB_UBR_SIZE 1 256#define MACB_UBR_SIZE 1
211#define MACB_COL_OFFSET 1 257#define MACB_COL_OFFSET 1 /* Collision occurred */
212#define MACB_COL_SIZE 1 258#define MACB_COL_SIZE 1
213#define MACB_TSR_RLE_OFFSET 2 259#define MACB_TSR_RLE_OFFSET 2 /* Retry limit exceeded */
214#define MACB_TSR_RLE_SIZE 1 260#define MACB_TSR_RLE_SIZE 1
215#define MACB_TGO_OFFSET 3 261#define MACB_TGO_OFFSET 3 /* Transmit go */
216#define MACB_TGO_SIZE 1 262#define MACB_TGO_SIZE 1
217#define MACB_BEX_OFFSET 4 263#define MACB_BEX_OFFSET 4 /* TX frame corruption due to AHB error */
218#define MACB_BEX_SIZE 1 264#define MACB_BEX_SIZE 1
219#define MACB_RM9200_BNQ_OFFSET 4 /* AT91RM9200 only */ 265#define MACB_RM9200_BNQ_OFFSET 4 /* AT91RM9200 only */
220#define MACB_RM9200_BNQ_SIZE 1 /* AT91RM9200 only */ 266#define MACB_RM9200_BNQ_SIZE 1 /* AT91RM9200 only */
221#define MACB_COMP_OFFSET 5 267#define MACB_COMP_OFFSET 5 /* Transmit complete */
222#define MACB_COMP_SIZE 1 268#define MACB_COMP_SIZE 1
223#define MACB_UND_OFFSET 6 269#define MACB_UND_OFFSET 6 /* Transmit under run */
224#define MACB_UND_SIZE 1 270#define MACB_UND_SIZE 1
225 271
226/* Bitfields in RSR */ 272/* Bitfields in RSR */
227#define MACB_BNA_OFFSET 0 273#define MACB_BNA_OFFSET 0 /* Buffer not available */
228#define MACB_BNA_SIZE 1 274#define MACB_BNA_SIZE 1
229#define MACB_REC_OFFSET 1 275#define MACB_REC_OFFSET 1 /* Frame received */
230#define MACB_REC_SIZE 1 276#define MACB_REC_SIZE 1
231#define MACB_OVR_OFFSET 2 277#define MACB_OVR_OFFSET 2 /* Receive overrun */
232#define MACB_OVR_SIZE 1 278#define MACB_OVR_SIZE 1
233 279
234/* Bitfields in ISR/IER/IDR/IMR */ 280/* Bitfields in ISR/IER/IDR/IMR */
235#define MACB_MFD_OFFSET 0 281#define MACB_MFD_OFFSET 0 /* Management frame sent */
236#define MACB_MFD_SIZE 1 282#define MACB_MFD_SIZE 1
237#define MACB_RCOMP_OFFSET 1 283#define MACB_RCOMP_OFFSET 1 /* Receive complete */
238#define MACB_RCOMP_SIZE 1 284#define MACB_RCOMP_SIZE 1
239#define MACB_RXUBR_OFFSET 2 285#define MACB_RXUBR_OFFSET 2 /* RX used bit read */
240#define MACB_RXUBR_SIZE 1 286#define MACB_RXUBR_SIZE 1
241#define MACB_TXUBR_OFFSET 3 287#define MACB_TXUBR_OFFSET 3 /* TX used bit read */
242#define MACB_TXUBR_SIZE 1 288#define MACB_TXUBR_SIZE 1
243#define MACB_ISR_TUND_OFFSET 4 289#define MACB_ISR_TUND_OFFSET 4 /* Enable TX buffer under run interrupt */
244#define MACB_ISR_TUND_SIZE 1 290#define MACB_ISR_TUND_SIZE 1
245#define MACB_ISR_RLE_OFFSET 5 291#define MACB_ISR_RLE_OFFSET 5 /* EN retry exceeded/late coll interrupt */
246#define MACB_ISR_RLE_SIZE 1 292#define MACB_ISR_RLE_SIZE 1
247#define MACB_TXERR_OFFSET 6 293#define MACB_TXERR_OFFSET 6 /* EN TX frame corrupt from error interrupt */
248#define MACB_TXERR_SIZE 1 294#define MACB_TXERR_SIZE 1
249#define MACB_TCOMP_OFFSET 7 295#define MACB_TCOMP_OFFSET 7 /* Enable transmit complete interrupt */
250#define MACB_TCOMP_SIZE 1 296#define MACB_TCOMP_SIZE 1
251#define MACB_ISR_LINK_OFFSET 9 297#define MACB_ISR_LINK_OFFSET 9 /* Enable link change interrupt */
252#define MACB_ISR_LINK_SIZE 1 298#define MACB_ISR_LINK_SIZE 1
253#define MACB_ISR_ROVR_OFFSET 10 299#define MACB_ISR_ROVR_OFFSET 10 /* Enable receive overrun interrupt */
254#define MACB_ISR_ROVR_SIZE 1 300#define MACB_ISR_ROVR_SIZE 1
255#define MACB_HRESP_OFFSET 11 301#define MACB_HRESP_OFFSET 11 /* Enable hresp not OK interrupt */
256#define MACB_HRESP_SIZE 1 302#define MACB_HRESP_SIZE 1
257#define MACB_PFR_OFFSET 12 303#define MACB_PFR_OFFSET 12 /* Enable pause frame w/ quantum interrupt */
258#define MACB_PFR_SIZE 1 304#define MACB_PFR_SIZE 1
259#define MACB_PTZ_OFFSET 13 305#define MACB_PTZ_OFFSET 13 /* Enable pause time zero interrupt */
260#define MACB_PTZ_SIZE 1 306#define MACB_PTZ_SIZE 1
261 307
262/* Bitfields in MAN */ 308/* Bitfields in MAN */
263#define MACB_DATA_OFFSET 0 309#define MACB_DATA_OFFSET 0 /* data */
264#define MACB_DATA_SIZE 16 310#define MACB_DATA_SIZE 16
265#define MACB_CODE_OFFSET 16 311#define MACB_CODE_OFFSET 16 /* Must be written to 10 */
266#define MACB_CODE_SIZE 2 312#define MACB_CODE_SIZE 2
267#define MACB_REGA_OFFSET 18 313#define MACB_REGA_OFFSET 18 /* Register address */
268#define MACB_REGA_SIZE 5 314#define MACB_REGA_SIZE 5
269#define MACB_PHYA_OFFSET 23 315#define MACB_PHYA_OFFSET 23 /* PHY address */
270#define MACB_PHYA_SIZE 5 316#define MACB_PHYA_SIZE 5
271#define MACB_RW_OFFSET 28 317#define MACB_RW_OFFSET 28 /* Operation. 10 is read. 01 is write. */
272#define MACB_RW_SIZE 2 318#define MACB_RW_SIZE 2
273#define MACB_SOF_OFFSET 30 319#define MACB_SOF_OFFSET 30 /* Must be written to 1 for Clause 22 */
274#define MACB_SOF_SIZE 2 320#define MACB_SOF_SIZE 2
275 321
276/* Bitfields in USRIO (AVR32) */ 322/* Bitfields in USRIO (AVR32) */
277#define MACB_MII_OFFSET 0 323#define MACB_MII_OFFSET 0
@@ -286,7 +332,7 @@
286/* Bitfields in USRIO (AT91) */ 332/* Bitfields in USRIO (AT91) */
287#define MACB_RMII_OFFSET 0 333#define MACB_RMII_OFFSET 0
288#define MACB_RMII_SIZE 1 334#define MACB_RMII_SIZE 1
289#define GEM_RGMII_OFFSET 0 /* GEM gigabit mode */ 335#define GEM_RGMII_OFFSET 0 /* GEM gigabit mode */
290#define GEM_RGMII_SIZE 1 336#define GEM_RGMII_SIZE 1
291#define MACB_CLKEN_OFFSET 1 337#define MACB_CLKEN_OFFSET 1
292#define MACB_CLKEN_SIZE 1 338#define MACB_CLKEN_SIZE 1
@@ -389,8 +435,7 @@
389#define queue_writel(queue, reg, value) \ 435#define queue_writel(queue, reg, value) \
390 __raw_writel((value), (queue)->bp->regs + (queue)->reg) 436 __raw_writel((value), (queue)->bp->regs + (queue)->reg)
391 437
392/* 438/* Conditional GEM/MACB macros. These perform the operation to the correct
393 * Conditional GEM/MACB macros. These perform the operation to the correct
394 * register dependent on whether the device is a GEM or a MACB. For registers 439 * register dependent on whether the device is a GEM or a MACB. For registers
395 * and bitfields that are common across both devices, use macb_{read,write}l 440 * and bitfields that are common across both devices, use macb_{read,write}l
396 * to avoid the cost of the conditional. 441 * to avoid the cost of the conditional.
@@ -413,8 +458,7 @@
413 __v; \ 458 __v; \
414 }) 459 })
415 460
416/** 461/* struct macb_dma_desc - Hardware DMA descriptor
417 * struct macb_dma_desc - Hardware DMA descriptor
418 * @addr: DMA address of data buffer 462 * @addr: DMA address of data buffer
419 * @ctrl: Control and status bits 463 * @ctrl: Control and status bits
420 */ 464 */
@@ -503,8 +547,7 @@ struct macb_dma_desc {
503/* limit RX checksum offload to TCP and UDP packets */ 547/* limit RX checksum offload to TCP and UDP packets */
504#define GEM_RX_CSUM_CHECKED_MASK 2 548#define GEM_RX_CSUM_CHECKED_MASK 2
505 549
506/** 550/* struct macb_tx_skb - data about an skb which is being transmitted
507 * struct macb_tx_skb - data about an skb which is being transmitted
508 * @skb: skb currently being transmitted, only set for the last buffer 551 * @skb: skb currently being transmitted, only set for the last buffer
509 * of the frame 552 * of the frame
510 * @mapping: DMA address of the skb's fragment buffer 553 * @mapping: DMA address of the skb's fragment buffer
@@ -519,8 +562,7 @@ struct macb_tx_skb {
519 bool mapped_as_page; 562 bool mapped_as_page;
520}; 563};
521 564
522/* 565/* Hardware-collected statistics. Used when updating the network
523 * Hardware-collected statistics. Used when updating the network
524 * device stats by a periodic timer. 566 * device stats by a periodic timer.
525 */ 567 */
526struct macb_stats { 568struct macb_stats {
@@ -595,6 +637,107 @@ struct gem_stats {
595 u32 rx_udp_checksum_errors; 637 u32 rx_udp_checksum_errors;
596}; 638};
597 639
640/* Describes the name and offset of an individual statistic register, as
641 * returned by `ethtool -S`. Also describes which net_device_stats statistics
642 * this register should contribute to.
643 */
644struct gem_statistic {
645 char stat_string[ETH_GSTRING_LEN];
646 int offset;
647 u32 stat_bits;
648};
649
650/* Bitfield defs for net_device_stat statistics */
651#define GEM_NDS_RXERR_OFFSET 0
652#define GEM_NDS_RXLENERR_OFFSET 1
653#define GEM_NDS_RXOVERERR_OFFSET 2
654#define GEM_NDS_RXCRCERR_OFFSET 3
655#define GEM_NDS_RXFRAMEERR_OFFSET 4
656#define GEM_NDS_RXFIFOERR_OFFSET 5
657#define GEM_NDS_TXERR_OFFSET 6
658#define GEM_NDS_TXABORTEDERR_OFFSET 7
659#define GEM_NDS_TXCARRIERERR_OFFSET 8
660#define GEM_NDS_TXFIFOERR_OFFSET 9
661#define GEM_NDS_COLLISIONS_OFFSET 10
662
663#define GEM_STAT_TITLE(name, title) GEM_STAT_TITLE_BITS(name, title, 0)
664#define GEM_STAT_TITLE_BITS(name, title, bits) { \
665 .stat_string = title, \
666 .offset = GEM_##name, \
667 .stat_bits = bits \
668}
669
670/* list of gem statistic registers. The names MUST match the
671 * corresponding GEM_* definitions.
672 */
673static const struct gem_statistic gem_statistics[] = {
674 GEM_STAT_TITLE(OCTTXL, "tx_octets"), /* OCTTXH combined with OCTTXL */
675 GEM_STAT_TITLE(TXCNT, "tx_frames"),
676 GEM_STAT_TITLE(TXBCCNT, "tx_broadcast_frames"),
677 GEM_STAT_TITLE(TXMCCNT, "tx_multicast_frames"),
678 GEM_STAT_TITLE(TXPAUSECNT, "tx_pause_frames"),
679 GEM_STAT_TITLE(TX64CNT, "tx_64_byte_frames"),
680 GEM_STAT_TITLE(TX65CNT, "tx_65_127_byte_frames"),
681 GEM_STAT_TITLE(TX128CNT, "tx_128_255_byte_frames"),
682 GEM_STAT_TITLE(TX256CNT, "tx_256_511_byte_frames"),
683 GEM_STAT_TITLE(TX512CNT, "tx_512_1023_byte_frames"),
684 GEM_STAT_TITLE(TX1024CNT, "tx_1024_1518_byte_frames"),
685 GEM_STAT_TITLE(TX1519CNT, "tx_greater_than_1518_byte_frames"),
686 GEM_STAT_TITLE_BITS(TXURUNCNT, "tx_underrun",
687 GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_TXFIFOERR)),
688 GEM_STAT_TITLE_BITS(SNGLCOLLCNT, "tx_single_collision_frames",
689 GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_COLLISIONS)),
690 GEM_STAT_TITLE_BITS(MULTICOLLCNT, "tx_multiple_collision_frames",
691 GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_COLLISIONS)),
692 GEM_STAT_TITLE_BITS(EXCESSCOLLCNT, "tx_excessive_collisions",
693 GEM_BIT(NDS_TXERR)|
694 GEM_BIT(NDS_TXABORTEDERR)|
695 GEM_BIT(NDS_COLLISIONS)),
696 GEM_STAT_TITLE_BITS(LATECOLLCNT, "tx_late_collisions",
697 GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_COLLISIONS)),
698 GEM_STAT_TITLE(TXDEFERCNT, "tx_deferred_frames"),
699 GEM_STAT_TITLE_BITS(TXCSENSECNT, "tx_carrier_sense_errors",
700 GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_COLLISIONS)),
701 GEM_STAT_TITLE(OCTRXL, "rx_octets"), /* OCTRXH combined with OCTRXL */
702 GEM_STAT_TITLE(RXCNT, "rx_frames"),
703 GEM_STAT_TITLE(RXBROADCNT, "rx_broadcast_frames"),
704 GEM_STAT_TITLE(RXMULTICNT, "rx_multicast_frames"),
705 GEM_STAT_TITLE(RXPAUSECNT, "rx_pause_frames"),
706 GEM_STAT_TITLE(RX64CNT, "rx_64_byte_frames"),
707 GEM_STAT_TITLE(RX65CNT, "rx_65_127_byte_frames"),
708 GEM_STAT_TITLE(RX128CNT, "rx_128_255_byte_frames"),
709 GEM_STAT_TITLE(RX256CNT, "rx_256_511_byte_frames"),
710 GEM_STAT_TITLE(RX512CNT, "rx_512_1023_byte_frames"),
711 GEM_STAT_TITLE(RX1024CNT, "rx_1024_1518_byte_frames"),
712 GEM_STAT_TITLE(RX1519CNT, "rx_greater_than_1518_byte_frames"),
713 GEM_STAT_TITLE_BITS(RXUNDRCNT, "rx_undersized_frames",
714 GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXLENERR)),
715 GEM_STAT_TITLE_BITS(RXOVRCNT, "rx_oversize_frames",
716 GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXLENERR)),
717 GEM_STAT_TITLE_BITS(RXJABCNT, "rx_jabbers",
718 GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXLENERR)),
719 GEM_STAT_TITLE_BITS(RXFCSCNT, "rx_frame_check_sequence_errors",
720 GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXCRCERR)),
721 GEM_STAT_TITLE_BITS(RXLENGTHCNT, "rx_length_field_frame_errors",
722 GEM_BIT(NDS_RXERR)),
723 GEM_STAT_TITLE_BITS(RXSYMBCNT, "rx_symbol_errors",
724 GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXFRAMEERR)),
725 GEM_STAT_TITLE_BITS(RXALIGNCNT, "rx_alignment_errors",
726 GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXOVERERR)),
727 GEM_STAT_TITLE_BITS(RXRESERRCNT, "rx_resource_errors",
728 GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXOVERERR)),
729 GEM_STAT_TITLE_BITS(RXORCNT, "rx_overruns",
730 GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXFIFOERR)),
731 GEM_STAT_TITLE_BITS(RXIPCCNT, "rx_ip_header_checksum_errors",
732 GEM_BIT(NDS_RXERR)),
733 GEM_STAT_TITLE_BITS(RXTCPCCNT, "rx_tcp_checksum_errors",
734 GEM_BIT(NDS_RXERR)),
735 GEM_STAT_TITLE_BITS(RXUDPCCNT, "rx_udp_checksum_errors",
736 GEM_BIT(NDS_RXERR)),
737};
738
739#define GEM_STATS_LEN ARRAY_SIZE(gem_statistics)
740
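
Editor's note: the stat_bits member above is a bitmap, built from the GEM_NDS_* positions, that records which of the standard net_device_stats counters a given hardware register should be folded into. Below is a minimal illustrative sketch of how an update path could consume it; the helper name is hypothetical and this is not the patch's actual update routine, though GEM_BIT() is the same macb.h helper used in the table above.

/* Illustrative only (assumes <linux/netdevice.h> for struct net_device_stats):
 * fold one hardware counter value into the standard interface statistics
 * according to its stat_bits bitmap.
 */
static void gem_fold_stat_example(struct net_device_stats *nds, u64 val,
				  u32 stat_bits)
{
	if (stat_bits & GEM_BIT(NDS_RXERR))
		nds->rx_errors += val;
	if (stat_bits & GEM_BIT(NDS_RXLENERR))
		nds->rx_length_errors += val;
	if (stat_bits & GEM_BIT(NDS_RXOVERERR))
		nds->rx_over_errors += val;
	if (stat_bits & GEM_BIT(NDS_RXCRCERR))
		nds->rx_crc_errors += val;
	if (stat_bits & GEM_BIT(NDS_RXFRAMEERR))
		nds->rx_frame_errors += val;
	if (stat_bits & GEM_BIT(NDS_RXFIFOERR))
		nds->rx_fifo_errors += val;
	if (stat_bits & GEM_BIT(NDS_TXERR))
		nds->tx_errors += val;
	if (stat_bits & GEM_BIT(NDS_TXABORTEDERR))
		nds->tx_aborted_errors += val;
	if (stat_bits & GEM_BIT(NDS_TXCARRIERERR))
		nds->tx_carrier_errors += val;
	if (stat_bits & GEM_BIT(NDS_TXFIFOERR))
		nds->tx_fifo_errors += val;
	if (stat_bits & GEM_BIT(NDS_COLLISIONS))
		nds->collisions += val;
}
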
598struct macb; 741struct macb;
599 742
600struct macb_or_gem_ops { 743struct macb_or_gem_ops {
@@ -673,6 +816,8 @@ struct macb {
673 dma_addr_t skb_physaddr; /* phys addr from pci_map_single */ 816 dma_addr_t skb_physaddr; /* phys addr from pci_map_single */
674 int skb_length; /* saved skb length for pci_unmap_single */ 817 int skb_length; /* saved skb length for pci_unmap_single */
675 unsigned int max_tx_length; 818 unsigned int max_tx_length;
819
820 u64 ethtool_stats[GEM_STATS_LEN];
676}; 821};
677 822
678extern const struct ethtool_ops macb_ethtool_ops; 823extern const struct ethtool_ops macb_ethtool_ops;
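
Editor's note: the *_OFFSET/*_SIZE pairs in the macb.h hunks above are not used directly; they are consumed through shift-and-mask helpers. The sketch below defines its own composer macro (the driver carries similar helpers of its own) and builds a Clause 22 MDIO read frame for the MAN register from the DATA/CODE/REGA/PHYA/RW/SOF fields commented earlier; the resulting value is written to MAN and the PHY's reply is then read back from the DATA field.

#include <linux/types.h>

/* Illustrative composer macro; assumed for this sketch only. */
#define EX_BF(name, value) \
	(((value) & ((1 << MACB_##name##_SIZE) - 1)) << MACB_##name##_OFFSET)

/* Compose a Clause 22 MDIO read frame: SOF must be written as 1, CODE as
 * binary 10, RW as binary 10 for a read; DATA carries no payload on reads.
 */
static u32 example_mdio_read_frame(unsigned int phy_addr, unsigned int reg_addr)
{
	return EX_BF(SOF, 1) | EX_BF(RW, 2) | EX_BF(PHYA, phy_addr) |
	       EX_BF(REGA, reg_addr) | EX_BF(CODE, 2);
}
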
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index babe2a915b00..526ea74e82d9 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1860,9 +1860,9 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
1860 } 1860 }
1861 cpl->iff = dev->if_port; 1861 cpl->iff = dev->if_port;
1862 1862
1863 if (vlan_tx_tag_present(skb)) { 1863 if (skb_vlan_tag_present(skb)) {
1864 cpl->vlan_valid = 1; 1864 cpl->vlan_valid = 1;
1865 cpl->vlan = htons(vlan_tx_tag_get(skb)); 1865 cpl->vlan = htons(skb_vlan_tag_get(skb));
1866 st->vlan_insert++; 1866 st->vlan_insert++;
1867 } else 1867 } else
1868 cpl->vlan_valid = 0; 1868 cpl->vlan_valid = 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/mc5.c b/drivers/net/ethernet/chelsio/cxgb3/mc5.c
index e13b7fe9d082..338301b11518 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/mc5.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/mc5.c
@@ -97,14 +97,6 @@ static int mc5_cmd_write(struct adapter *adapter, u32 cmd)
97 F_DBGIRSPVALID, 1, MAX_WRITE_ATTEMPTS, 1); 97 F_DBGIRSPVALID, 1, MAX_WRITE_ATTEMPTS, 1);
98} 98}
99 99
100static inline void dbgi_wr_addr3(struct adapter *adapter, u32 v1, u32 v2,
101 u32 v3)
102{
103 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, v1);
104 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR1, v2);
105 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR2, v3);
106}
107
108static inline void dbgi_wr_data3(struct adapter *adapter, u32 v1, u32 v2, 100static inline void dbgi_wr_data3(struct adapter *adapter, u32 v1, u32 v2,
109 u32 v3) 101 u32 v3)
110{ 102{
@@ -113,14 +105,6 @@ static inline void dbgi_wr_data3(struct adapter *adapter, u32 v1, u32 v2,
113 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA2, v3); 105 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA2, v3);
114} 106}
115 107
116static inline void dbgi_rd_rsp3(struct adapter *adapter, u32 *v1, u32 *v2,
117 u32 *v3)
118{
119 *v1 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA0);
120 *v2 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA1);
121 *v3 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA2);
122}
123
124/* 108/*
125 * Write data to the TCAM register at address (0, 0, addr_lo) using the TCAM 109 * Write data to the TCAM register at address (0, 0, addr_lo) using the TCAM
126 * command cmd. The data to be written must have been set up by the caller. 110 * command cmd. The data to be written must have been set up by the caller.
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 3dfcf600fcc6..d6aa602f168d 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -1148,8 +1148,8 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
1148 cpl->len = htonl(skb->len); 1148 cpl->len = htonl(skb->len);
1149 cntrl = V_TXPKT_INTF(pi->port_id); 1149 cntrl = V_TXPKT_INTF(pi->port_id);
1150 1150
1151 if (vlan_tx_tag_present(skb)) 1151 if (skb_vlan_tag_present(skb))
1152 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb)); 1152 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(skb_vlan_tag_get(skb));
1153 1153
1154 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size); 1154 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
1155 if (tso_info) { 1155 if (tso_info) {
@@ -1282,7 +1282,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1282 qs->port_stats[SGE_PSTAT_TX_CSUM]++; 1282 qs->port_stats[SGE_PSTAT_TX_CSUM]++;
1283 if (skb_shinfo(skb)->gso_size) 1283 if (skb_shinfo(skb)->gso_size)
1284 qs->port_stats[SGE_PSTAT_TSO]++; 1284 qs->port_stats[SGE_PSTAT_TSO]++;
1285 if (vlan_tx_tag_present(skb)) 1285 if (skb_vlan_tag_present(skb))
1286 qs->port_stats[SGE_PSTAT_VLANINS]++; 1286 qs->port_stats[SGE_PSTAT_VLANINS]++;
1287 1287
1288 /* 1288 /*
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index c74a898fcd4f..184a8d545ac4 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -727,9 +727,9 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
727 p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16); 727 p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
728 } 728 }
729 729
730 for (i = 0; i < 6; i++) 730 ret = hex2bin(p->eth_base, vpd.na_data, 6);
731 p->eth_base[i] = hex_to_bin(vpd.na_data[2 * i]) * 16 + 731 if (ret < 0)
732 hex_to_bin(vpd.na_data[2 * i + 1]); 732 return -EINVAL;
733 return 0; 733 return 0;
734} 734}
735 735
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index b85280775997..ae50cd72358c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -4,6 +4,6 @@
4 4
5obj-$(CONFIG_CHELSIO_T4) += cxgb4.o 5obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
6 6
7cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o 7cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o
8cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o 8cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
9cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o 9cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
new file mode 100644
index 000000000000..9062a8434246
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
@@ -0,0 +1,317 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 * Copyright (C) 2003-2014 Chelsio Communications. All rights reserved.
4 *
5 * Written by Deepak (deepak.s@chelsio.com)
6 *
7 * This program is distributed in the hope that it will be useful, but WITHOUT
8 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
9 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
10 * release for licensing terms and conditions.
11 */
12
13#include <linux/module.h>
14#include <linux/netdevice.h>
15#include <linux/jhash.h>
16#include <linux/if_vlan.h>
17#include <net/addrconf.h>
18#include "cxgb4.h"
19#include "clip_tbl.h"
20
21static inline unsigned int ipv4_clip_hash(struct clip_tbl *c, const u32 *key)
22{
23 unsigned int clipt_size_half = c->clipt_size / 2;
24
25 return jhash_1word(*key, 0) % clipt_size_half;
26}
27
28static inline unsigned int ipv6_clip_hash(struct clip_tbl *d, const u32 *key)
29{
30 unsigned int clipt_size_half = d->clipt_size / 2;
31 u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];
32
33 return clipt_size_half +
34 (jhash_1word(xor, 0) % clipt_size_half);
35}
36
37static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr,
38 int addr_len)
39{
40 return addr_len == 4 ? ipv4_clip_hash(ctbl, addr) :
41 ipv6_clip_hash(ctbl, addr);
42}
43
44static int clip6_get_mbox(const struct net_device *dev,
45 const struct in6_addr *lip)
46{
47 struct adapter *adap = netdev2adap(dev);
48 struct fw_clip_cmd c;
49
50 memset(&c, 0, sizeof(c));
51 c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
52 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
53 c.alloc_to_len16 = htonl(FW_CLIP_CMD_ALLOC_F | FW_LEN16(c));
54 *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
55 *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
56 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
57}
58
59static int clip6_release_mbox(const struct net_device *dev,
60 const struct in6_addr *lip)
61{
62 struct adapter *adap = netdev2adap(dev);
63 struct fw_clip_cmd c;
64
65 memset(&c, 0, sizeof(c));
66 c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
67 FW_CMD_REQUEST_F | FW_CMD_READ_F);
68 c.alloc_to_len16 = htonl(FW_CLIP_CMD_FREE_F | FW_LEN16(c));
69 *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
70 *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
71 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
72}
73
74int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
75{
76 struct adapter *adap = netdev2adap(dev);
77 struct clip_tbl *ctbl = adap->clipt;
78 struct clip_entry *ce, *cte;
79 u32 *addr = (u32 *)lip;
80 int hash;
81 int addr_len;
82 int ret = 0;
83
84 if (!ctbl)
85 return 0;
86
87 if (v6)
88 addr_len = 16;
89 else
90 addr_len = 4;
91
92 hash = clip_addr_hash(ctbl, addr, addr_len);
93
94 read_lock_bh(&ctbl->lock);
95 list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
96 if (addr_len == cte->addr_len &&
97 memcmp(lip, cte->addr, cte->addr_len) == 0) {
98 ce = cte;
99 read_unlock_bh(&ctbl->lock);
100 goto found;
101 }
102 }
103 read_unlock_bh(&ctbl->lock);
104
105 write_lock_bh(&ctbl->lock);
106 if (!list_empty(&ctbl->ce_free_head)) {
107 ce = list_first_entry(&ctbl->ce_free_head,
108 struct clip_entry, list);
109 list_del(&ce->list);
110 INIT_LIST_HEAD(&ce->list);
111 spin_lock_init(&ce->lock);
112 atomic_set(&ce->refcnt, 0);
113 atomic_dec(&ctbl->nfree);
114 ce->addr_len = addr_len;
115 memcpy(ce->addr, lip, addr_len);
116 list_add_tail(&ce->list, &ctbl->hash_list[hash]);
117 if (v6) {
118 ret = clip6_get_mbox(dev, (const struct in6_addr *)lip);
119 if (ret) {
120 write_unlock_bh(&ctbl->lock);
121 return ret;
122 }
123 }
124 } else {
125 write_unlock_bh(&ctbl->lock);
126 return -ENOMEM;
127 }
128 write_unlock_bh(&ctbl->lock);
129found:
130 atomic_inc(&ce->refcnt);
131
132 return 0;
133}
134EXPORT_SYMBOL(cxgb4_clip_get);
135
136void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6)
137{
138 struct adapter *adap = netdev2adap(dev);
139 struct clip_tbl *ctbl = adap->clipt;
140 struct clip_entry *ce, *cte;
141 u32 *addr = (u32 *)lip;
142 int hash;
143 int addr_len;
144
145 if (v6)
146 addr_len = 16;
147 else
148 addr_len = 4;
149
150 hash = clip_addr_hash(ctbl, addr, addr_len);
151
152 read_lock_bh(&ctbl->lock);
153 list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
154 if (addr_len == cte->addr_len &&
155 memcmp(lip, cte->addr, cte->addr_len) == 0) {
156 ce = cte;
157 read_unlock_bh(&ctbl->lock);
158 goto found;
159 }
160 }
161 read_unlock_bh(&ctbl->lock);
162
163 return;
164found:
165 write_lock_bh(&ctbl->lock);
166 spin_lock_bh(&ce->lock);
167 if (atomic_dec_and_test(&ce->refcnt)) {
168 list_del(&ce->list);
169 INIT_LIST_HEAD(&ce->list);
170 list_add_tail(&ce->list, &ctbl->ce_free_head);
171 atomic_inc(&ctbl->nfree);
172 if (v6)
173 clip6_release_mbox(dev, (const struct in6_addr *)lip);
174 }
175 spin_unlock_bh(&ce->lock);
176 write_unlock_bh(&ctbl->lock);
177}
178EXPORT_SYMBOL(cxgb4_clip_release);
179
180/* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
181 * a physical device.
182 * The physical device reference is needed to send the actual CLIP command.
183 */
184static int cxgb4_update_dev_clip(struct net_device *root_dev,
185 struct net_device *dev)
186{
187 struct inet6_dev *idev = NULL;
188 struct inet6_ifaddr *ifa;
189 int ret = 0;
190
191 idev = __in6_dev_get(root_dev);
192 if (!idev)
193 return ret;
194
195 read_lock_bh(&idev->lock);
196 list_for_each_entry(ifa, &idev->addr_list, if_list) {
197 ret = cxgb4_clip_get(dev, (const u32 *)ifa->addr.s6_addr, 1);
198 if (ret < 0)
199 break;
200 }
201 read_unlock_bh(&idev->lock);
202
203 return ret;
204}
205
206int cxgb4_update_root_dev_clip(struct net_device *dev)
207{
208 struct net_device *root_dev = NULL;
209 int i, ret = 0;
210
211 /* First populate the real net device's IPv6 addresses */
212 ret = cxgb4_update_dev_clip(dev, dev);
213 if (ret)
214 return ret;
215
216 /* Parse all bond and vlan devices layered on top of the physical dev */
217 root_dev = netdev_master_upper_dev_get_rcu(dev);
218 if (root_dev) {
219 ret = cxgb4_update_dev_clip(root_dev, dev);
220 if (ret)
221 return ret;
222 }
223
224 for (i = 0; i < VLAN_N_VID; i++) {
225 root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
226 if (!root_dev)
227 continue;
228
229 ret = cxgb4_update_dev_clip(root_dev, dev);
230 if (ret)
231 break;
232 }
233
234 return ret;
235}
236EXPORT_SYMBOL(cxgb4_update_root_dev_clip);
237
238int clip_tbl_show(struct seq_file *seq, void *v)
239{
240 struct adapter *adapter = seq->private;
241 struct clip_tbl *ctbl = adapter->clipt;
242 struct clip_entry *ce;
243 char ip[60];
244 int i;
245
246 read_lock_bh(&ctbl->lock);
247
248 seq_puts(seq, "IP Address Users\n");
249 for (i = 0 ; i < ctbl->clipt_size; ++i) {
250 list_for_each_entry(ce, &ctbl->hash_list[i], list) {
251 ip[0] = '\0';
252 if (ce->addr_len == 16)
253 sprintf(ip, "%pI6c", ce->addr);
254 else
255 sprintf(ip, "%pI4c", ce->addr);
256 seq_printf(seq, "%-25s %u\n", ip,
257 atomic_read(&ce->refcnt));
258 }
259 }
260 seq_printf(seq, "Free clip entries : %d\n", atomic_read(&ctbl->nfree));
261
262 read_unlock_bh(&ctbl->lock);
263
264 return 0;
265}
266
267struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
268 unsigned int clipt_end)
269{
270 struct clip_entry *cl_list;
271 struct clip_tbl *ctbl;
272 unsigned int clipt_size;
273 int i;
274
275 if (clipt_start >= clipt_end)
276 return NULL;
277 clipt_size = clipt_end - clipt_start + 1;
278 if (clipt_size < CLIPT_MIN_HASH_BUCKETS)
279 return NULL;
280
281 ctbl = t4_alloc_mem(sizeof(*ctbl) +
282 clipt_size*sizeof(struct list_head));
283 if (!ctbl)
284 return NULL;
285
286 ctbl->clipt_start = clipt_start;
287 ctbl->clipt_size = clipt_size;
288 INIT_LIST_HEAD(&ctbl->ce_free_head);
289
290 atomic_set(&ctbl->nfree, clipt_size);
291 rwlock_init(&ctbl->lock);
292
293 for (i = 0; i < ctbl->clipt_size; ++i)
294 INIT_LIST_HEAD(&ctbl->hash_list[i]);
295
296 cl_list = t4_alloc_mem(clipt_size*sizeof(struct clip_entry));
297 ctbl->cl_list = (void *)cl_list;
298
299 for (i = 0; i < clipt_size; i++) {
300 INIT_LIST_HEAD(&cl_list[i].list);
301 list_add_tail(&cl_list[i].list, &ctbl->ce_free_head);
302 }
303
304 return ctbl;
305}
306
307void t4_cleanup_clip_tbl(struct adapter *adap)
308{
309 struct clip_tbl *ctbl = adap->clipt;
310
311 if (ctbl) {
312 if (ctbl->cl_list)
313 t4_free_mem(ctbl->cl_list);
314 t4_free_mem(ctbl);
315 }
316}
317EXPORT_SYMBOL(t4_cleanup_clip_tbl);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
new file mode 100644
index 000000000000..2eaba0161cf8
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
@@ -0,0 +1,41 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 * Copyright (C) 2003-2014 Chelsio Communications. All rights reserved.
4 *
5 * Written by Deepak (deepak.s@chelsio.com)
6 *
7 * This program is distributed in the hope that it will be useful, but WITHOUT
8 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
9 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
10 * release for licensing terms and conditions.
11 */
12
13struct clip_entry {
14 spinlock_t lock; /* Hold while modifying clip reference */
15 atomic_t refcnt;
16 struct list_head list;
17 u32 addr[4];
18 int addr_len;
19};
20
21struct clip_tbl {
22 unsigned int clipt_start;
23 unsigned int clipt_size;
24 rwlock_t lock;
25 atomic_t nfree;
26 struct list_head ce_free_head;
27 void *cl_list;
28 struct list_head hash_list[0];
29};
30
31enum {
32 CLIPT_MIN_HASH_BUCKETS = 2,
33};
34
35struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
36 unsigned int clipt_end);
37int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6);
38void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6);
39int clip_tbl_show(struct seq_file *seq, void *v);
40int cxgb4_update_root_dev_clip(struct net_device *dev);
41void t4_cleanup_clip_tbl(struct adapter *adap);
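
Editor's note: taken together with clip_tbl.c above, the intended usage is reference-counted: a caller pins an address with cxgb4_clip_get(), which also issues the firmware CLIP command the first time the address is seen, and drops it with cxgb4_clip_release(). The hypothetical caller below is a sketch only, showing the calling convention for IPv6 addresses as used by cxgb4_update_dev_clip() above.

#include <linux/netdevice.h>
#include <net/ipv6.h>
#include "clip_tbl.h"

/* Hypothetical ULP hooks: pin the local IPv6 address while it is in use. */
static int example_addr_in_use(struct net_device *dev,
			       const struct in6_addr *laddr)
{
	/* v6 = 1 selects the 16-byte address path. */
	return cxgb4_clip_get(dev, (const u32 *)laddr->s6_addr, 1);
}

static void example_addr_unused(struct net_device *dev,
				const struct in6_addr *laddr)
{
	cxgb4_clip_release(dev, (const u32 *)laddr->s6_addr, 1);
}
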
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 5ab5c3133acd..d6cda17efe6e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -49,16 +49,6 @@
49#include <asm/io.h> 49#include <asm/io.h>
50#include "cxgb4_uld.h" 50#include "cxgb4_uld.h"
51 51
52#define T4FW_VERSION_MAJOR 0x01
53#define T4FW_VERSION_MINOR 0x0C
54#define T4FW_VERSION_MICRO 0x19
55#define T4FW_VERSION_BUILD 0x00
56
57#define T5FW_VERSION_MAJOR 0x01
58#define T5FW_VERSION_MINOR 0x0C
59#define T5FW_VERSION_MICRO 0x19
60#define T5FW_VERSION_BUILD 0x00
61
62#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__) 52#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
63 53
64enum { 54enum {
@@ -231,6 +221,7 @@ struct sge_params {
231struct tp_params { 221struct tp_params {
232 unsigned int ntxchan; /* # of Tx channels */ 222 unsigned int ntxchan; /* # of Tx channels */
233 unsigned int tre; /* log2 of core clocks per TP tick */ 223 unsigned int tre; /* log2 of core clocks per TP tick */
224 unsigned int la_mask; /* what events are recorded by TP LA */
234 unsigned short tx_modq_map; /* TX modulation scheduler queue to */ 225 unsigned short tx_modq_map; /* TX modulation scheduler queue to */
235 /* channel map */ 226 /* channel map */
236 227
@@ -290,11 +281,21 @@ enum chip_type {
290 T5_LAST_REV = T5_A1, 281 T5_LAST_REV = T5_A1,
291}; 282};
292 283
284struct devlog_params {
285 u32 memtype; /* which memory (EDC0, EDC1, MC) */
286 u32 start; /* start of log in firmware memory */
287 u32 size; /* size of log */
288};
289
293struct adapter_params { 290struct adapter_params {
294 struct sge_params sge; 291 struct sge_params sge;
295 struct tp_params tp; 292 struct tp_params tp;
296 struct vpd_params vpd; 293 struct vpd_params vpd;
297 struct pci_params pci; 294 struct pci_params pci;
295 struct devlog_params devlog;
296 enum pcie_memwin drv_memwin;
297
298 unsigned int cim_la_size;
298 299
299 unsigned int sf_size; /* serial flash size in bytes */ 300 unsigned int sf_size; /* serial flash size in bytes */
300 unsigned int sf_nsec; /* # of flash sectors */ 301 unsigned int sf_nsec; /* # of flash sectors */
@@ -476,6 +477,22 @@ struct sge_rspq { /* state for an SGE response queue */
476 struct adapter *adap; 477 struct adapter *adap;
477 struct net_device *netdev; /* associated net device */ 478 struct net_device *netdev; /* associated net device */
478 rspq_handler_t handler; 479 rspq_handler_t handler;
480#ifdef CONFIG_NET_RX_BUSY_POLL
481#define CXGB_POLL_STATE_IDLE 0
482#define CXGB_POLL_STATE_NAPI BIT(0) /* NAPI owns this poll */
483#define CXGB_POLL_STATE_POLL BIT(1) /* poll owns this poll */
484#define CXGB_POLL_STATE_NAPI_YIELD BIT(2) /* NAPI yielded this poll */
485#define CXGB_POLL_STATE_POLL_YIELD BIT(3) /* poll yielded this poll */
486#define CXGB_POLL_YIELD (CXGB_POLL_STATE_NAPI_YIELD | \
487 CXGB_POLL_STATE_POLL_YIELD)
488#define CXGB_POLL_LOCKED (CXGB_POLL_STATE_NAPI | \
489 CXGB_POLL_STATE_POLL)
490#define CXGB_POLL_USER_PEND (CXGB_POLL_STATE_POLL | \
491 CXGB_POLL_STATE_POLL_YIELD)
492 unsigned int bpoll_state;
493 spinlock_t bpoll_lock; /* lock for busy poll */
494#endif /* CONFIG_NET_RX_BUSY_POLL */
495
479}; 496};
480 497
481struct sge_eth_stats { /* Ethernet queue statistics */ 498struct sge_eth_stats { /* Ethernet queue statistics */
@@ -658,6 +675,9 @@ struct adapter {
658 unsigned int l2t_start; 675 unsigned int l2t_start;
659 unsigned int l2t_end; 676 unsigned int l2t_end;
660 struct l2t_data *l2t; 677 struct l2t_data *l2t;
678 unsigned int clipt_start;
679 unsigned int clipt_end;
680 struct clip_tbl *clipt;
661 void *uld_handle[CXGB4_ULD_MAX]; 681 void *uld_handle[CXGB4_ULD_MAX];
662 struct list_head list_node; 682 struct list_head list_node;
663 struct list_head rcu_node; 683 struct list_head rcu_node;
@@ -877,6 +897,102 @@ static inline struct adapter *netdev2adap(const struct net_device *dev)
877 return netdev2pinfo(dev)->adapter; 897 return netdev2pinfo(dev)->adapter;
878} 898}
879 899
900#ifdef CONFIG_NET_RX_BUSY_POLL
901static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q)
902{
903 spin_lock_init(&q->bpoll_lock);
904 q->bpoll_state = CXGB_POLL_STATE_IDLE;
905}
906
907static inline bool cxgb_poll_lock_napi(struct sge_rspq *q)
908{
909 bool rc = true;
910
911 spin_lock(&q->bpoll_lock);
912 if (q->bpoll_state & CXGB_POLL_LOCKED) {
913 q->bpoll_state |= CXGB_POLL_STATE_NAPI_YIELD;
914 rc = false;
915 } else {
916 q->bpoll_state = CXGB_POLL_STATE_NAPI;
917 }
918 spin_unlock(&q->bpoll_lock);
919 return rc;
920}
921
922static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q)
923{
924 bool rc = false;
925
926 spin_lock(&q->bpoll_lock);
927 if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD)
928 rc = true;
929 q->bpoll_state = CXGB_POLL_STATE_IDLE;
930 spin_unlock(&q->bpoll_lock);
931 return rc;
932}
933
934static inline bool cxgb_poll_lock_poll(struct sge_rspq *q)
935{
936 bool rc = true;
937
938 spin_lock_bh(&q->bpoll_lock);
939 if (q->bpoll_state & CXGB_POLL_LOCKED) {
940 q->bpoll_state |= CXGB_POLL_STATE_POLL_YIELD;
941 rc = false;
942 } else {
943 q->bpoll_state |= CXGB_POLL_STATE_POLL;
944 }
945 spin_unlock_bh(&q->bpoll_lock);
946 return rc;
947}
948
949static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q)
950{
951 bool rc = false;
952
953 spin_lock_bh(&q->bpoll_lock);
954 if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD)
955 rc = true;
956 q->bpoll_state = CXGB_POLL_STATE_IDLE;
957 spin_unlock_bh(&q->bpoll_lock);
958 return rc;
959}
960
961static inline bool cxgb_poll_busy_polling(struct sge_rspq *q)
962{
963 return q->bpoll_state & CXGB_POLL_USER_PEND;
964}
965#else
966static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q)
967{
968}
969
970static inline bool cxgb_poll_lock_napi(struct sge_rspq *q)
971{
972 return true;
973}
974
975static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q)
976{
977 return false;
978}
979
980static inline bool cxgb_poll_lock_poll(struct sge_rspq *q)
981{
982 return false;
983}
984
985static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q)
986{
987 return false;
988}
989
990static inline bool cxgb_poll_busy_polling(struct sge_rspq *q)
991{
992 return false;
993}
994#endif /* CONFIG_NET_RX_BUSY_POLL */
995
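
Editor's note: the bpoll_state helpers just added implement a small ownership handshake between the NAPI softirq path and socket busy polling: only one of them may own the response queue at a time, and the loser records that it yielded so the winner knows more work may be pending. The sketch below shows the expected calling pattern only; the driver's real poll routines also reap descriptors and re-arm interrupts, and LL_FLUSH_BUSY is assumed here as the busy-poll return code of this kernel era.

/* Simplified NAPI poll: back off if a busy-polling socket owns the queue. */
static int example_napi_poll(struct sge_rspq *q, int budget)
{
	int work_done = 0;

	if (!cxgb_poll_lock_napi(q))
		return budget;		/* poll owns it; NAPI will retry */

	/* ... process up to budget response-queue entries here ... */

	cxgb_poll_unlock_napi(q);
	return work_done;
}

/* Simplified busy-poll entry: bail out if NAPI currently owns the queue. */
static int example_busy_poll(struct sge_rspq *q)
{
	int work_done = 0;

	if (!cxgb_poll_lock_poll(q))
		return LL_FLUSH_BUSY;	/* NAPI owns it right now */

	/* ... process a small batch of response-queue entries here ... */

	cxgb_poll_unlock_poll(q);
	return work_done;
}
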
880void t4_os_portmod_changed(const struct adapter *adap, int port_id); 996void t4_os_portmod_changed(const struct adapter *adap, int port_id);
881void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat); 997void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
882 998
@@ -905,6 +1021,7 @@ irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
905int t4_sge_init(struct adapter *adap); 1021int t4_sge_init(struct adapter *adap);
906void t4_sge_start(struct adapter *adap); 1022void t4_sge_start(struct adapter *adap);
907void t4_sge_stop(struct adapter *adap); 1023void t4_sge_stop(struct adapter *adap);
1024int cxgb_busy_poll(struct napi_struct *napi);
908extern int dbfifo_int_thresh; 1025extern int dbfifo_int_thresh;
909 1026
910#define for_each_port(adapter, iter) \ 1027#define for_each_port(adapter, iter) \
@@ -995,12 +1112,16 @@ static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
995 1112
996int t4_seeprom_wp(struct adapter *adapter, bool enable); 1113int t4_seeprom_wp(struct adapter *adapter, bool enable);
997int get_vpd_params(struct adapter *adapter, struct vpd_params *p); 1114int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
1115int t4_read_flash(struct adapter *adapter, unsigned int addr,
1116 unsigned int nwords, u32 *data, int byte_oriented);
998int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); 1117int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
1118int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op);
999int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, 1119int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
1000 const u8 *fw_data, unsigned int size, int force); 1120 const u8 *fw_data, unsigned int size, int force);
1001unsigned int t4_flash_cfg_addr(struct adapter *adapter); 1121unsigned int t4_flash_cfg_addr(struct adapter *adapter);
1002int t4_get_fw_version(struct adapter *adapter, u32 *vers); 1122int t4_get_fw_version(struct adapter *adapter, u32 *vers);
1003int t4_get_tp_version(struct adapter *adapter, u32 *vers); 1123int t4_get_tp_version(struct adapter *adapter, u32 *vers);
1124int t4_get_exprom_version(struct adapter *adapter, u32 *vers);
1004int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, 1125int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
1005 const u8 *fw_data, unsigned int fw_size, 1126 const u8 *fw_data, unsigned int fw_size,
1006 struct fw_hdr *card_fw, enum dev_state state, int *reset); 1127 struct fw_hdr *card_fw, enum dev_state state, int *reset);
@@ -1013,6 +1134,8 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
1013 u64 *pbar2_qoffset, 1134 u64 *pbar2_qoffset,
1014 unsigned int *pbar2_qid); 1135 unsigned int *pbar2_qid);
1015 1136
1137unsigned int qtimer_val(const struct adapter *adap,
1138 const struct sge_rspq *q);
1016int t4_init_sge_params(struct adapter *adapter); 1139int t4_init_sge_params(struct adapter *adapter);
1017int t4_init_tp_params(struct adapter *adap); 1140int t4_init_tp_params(struct adapter *adap);
1018int t4_filter_field_shift(const struct adapter *adap, int filter_sel); 1141int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
@@ -1022,20 +1145,46 @@ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
1022 int start, int n, const u16 *rspq, unsigned int nrspq); 1145 int start, int n, const u16 *rspq, unsigned int nrspq);
1023int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, 1146int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
1024 unsigned int flags); 1147 unsigned int flags);
1148int t4_read_rss(struct adapter *adapter, u16 *entries);
1149void t4_read_rss_key(struct adapter *adapter, u32 *key);
1150void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx);
1151void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
1152 u32 *valp);
1153void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
1154 u32 *vfl, u32 *vfh);
1155u32 t4_read_rss_pf_map(struct adapter *adapter);
1156u32 t4_read_rss_pf_mask(struct adapter *adapter);
1157
1025int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, 1158int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
1026 u64 *parity); 1159 u64 *parity);
1027int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, 1160int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
1028 u64 *parity); 1161 u64 *parity);
1162void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
1163void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
1164int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data,
1165 size_t n);
1166int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data,
1167 size_t n);
1168int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
1169 unsigned int *valp);
1170int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
1171 const unsigned int *valp);
1172int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr);
1173void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres);
1029const char *t4_get_port_type_description(enum fw_port_type port_type); 1174const char *t4_get_port_type_description(enum fw_port_type port_type);
1030void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p); 1175void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
1031void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log); 1176void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
1177void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]);
1032void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, 1178void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
1033 unsigned int mask, unsigned int val); 1179 unsigned int mask, unsigned int val);
1180void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr);
1034void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, 1181void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
1035 struct tp_tcp_stats *v6); 1182 struct tp_tcp_stats *v6);
1036void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, 1183void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
1037 const unsigned short *alpha, const unsigned short *beta); 1184 const unsigned short *alpha, const unsigned short *beta);
1038 1185
1186void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf);
1187
1039void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid); 1188void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);
1040 1189
1041void t4_wol_magic_enable(struct adapter *adap, unsigned int port, 1190void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index a35d1ec6950e..6074680bc985 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -22,7 +22,7 @@
22 22
23/* DCBx version control 23/* DCBx version control
24 */ 24 */
25char *dcb_ver_array[] = { 25static const char * const dcb_ver_array[] = {
26 "Unknown", 26 "Unknown",
27 "DCBx-CIN", 27 "DCBx-CIN",
28 "DCBx-CEE 1.01", 28 "DCBx-CEE 1.01",
@@ -428,7 +428,10 @@ static void cxgb4_getpgtccfg(struct net_device *dev, int tc,
428 } 428 }
429 *pgid = (be32_to_cpu(pcmd.u.dcb.pgid.pgid) >> (tc * 4)) & 0xf; 429 *pgid = (be32_to_cpu(pcmd.u.dcb.pgid.pgid) >> (tc * 4)) & 0xf;
430 430
431 INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id); 431 if (local)
432 INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
433 else
434 INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
432 pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE; 435 pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
433 err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd); 436 err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
434 if (err != FW_PORT_DCB_CFG_SUCCESS) { 437 if (err != FW_PORT_DCB_CFG_SUCCESS) {
@@ -900,6 +903,88 @@ cxgb4_ieee_negotiation_complete(struct net_device *dev,
900 (dcb->supported & DCB_CAP_DCBX_VER_IEEE)); 903 (dcb->supported & DCB_CAP_DCBX_VER_IEEE));
901} 904}
902 905
906static int cxgb4_ieee_read_ets(struct net_device *dev, struct ieee_ets *ets,
907 int local)
908{
909 struct port_info *pi = netdev2pinfo(dev);
910 struct port_dcb_info *dcb = &pi->dcb;
911 struct adapter *adap = pi->adapter;
912 uint32_t tc_info;
913 struct fw_port_cmd pcmd;
914 int i, bwg, err;
915
916 if (!(dcb->msgs & (CXGB4_DCB_FW_PGID | CXGB4_DCB_FW_PGRATE)))
917 return 0;
918
919 ets->ets_cap = dcb->pg_num_tcs_supported;
920
921 if (local) {
922 ets->willing = 1;
923 INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
924 } else {
925 INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
926 }
927
928 pcmd.u.dcb.pgid.type = FW_PORT_DCB_TYPE_PGID;
929 err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
930 if (err != FW_PORT_DCB_CFG_SUCCESS) {
931 dev_err(adap->pdev_dev, "DCB read PGID failed with %d\n", -err);
932 return err;
933 }
934
935 tc_info = be32_to_cpu(pcmd.u.dcb.pgid.pgid);
936
937 if (local)
938 INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
939 else
940 INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
941
942 pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
943 err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
944 if (err != FW_PORT_DCB_CFG_SUCCESS) {
945 dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n",
946 -err);
947 return err;
948 }
949
950 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
951 bwg = (tc_info >> ((7 - i) * 4)) & 0xF;
952 ets->prio_tc[i] = bwg;
953 ets->tc_tx_bw[i] = pcmd.u.dcb.pgrate.pgrate[i];
954 ets->tc_rx_bw[i] = ets->tc_tx_bw[i];
955 ets->tc_tsa[i] = pcmd.u.dcb.pgrate.tsa[i];
956 }
957
958 return 0;
959}
960
961static int cxgb4_ieee_get_ets(struct net_device *dev, struct ieee_ets *ets)
962{
963 return cxgb4_ieee_read_ets(dev, ets, 1);
964}
965
966/* We reuse this for peer PFC as well, since PFC cannot be enabled in one direction only */
967static int cxgb4_ieee_get_pfc(struct net_device *dev, struct ieee_pfc *pfc)
968{
969 struct port_info *pi = netdev2pinfo(dev);
970 struct port_dcb_info *dcb = &pi->dcb;
971
972 memset(pfc, 0, sizeof(struct ieee_pfc));
973
974 if (!(dcb->msgs & CXGB4_DCB_FW_PFC))
975 return 0;
976
977 pfc->pfc_cap = dcb->pfc_num_tcs_supported;
978 pfc->pfc_en = bitswap_1(dcb->pfcen);
979
980 return 0;
981}
982
983static int cxgb4_ieee_peer_ets(struct net_device *dev, struct ieee_ets *ets)
984{
985 return cxgb4_ieee_read_ets(dev, ets, 0);
986}
987
903/* Fill in the Application User Priority Map associated with the 988/* Fill in the Application User Priority Map associated with the
904 * specified Application. 989 * specified Application.
905 * Priority for IEEE dcb_app is an integer, with 0 being a valid value 990 * Priority for IEEE dcb_app is an integer, with 0 being a valid value
@@ -1106,14 +1191,23 @@ static int cxgb4_cee_peer_getpfc(struct net_device *dev, struct cee_pfc *pfc)
1106 struct port_info *pi = netdev2pinfo(dev); 1191 struct port_info *pi = netdev2pinfo(dev);
1107 1192
1108 cxgb4_getnumtcs(dev, DCB_NUMTCS_ATTR_PFC, &(pfc->tcs_supported)); 1193 cxgb4_getnumtcs(dev, DCB_NUMTCS_ATTR_PFC, &(pfc->tcs_supported));
1109 pfc->pfc_en = pi->dcb.pfcen; 1194
 1195 /* Firmware sends this to us in a format that is a bit-flipped version
 1196 * of the spec; correct it before we send it to the host. This is taken
 1197 * care of by bit shifting in other uses of pfcen.
1198 */
1199 pfc->pfc_en = bitswap_1(pi->dcb.pfcen);
1110 1200
1111 return 0; 1201 return 0;
1112} 1202}
1113 1203
1114const struct dcbnl_rtnl_ops cxgb4_dcb_ops = { 1204const struct dcbnl_rtnl_ops cxgb4_dcb_ops = {
1205 .ieee_getets = cxgb4_ieee_get_ets,
1206 .ieee_getpfc = cxgb4_ieee_get_pfc,
1115 .ieee_getapp = cxgb4_ieee_getapp, 1207 .ieee_getapp = cxgb4_ieee_getapp,
1116 .ieee_setapp = cxgb4_ieee_setapp, 1208 .ieee_setapp = cxgb4_ieee_setapp,
1209 .ieee_peer_getets = cxgb4_ieee_peer_ets,
1210 .ieee_peer_getpfc = cxgb4_ieee_get_pfc,
1117 1211
1118 /* CEE std */ 1212 /* CEE std */
1119 .getstate = cxgb4_getstate, 1213 .getstate = cxgb4_getstate,
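
Editor's note: one detail of cxgb4_ieee_read_ets() above worth spelling out is that the PGID word returned by the firmware packs eight 4-bit priority-group assignments into one u32, with priority 0 in the most significant nibble, hence the (7 - i) * 4 shift. A small illustrative unpacking:

/* For tc_info = 0x01234567, priority 0 maps to PG 0, priority 1 to PG 1,
 * and so on down to priority 7 -> PG 7 (priority 0 sits in the top nibble).
 */
static void example_unpack_pgid(u32 tc_info, u8 prio_tc[IEEE_8021QAZ_MAX_TCS])
{
	int i;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		prio_tc[i] = (tc_info >> ((7 - i) * 4)) & 0xf;
}
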
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
index 31ce425616c9..ccf24d3dc982 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
@@ -136,6 +136,17 @@ void cxgb4_dcb_handle_fw_update(struct adapter *, const struct fw_port_cmd *);
136void cxgb4_dcb_set_caps(struct adapter *, const struct fw_port_cmd *); 136void cxgb4_dcb_set_caps(struct adapter *, const struct fw_port_cmd *);
137extern const struct dcbnl_rtnl_ops cxgb4_dcb_ops; 137extern const struct dcbnl_rtnl_ops cxgb4_dcb_ops;
138 138
139static inline __u8 bitswap_1(unsigned char val)
140{
141 return ((val & 0x80) >> 7) |
142 ((val & 0x40) >> 5) |
143 ((val & 0x20) >> 3) |
144 ((val & 0x10) >> 1) |
145 ((val & 0x08) << 1) |
146 ((val & 0x04) << 3) |
147 ((val & 0x02) << 5) |
148 ((val & 0x01) << 7);
149}
139#define CXGB4_DCB_ENABLED true 150#define CXGB4_DCB_ENABLED true
140 151
141#else /* !CONFIG_CHELSIO_T4_DCB */ 152#else /* !CONFIG_CHELSIO_T4_DCB */
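
Editor's note: bitswap_1() above is a plain bit-order reversal of one byte, which is exactly the correction the PFC code needs when the firmware reports the enable map with the priorities in reversed bit positions relative to the spec. It is its own inverse, so applying it twice recovers the original map. A couple of spot checks, written as an illustrative self-test:

/* Spot checks: bitswap_1(0x01) == 0x80 and bitswap_1(0xa0) == 0x05; applying
 * the reversal twice always returns the original byte.
 */
static bool example_bitswap_roundtrip(unsigned char v)
{
	return bitswap_1(bitswap_1(v)) == v;
}
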
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index c98a350d857e..d221f6b28fcd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -36,13 +36,1867 @@
36#include <linux/debugfs.h> 36#include <linux/debugfs.h>
37#include <linux/string_helpers.h> 37#include <linux/string_helpers.h>
38#include <linux/sort.h> 38#include <linux/sort.h>
39#include <linux/ctype.h>
39 40
40#include "cxgb4.h" 41#include "cxgb4.h"
41#include "t4_regs.h" 42#include "t4_regs.h"
43#include "t4_values.h"
42#include "t4fw_api.h" 44#include "t4fw_api.h"
43#include "cxgb4_debugfs.h" 45#include "cxgb4_debugfs.h"
46#include "clip_tbl.h"
44#include "l2t.h" 47#include "l2t.h"
45 48
49/* generic seq_file support for showing a table of size rows x width. */
50static void *seq_tab_get_idx(struct seq_tab *tb, loff_t pos)
51{
52 pos -= tb->skip_first;
53 return pos >= tb->rows ? NULL : &tb->data[pos * tb->width];
54}
55
56static void *seq_tab_start(struct seq_file *seq, loff_t *pos)
57{
58 struct seq_tab *tb = seq->private;
59
60 if (tb->skip_first && *pos == 0)
61 return SEQ_START_TOKEN;
62
63 return seq_tab_get_idx(tb, *pos);
64}
65
66static void *seq_tab_next(struct seq_file *seq, void *v, loff_t *pos)
67{
68 v = seq_tab_get_idx(seq->private, *pos + 1);
69 if (v)
70 ++*pos;
71 return v;
72}
73
74static void seq_tab_stop(struct seq_file *seq, void *v)
75{
76}
77
78static int seq_tab_show(struct seq_file *seq, void *v)
79{
80 const struct seq_tab *tb = seq->private;
81
82 return tb->show(seq, v, ((char *)v - tb->data) / tb->width);
83}
84
85static const struct seq_operations seq_tab_ops = {
86 .start = seq_tab_start,
87 .next = seq_tab_next,
88 .stop = seq_tab_stop,
89 .show = seq_tab_show
90};
91
92struct seq_tab *seq_open_tab(struct file *f, unsigned int rows,
93 unsigned int width, unsigned int have_header,
94 int (*show)(struct seq_file *seq, void *v, int i))
95{
96 struct seq_tab *p;
97
98 p = __seq_open_private(f, &seq_tab_ops, sizeof(*p) + rows * width);
99 if (p) {
100 p->show = show;
101 p->rows = rows;
102 p->width = width;
103 p->skip_first = have_header != 0;
104 }
105 return p;
106}
107
108/* Trim the size of a seq_tab to the supplied number of rows. The operation is
109 * irreversible.
110 */
111static int seq_tab_trim(struct seq_tab *p, unsigned int new_rows)
112{
113 if (new_rows > p->rows)
114 return -EINVAL;
115 p->rows = new_rows;
116 return 0;
117}
118
119static int cim_la_show(struct seq_file *seq, void *v, int idx)
120{
121 if (v == SEQ_START_TOKEN)
122 seq_puts(seq, "Status Data PC LS0Stat LS0Addr "
123 " LS0Data\n");
124 else {
125 const u32 *p = v;
126
127 seq_printf(seq,
128 " %02x %x%07x %x%07x %08x %08x %08x%08x%08x%08x\n",
129 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
130 p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
131 p[6], p[7]);
132 }
133 return 0;
134}
135
136static int cim_la_show_3in1(struct seq_file *seq, void *v, int idx)
137{
138 if (v == SEQ_START_TOKEN) {
139 seq_puts(seq, "Status Data PC\n");
140 } else {
141 const u32 *p = v;
142
143 seq_printf(seq, " %02x %08x %08x\n", p[5] & 0xff, p[6],
144 p[7]);
145 seq_printf(seq, " %02x %02x%06x %02x%06x\n",
146 (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
147 p[4] & 0xff, p[5] >> 8);
148 seq_printf(seq, " %02x %x%07x %x%07x\n", (p[0] >> 4) & 0xff,
149 p[0] & 0xf, p[1] >> 4, p[1] & 0xf, p[2] >> 4);
150 }
151 return 0;
152}
153
154static int cim_la_open(struct inode *inode, struct file *file)
155{
156 int ret;
157 unsigned int cfg;
158 struct seq_tab *p;
159 struct adapter *adap = inode->i_private;
160
161 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
162 if (ret)
163 return ret;
164
165 p = seq_open_tab(file, adap->params.cim_la_size / 8, 8 * sizeof(u32), 1,
166 cfg & UPDBGLACAPTPCONLY_F ?
167 cim_la_show_3in1 : cim_la_show);
168 if (!p)
169 return -ENOMEM;
170
171 ret = t4_cim_read_la(adap, (u32 *)p->data, NULL);
172 if (ret)
173 seq_release_private(inode, file);
174 return ret;
175}
176
177static const struct file_operations cim_la_fops = {
178 .owner = THIS_MODULE,
179 .open = cim_la_open,
180 .read = seq_read,
181 .llseek = seq_lseek,
182 .release = seq_release_private
183};
184
185static int cim_qcfg_show(struct seq_file *seq, void *v)
186{
187 static const char * const qname[] = {
188 "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",
189 "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",
190 "SGE0-RX", "SGE1-RX"
191 };
192
193 int i;
194 struct adapter *adap = seq->private;
195 u16 base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
196 u16 size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
197 u32 stat[(4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5))];
198 u16 thres[CIM_NUM_IBQ];
199 u32 obq_wr_t4[2 * CIM_NUM_OBQ], *wr;
200 u32 obq_wr_t5[2 * CIM_NUM_OBQ_T5];
201 u32 *p = stat;
202 int cim_num_obq = is_t4(adap->params.chip) ?
203 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
204
205 i = t4_cim_read(adap, is_t4(adap->params.chip) ? UP_IBQ_0_RDADDR_A :
206 UP_IBQ_0_SHADOW_RDADDR_A,
207 ARRAY_SIZE(stat), stat);
208 if (!i) {
209 if (is_t4(adap->params.chip)) {
210 i = t4_cim_read(adap, UP_OBQ_0_REALADDR_A,
211 ARRAY_SIZE(obq_wr_t4), obq_wr_t4);
212 wr = obq_wr_t4;
213 } else {
214 i = t4_cim_read(adap, UP_OBQ_0_SHADOW_REALADDR_A,
215 ARRAY_SIZE(obq_wr_t5), obq_wr_t5);
216 wr = obq_wr_t5;
217 }
218 }
219 if (i)
220 return i;
221
222 t4_read_cimq_cfg(adap, base, size, thres);
223
224 seq_printf(seq,
225 " Queue Base Size Thres RdPtr WrPtr SOP EOP Avail\n");
226 for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
227 seq_printf(seq, "%7s %5x %5u %5u %6x %4x %4u %4u %5u\n",
228 qname[i], base[i], size[i], thres[i],
229 IBQRDADDR_G(p[0]), IBQWRADDR_G(p[1]),
230 QUESOPCNT_G(p[3]), QUEEOPCNT_G(p[3]),
231 QUEREMFLITS_G(p[2]) * 16);
232 for ( ; i < CIM_NUM_IBQ + cim_num_obq; i++, p += 4, wr += 2)
233 seq_printf(seq, "%7s %5x %5u %12x %4x %4u %4u %5u\n",
234 qname[i], base[i], size[i],
235 QUERDADDR_G(p[0]) & 0x3fff, wr[0] - base[i],
236 QUESOPCNT_G(p[3]), QUEEOPCNT_G(p[3]),
237 QUEREMFLITS_G(p[2]) * 16);
238 return 0;
239}
240
241static int cim_qcfg_open(struct inode *inode, struct file *file)
242{
243 return single_open(file, cim_qcfg_show, inode->i_private);
244}
245
246static const struct file_operations cim_qcfg_fops = {
247 .owner = THIS_MODULE,
248 .open = cim_qcfg_open,
249 .read = seq_read,
250 .llseek = seq_lseek,
251 .release = single_release,
252};
253
254static int cimq_show(struct seq_file *seq, void *v, int idx)
255{
256 const u32 *p = v;
257
258 seq_printf(seq, "%#06x: %08x %08x %08x %08x\n", idx * 16, p[0], p[1],
259 p[2], p[3]);
260 return 0;
261}
262
263static int cim_ibq_open(struct inode *inode, struct file *file)
264{
265 int ret;
266 struct seq_tab *p;
267 unsigned int qid = (uintptr_t)inode->i_private & 7;
268 struct adapter *adap = inode->i_private - qid;
269
270 p = seq_open_tab(file, CIM_IBQ_SIZE, 4 * sizeof(u32), 0, cimq_show);
271 if (!p)
272 return -ENOMEM;
273
274 ret = t4_read_cim_ibq(adap, qid, (u32 *)p->data, CIM_IBQ_SIZE * 4);
275 if (ret < 0)
276 seq_release_private(inode, file);
277 else
278 ret = 0;
279 return ret;
280}
281
282static const struct file_operations cim_ibq_fops = {
283 .owner = THIS_MODULE,
284 .open = cim_ibq_open,
285 .read = seq_read,
286 .llseek = seq_lseek,
287 .release = seq_release_private
288};
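Worth noting: the qid/adapter recovery above relies on a pointer-tagging convention used throughout this file. When these debugfs nodes are registered, the small per-file index from t4_debugfs_entry is added to the adapter pointer before it is stored in inode->i_private; because the adapter structure is allocated with at least 8-byte alignment, the low three bits of the pointer are free to carry that index. A sketch of the encode/decode pair (illustrative only; the actual encoding happens where the files are created):

	/* Tag a small index into the low bits of an aligned pointer and
	 * recover both halves later; mirrors what cim_ibq_open(), mbox_show()
	 * and mem_open() do with i_private.
	 */
	static void *tag_adapter(struct adapter *adap, unsigned int idx)
	{
		return (char *)adap + idx;		/* idx < 8 */
	}

	static struct adapter *untag_adapter(void *priv, unsigned int *idx)
	{
		*idx = (uintptr_t)priv & 7;		/* low three bits */
		return (struct adapter *)((char *)priv - *idx);
	}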
289
290static int cim_obq_open(struct inode *inode, struct file *file)
291{
292 int ret;
293 struct seq_tab *p;
294 unsigned int qid = (uintptr_t)inode->i_private & 7;
295 struct adapter *adap = inode->i_private - qid;
296
297 p = seq_open_tab(file, 6 * CIM_OBQ_SIZE, 4 * sizeof(u32), 0, cimq_show);
298 if (!p)
299 return -ENOMEM;
300
301 ret = t4_read_cim_obq(adap, qid, (u32 *)p->data, 6 * CIM_OBQ_SIZE * 4);
302 if (ret < 0) {
303 seq_release_private(inode, file);
304 } else {
305 seq_tab_trim(p, ret / 4);
306 ret = 0;
307 }
308 return ret;
309}
310
311static const struct file_operations cim_obq_fops = {
312 .owner = THIS_MODULE,
313 .open = cim_obq_open,
314 .read = seq_read,
315 .llseek = seq_lseek,
316 .release = seq_release_private
317};
318
319struct field_desc {
320 const char *name;
321 unsigned int start;
322 unsigned int width;
323};
324
325static void field_desc_show(struct seq_file *seq, u64 v,
326 const struct field_desc *p)
327{
328 char buf[32];
329 int line_size = 0;
330
331 while (p->name) {
332 u64 mask = (1ULL << p->width) - 1;
333 int len = scnprintf(buf, sizeof(buf), "%s: %llu", p->name,
334 ((unsigned long long)v >> p->start) & mask);
335
336 if (line_size + len >= 79) {
337 line_size = 8;
338 seq_puts(seq, "\n ");
339 }
340 seq_printf(seq, "%s ", buf);
341 line_size += len + 1;
342 p++;
343 }
344 seq_putc(seq, '\n');
345}
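Each field_desc entry names a bit field inside a 64-bit logic-analyzer word by its start bit and width; field_desc_show() walks the table until the NULL-name sentinel, extracting each field with a shift-and-mask and wrapping output lines at roughly 80 columns. A stand-alone sketch of that extraction (user-space C, example values only):

	#include <stdio.h>
	#include <stdint.h>

	/* Same shift-and-mask done per field_desc entry in field_desc_show(). */
	static uint64_t field_get(uint64_t v, unsigned int start, unsigned int width)
	{
		uint64_t mask = (1ULL << width) - 1;

		return (v >> start) & mask;
	}

	int main(void)
	{
		uint64_t la_word = 0x123456789abcdef0ULL;	/* example capture word */

		/* tp_la0 entry { "Tid", 32, 10 } selects bits 41..32 */
		printf("Tid: %llu\n",
		       (unsigned long long)field_get(la_word, 32, 10));
		return 0;
	}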
346
347static struct field_desc tp_la0[] = {
348 { "RcfOpCodeOut", 60, 4 },
349 { "State", 56, 4 },
350 { "WcfState", 52, 4 },
351 { "RcfOpcSrcOut", 50, 2 },
352 { "CRxError", 49, 1 },
353 { "ERxError", 48, 1 },
354 { "SanityFailed", 47, 1 },
355 { "SpuriousMsg", 46, 1 },
356 { "FlushInputMsg", 45, 1 },
357 { "FlushInputCpl", 44, 1 },
358 { "RssUpBit", 43, 1 },
359 { "RssFilterHit", 42, 1 },
360 { "Tid", 32, 10 },
361 { "InitTcb", 31, 1 },
362 { "LineNumber", 24, 7 },
363 { "Emsg", 23, 1 },
364 { "EdataOut", 22, 1 },
365 { "Cmsg", 21, 1 },
366 { "CdataOut", 20, 1 },
367 { "EreadPdu", 19, 1 },
368 { "CreadPdu", 18, 1 },
369 { "TunnelPkt", 17, 1 },
370 { "RcfPeerFin", 16, 1 },
371 { "RcfReasonOut", 12, 4 },
372 { "TxCchannel", 10, 2 },
373 { "RcfTxChannel", 8, 2 },
374 { "RxEchannel", 6, 2 },
375 { "RcfRxChannel", 5, 1 },
376 { "RcfDataOutSrdy", 4, 1 },
377 { "RxDvld", 3, 1 },
378 { "RxOoDvld", 2, 1 },
379 { "RxCongestion", 1, 1 },
380 { "TxCongestion", 0, 1 },
381 { NULL }
382};
383
384static int tp_la_show(struct seq_file *seq, void *v, int idx)
385{
386 const u64 *p = v;
387
388 field_desc_show(seq, *p, tp_la0);
389 return 0;
390}
391
392static int tp_la_show2(struct seq_file *seq, void *v, int idx)
393{
394 const u64 *p = v;
395
396 if (idx)
397 seq_putc(seq, '\n');
398 field_desc_show(seq, p[0], tp_la0);
399 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
400 field_desc_show(seq, p[1], tp_la0);
401 return 0;
402}
403
404static int tp_la_show3(struct seq_file *seq, void *v, int idx)
405{
406 static struct field_desc tp_la1[] = {
407 { "CplCmdIn", 56, 8 },
408 { "CplCmdOut", 48, 8 },
409 { "ESynOut", 47, 1 },
410 { "EAckOut", 46, 1 },
411 { "EFinOut", 45, 1 },
412 { "ERstOut", 44, 1 },
413 { "SynIn", 43, 1 },
414 { "AckIn", 42, 1 },
415 { "FinIn", 41, 1 },
416 { "RstIn", 40, 1 },
417 { "DataIn", 39, 1 },
418 { "DataInVld", 38, 1 },
419 { "PadIn", 37, 1 },
420 { "RxBufEmpty", 36, 1 },
421 { "RxDdp", 35, 1 },
422 { "RxFbCongestion", 34, 1 },
423 { "TxFbCongestion", 33, 1 },
424 { "TxPktSumSrdy", 32, 1 },
425 { "RcfUlpType", 28, 4 },
426 { "Eread", 27, 1 },
427 { "Ebypass", 26, 1 },
428 { "Esave", 25, 1 },
429 { "Static0", 24, 1 },
430 { "Cread", 23, 1 },
431 { "Cbypass", 22, 1 },
432 { "Csave", 21, 1 },
433 { "CPktOut", 20, 1 },
434 { "RxPagePoolFull", 18, 2 },
435 { "RxLpbkPkt", 17, 1 },
436 { "TxLpbkPkt", 16, 1 },
437 { "RxVfValid", 15, 1 },
438 { "SynLearned", 14, 1 },
439 { "SetDelEntry", 13, 1 },
440 { "SetInvEntry", 12, 1 },
441 { "CpcmdDvld", 11, 1 },
442 { "CpcmdSave", 10, 1 },
443 { "RxPstructsFull", 8, 2 },
444 { "EpcmdDvld", 7, 1 },
445 { "EpcmdFlush", 6, 1 },
446 { "EpcmdTrimPrefix", 5, 1 },
447 { "EpcmdTrimPostfix", 4, 1 },
448 { "ERssIp4Pkt", 3, 1 },
449 { "ERssIp6Pkt", 2, 1 },
450 { "ERssTcpUdpPkt", 1, 1 },
451 { "ERssFceFipPkt", 0, 1 },
452 { NULL }
453 };
454 static struct field_desc tp_la2[] = {
455 { "CplCmdIn", 56, 8 },
456 { "MpsVfVld", 55, 1 },
457 { "MpsPf", 52, 3 },
458 { "MpsVf", 44, 8 },
459 { "SynIn", 43, 1 },
460 { "AckIn", 42, 1 },
461 { "FinIn", 41, 1 },
462 { "RstIn", 40, 1 },
463 { "DataIn", 39, 1 },
464 { "DataInVld", 38, 1 },
465 { "PadIn", 37, 1 },
466 { "RxBufEmpty", 36, 1 },
467 { "RxDdp", 35, 1 },
468 { "RxFbCongestion", 34, 1 },
469 { "TxFbCongestion", 33, 1 },
470 { "TxPktSumSrdy", 32, 1 },
471 { "RcfUlpType", 28, 4 },
472 { "Eread", 27, 1 },
473 { "Ebypass", 26, 1 },
474 { "Esave", 25, 1 },
475 { "Static0", 24, 1 },
476 { "Cread", 23, 1 },
477 { "Cbypass", 22, 1 },
478 { "Csave", 21, 1 },
479 { "CPktOut", 20, 1 },
480 { "RxPagePoolFull", 18, 2 },
481 { "RxLpbkPkt", 17, 1 },
482 { "TxLpbkPkt", 16, 1 },
483 { "RxVfValid", 15, 1 },
484 { "SynLearned", 14, 1 },
485 { "SetDelEntry", 13, 1 },
486 { "SetInvEntry", 12, 1 },
487 { "CpcmdDvld", 11, 1 },
488 { "CpcmdSave", 10, 1 },
489 { "RxPstructsFull", 8, 2 },
490 { "EpcmdDvld", 7, 1 },
491 { "EpcmdFlush", 6, 1 },
492 { "EpcmdTrimPrefix", 5, 1 },
493 { "EpcmdTrimPostfix", 4, 1 },
494 { "ERssIp4Pkt", 3, 1 },
495 { "ERssIp6Pkt", 2, 1 },
496 { "ERssTcpUdpPkt", 1, 1 },
497 { "ERssFceFipPkt", 0, 1 },
498 { NULL }
499 };
500 const u64 *p = v;
501
502 if (idx)
503 seq_putc(seq, '\n');
504 field_desc_show(seq, p[0], tp_la0);
505 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
506 field_desc_show(seq, p[1], (p[0] & BIT(17)) ? tp_la2 : tp_la1);
507 return 0;
508}
509
510static int tp_la_open(struct inode *inode, struct file *file)
511{
512 struct seq_tab *p;
513 struct adapter *adap = inode->i_private;
514
515 switch (DBGLAMODE_G(t4_read_reg(adap, TP_DBG_LA_CONFIG_A))) {
516 case 2:
517 p = seq_open_tab(file, TPLA_SIZE / 2, 2 * sizeof(u64), 0,
518 tp_la_show2);
519 break;
520 case 3:
521 p = seq_open_tab(file, TPLA_SIZE / 2, 2 * sizeof(u64), 0,
522 tp_la_show3);
523 break;
524 default:
525 p = seq_open_tab(file, TPLA_SIZE, sizeof(u64), 0, tp_la_show);
526 }
527 if (!p)
528 return -ENOMEM;
529
530 t4_tp_read_la(adap, (u64 *)p->data, NULL);
531 return 0;
532}
533
534static ssize_t tp_la_write(struct file *file, const char __user *buf,
535 size_t count, loff_t *pos)
536{
537 int err;
538 char s[32];
539 unsigned long val;
540 size_t size = min(sizeof(s) - 1, count);
541 struct adapter *adap = FILE_DATA(file)->i_private;
542
543 if (copy_from_user(s, buf, size))
544 return -EFAULT;
545 s[size] = '\0';
546 err = kstrtoul(s, 0, &val);
547 if (err)
548 return err;
549 if (val > 0xffff)
550 return -EINVAL;
551 adap->params.tp.la_mask = val << 16;
552 t4_set_reg_field(adap, TP_DBG_LA_CONFIG_A, 0xffff0000U,
553 adap->params.tp.la_mask);
554 return count;
555}
556
557static const struct file_operations tp_la_fops = {
558 .owner = THIS_MODULE,
559 .open = tp_la_open,
560 .read = seq_read,
561 .llseek = seq_lseek,
562 .release = seq_release_private,
563 .write = tp_la_write
564};
565
566static int ulprx_la_show(struct seq_file *seq, void *v, int idx)
567{
568 const u32 *p = v;
569
570 if (v == SEQ_START_TOKEN)
571 seq_puts(seq, " Pcmd Type Message"
572 " Data\n");
573 else
574 seq_printf(seq, "%08x%08x %4x %08x %08x%08x%08x%08x\n",
575 p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
576 return 0;
577}
578
579static int ulprx_la_open(struct inode *inode, struct file *file)
580{
581 struct seq_tab *p;
582 struct adapter *adap = inode->i_private;
583
584 p = seq_open_tab(file, ULPRX_LA_SIZE, 8 * sizeof(u32), 1,
585 ulprx_la_show);
586 if (!p)
587 return -ENOMEM;
588
589 t4_ulprx_read_la(adap, (u32 *)p->data);
590 return 0;
591}
592
593static const struct file_operations ulprx_la_fops = {
594 .owner = THIS_MODULE,
595 .open = ulprx_la_open,
596 .read = seq_read,
597 .llseek = seq_lseek,
598 .release = seq_release_private
599};
600
601/* Show the PM memory stats. These stats include:
602 *
603 * TX:
604 * Read: memory read operation
605 * Write Bypass: cut-through
606 * Bypass + mem: cut-through and save copy
607 *
608 * RX:
609 * Read: memory read
610 * Write Bypass: cut-through
611 * Flush: payload trim or drop
612 */
613static int pm_stats_show(struct seq_file *seq, void *v)
614{
615 static const char * const tx_pm_stats[] = {
616 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:"
617 };
618 static const char * const rx_pm_stats[] = {
619 "Read:", "Write bypass:", "Write mem:", "Flush:"
620 };
621
622 int i;
623 u32 tx_cnt[PM_NSTATS], rx_cnt[PM_NSTATS];
624 u64 tx_cyc[PM_NSTATS], rx_cyc[PM_NSTATS];
625 struct adapter *adap = seq->private;
626
627 t4_pmtx_get_stats(adap, tx_cnt, tx_cyc);
628 t4_pmrx_get_stats(adap, rx_cnt, rx_cyc);
629
630 seq_printf(seq, "%13s %10s %20s\n", " ", "Tx pcmds", "Tx bytes");
631 for (i = 0; i < PM_NSTATS - 1; i++)
632 seq_printf(seq, "%-13s %10u %20llu\n",
633 tx_pm_stats[i], tx_cnt[i], tx_cyc[i]);
634
635 seq_printf(seq, "%13s %10s %20s\n", " ", "Rx pcmds", "Rx bytes");
636 for (i = 0; i < PM_NSTATS - 1; i++)
637 seq_printf(seq, "%-13s %10u %20llu\n",
638 rx_pm_stats[i], rx_cnt[i], rx_cyc[i]);
639 return 0;
640}
641
642static int pm_stats_open(struct inode *inode, struct file *file)
643{
644 return single_open(file, pm_stats_show, inode->i_private);
645}
646
647static ssize_t pm_stats_clear(struct file *file, const char __user *buf,
648 size_t count, loff_t *pos)
649{
650 struct adapter *adap = FILE_DATA(file)->i_private;
651
652 t4_write_reg(adap, PM_RX_STAT_CONFIG_A, 0);
653 t4_write_reg(adap, PM_TX_STAT_CONFIG_A, 0);
654 return count;
655}
656
657static const struct file_operations pm_stats_debugfs_fops = {
658 .owner = THIS_MODULE,
659 .open = pm_stats_open,
660 .read = seq_read,
661 .llseek = seq_lseek,
662 .release = single_release,
663 .write = pm_stats_clear
664};
665
666static int cctrl_tbl_show(struct seq_file *seq, void *v)
667{
668 static const char * const dec_fac[] = {
669 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
670 "0.9375" };
671
672 int i;
673 u16 incr[NMTUS][NCCTRL_WIN];
674 struct adapter *adap = seq->private;
675
676 t4_read_cong_tbl(adap, incr);
677
678 for (i = 0; i < NCCTRL_WIN; ++i) {
679 seq_printf(seq, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
680 incr[0][i], incr[1][i], incr[2][i], incr[3][i],
681 incr[4][i], incr[5][i], incr[6][i], incr[7][i]);
682 seq_printf(seq, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
683 incr[8][i], incr[9][i], incr[10][i], incr[11][i],
684 incr[12][i], incr[13][i], incr[14][i], incr[15][i],
685 adap->params.a_wnd[i],
686 dec_fac[adap->params.b_wnd[i]]);
687 }
688 return 0;
689}
690
691DEFINE_SIMPLE_DEBUGFS_FILE(cctrl_tbl);
692
693/* Format a value in a unit that differs from the value's native unit by the
694 * given factor.
695 */
696static char *unit_conv(char *buf, size_t len, unsigned int val,
697 unsigned int factor)
698{
699 unsigned int rem = val % factor;
700
701 if (rem == 0) {
702 snprintf(buf, len, "%u", val / factor);
703 } else {
704 while (rem % 10 == 0)
705 rem /= 10;
706 snprintf(buf, len, "%u.%u", val / factor, rem);
707 }
708 return buf;
709}
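unit_conv() divides the value by the conversion factor and, when there is a remainder, appends it with trailing zeros stripped, so clk_show() can render e.g. a 2500 ps core-clock period as "2.5" ns and a 4000 ps period as plain "4". A stand-alone sketch of the same formatting rule (user-space C, example values only):

	#include <stdio.h>

	/* Mirrors the formatting in unit_conv(): whole part, then the
	 * remainder with trailing zeros removed.
	 */
	static void show_scaled(unsigned int val, unsigned int factor)
	{
		unsigned int rem = val % factor;

		if (rem == 0) {
			printf("%u\n", val / factor);
		} else {
			while (rem % 10 == 0)
				rem /= 10;
			printf("%u.%u\n", val / factor, rem);
		}
	}

	int main(void)
	{
		show_scaled(2500, 1000);	/* prints "2.5"   */
		show_scaled(4000, 1000);	/* prints "4"     */
		show_scaled(1234, 1000);	/* prints "1.234" */
		return 0;
	}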
710
711static int clk_show(struct seq_file *seq, void *v)
712{
713 char buf[32];
714 struct adapter *adap = seq->private;
715 unsigned int cclk_ps = 1000000000 / adap->params.vpd.cclk; /* in ps */
716 u32 res = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
717 unsigned int tre = TIMERRESOLUTION_G(res);
718 unsigned int dack_re = DELAYEDACKRESOLUTION_G(res);
719 unsigned long long tp_tick_us = (cclk_ps << tre) / 1000000; /* in us */
720
721 seq_printf(seq, "Core clock period: %s ns\n",
722 unit_conv(buf, sizeof(buf), cclk_ps, 1000));
723 seq_printf(seq, "TP timer tick: %s us\n",
724 unit_conv(buf, sizeof(buf), (cclk_ps << tre), 1000000));
725 seq_printf(seq, "TCP timestamp tick: %s us\n",
726 unit_conv(buf, sizeof(buf),
727 (cclk_ps << TIMESTAMPRESOLUTION_G(res)), 1000000));
728 seq_printf(seq, "DACK tick: %s us\n",
729 unit_conv(buf, sizeof(buf), (cclk_ps << dack_re), 1000000));
730 seq_printf(seq, "DACK timer: %u us\n",
731 ((cclk_ps << dack_re) / 1000000) *
732 t4_read_reg(adap, TP_DACK_TIMER_A));
733 seq_printf(seq, "Retransmit min: %llu us\n",
734 tp_tick_us * t4_read_reg(adap, TP_RXT_MIN_A));
735 seq_printf(seq, "Retransmit max: %llu us\n",
736 tp_tick_us * t4_read_reg(adap, TP_RXT_MAX_A));
737 seq_printf(seq, "Persist timer min: %llu us\n",
738 tp_tick_us * t4_read_reg(adap, TP_PERS_MIN_A));
739 seq_printf(seq, "Persist timer max: %llu us\n",
740 tp_tick_us * t4_read_reg(adap, TP_PERS_MAX_A));
741 seq_printf(seq, "Keepalive idle timer: %llu us\n",
742 tp_tick_us * t4_read_reg(adap, TP_KEEP_IDLE_A));
743 seq_printf(seq, "Keepalive interval: %llu us\n",
744 tp_tick_us * t4_read_reg(adap, TP_KEEP_INTVL_A));
745 seq_printf(seq, "Initial SRTT: %llu us\n",
746 tp_tick_us * INITSRTT_G(t4_read_reg(adap, TP_INIT_SRTT_A)));
747 seq_printf(seq, "FINWAIT2 timer: %llu us\n",
748 tp_tick_us * t4_read_reg(adap, TP_FINWAIT2_TIMER_A));
749
750 return 0;
751}
752
753DEFINE_SIMPLE_DEBUGFS_FILE(clk);
754
755/* Firmware Device Log dump. */
756static const char * const devlog_level_strings[] = {
757 [FW_DEVLOG_LEVEL_EMERG] = "EMERG",
758 [FW_DEVLOG_LEVEL_CRIT] = "CRIT",
759 [FW_DEVLOG_LEVEL_ERR] = "ERR",
760 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE",
761 [FW_DEVLOG_LEVEL_INFO] = "INFO",
762 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG"
763};
764
765static const char * const devlog_facility_strings[] = {
766 [FW_DEVLOG_FACILITY_CORE] = "CORE",
767 [FW_DEVLOG_FACILITY_SCHED] = "SCHED",
768 [FW_DEVLOG_FACILITY_TIMER] = "TIMER",
769 [FW_DEVLOG_FACILITY_RES] = "RES",
770 [FW_DEVLOG_FACILITY_HW] = "HW",
771 [FW_DEVLOG_FACILITY_FLR] = "FLR",
772 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ",
773 [FW_DEVLOG_FACILITY_PHY] = "PHY",
774 [FW_DEVLOG_FACILITY_MAC] = "MAC",
775 [FW_DEVLOG_FACILITY_PORT] = "PORT",
776 [FW_DEVLOG_FACILITY_VI] = "VI",
777 [FW_DEVLOG_FACILITY_FILTER] = "FILTER",
778 [FW_DEVLOG_FACILITY_ACL] = "ACL",
779 [FW_DEVLOG_FACILITY_TM] = "TM",
780 [FW_DEVLOG_FACILITY_QFC] = "QFC",
781 [FW_DEVLOG_FACILITY_DCB] = "DCB",
782 [FW_DEVLOG_FACILITY_ETH] = "ETH",
783 [FW_DEVLOG_FACILITY_OFLD] = "OFLD",
784 [FW_DEVLOG_FACILITY_RI] = "RI",
785 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI",
786 [FW_DEVLOG_FACILITY_FCOE] = "FCOE",
787 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI",
788 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE"
789};
790
791/* Information gathered by Device Log Open routine for the display routine.
792 */
793struct devlog_info {
794 unsigned int nentries; /* number of entries in log[] */
795 unsigned int first; /* first [temporal] entry in log[] */
796 struct fw_devlog_e log[0]; /* Firmware Device Log */
797};
798
799/* Dump a Firmware Device Log entry.
800 */
801static int devlog_show(struct seq_file *seq, void *v)
802{
803 if (v == SEQ_START_TOKEN)
804 seq_printf(seq, "%10s %15s %8s %8s %s\n",
805 "Seq#", "Tstamp", "Level", "Facility", "Message");
806 else {
807 struct devlog_info *dinfo = seq->private;
808 int fidx = (uintptr_t)v - 2;
809 unsigned long index;
810 struct fw_devlog_e *e;
811
812 /* Get a pointer to the log entry to display. Skip unused log
813 * entries.
814 */
815 index = dinfo->first + fidx;
816 if (index >= dinfo->nentries)
817 index -= dinfo->nentries;
818 e = &dinfo->log[index];
819 if (e->timestamp == 0)
820 return 0;
821
822 /* Print the message. This depends on the firmware using
823 * exactly the same formatting strings as the kernel so we may
824 * eventually have to put a format interpreter in here ...
825 */
826 seq_printf(seq, "%10d %15llu %8s %8s ",
827 e->seqno, e->timestamp,
828 (e->level < ARRAY_SIZE(devlog_level_strings)
829 ? devlog_level_strings[e->level]
830 : "UNKNOWN"),
831 (e->facility < ARRAY_SIZE(devlog_facility_strings)
832 ? devlog_facility_strings[e->facility]
833 : "UNKNOWN"));
834 seq_printf(seq, e->fmt, e->params[0], e->params[1],
835 e->params[2], e->params[3], e->params[4],
836 e->params[5], e->params[6], e->params[7]);
837 }
838 return 0;
839}
840
841/* Sequential File Operations for Device Log.
842 */
843static inline void *devlog_get_idx(struct devlog_info *dinfo, loff_t pos)
844{
845 if (pos > dinfo->nentries)
846 return NULL;
847
848 return (void *)(uintptr_t)(pos + 1);
849}
850
851static void *devlog_start(struct seq_file *seq, loff_t *pos)
852{
853 struct devlog_info *dinfo = seq->private;
854
855 return (*pos
856 ? devlog_get_idx(dinfo, *pos)
857 : SEQ_START_TOKEN);
858}
859
860static void *devlog_next(struct seq_file *seq, void *v, loff_t *pos)
861{
862 struct devlog_info *dinfo = seq->private;
863
864 (*pos)++;
865 return devlog_get_idx(dinfo, *pos);
866}
867
868static void devlog_stop(struct seq_file *seq, void *v)
869{
870}
871
872static const struct seq_operations devlog_seq_ops = {
873 .start = devlog_start,
874 .next = devlog_next,
875 .stop = devlog_stop,
876 .show = devlog_show
877};
878
879/* Set up for reading the firmware's device log. We read the entire log here
880 * and then display it incrementally in devlog_show().
881 */
882static int devlog_open(struct inode *inode, struct file *file)
883{
884 struct adapter *adap = inode->i_private;
885 struct devlog_params *dparams = &adap->params.devlog;
886 struct devlog_info *dinfo;
887 unsigned int index;
888 u32 fseqno;
889 int ret;
890
891 /* If we don't know where the log is we can't do anything.
892 */
893 if (dparams->start == 0)
894 return -ENXIO;
895
896 /* Allocate the space to read in the firmware's device log and set up
897 * for the iterated call to our display function.
898 */
899 dinfo = __seq_open_private(file, &devlog_seq_ops,
900 sizeof(*dinfo) + dparams->size);
901 if (!dinfo)
902 return -ENOMEM;
903
904 /* Record the basic log buffer information and read in the raw log.
905 */
906 dinfo->nentries = (dparams->size / sizeof(struct fw_devlog_e));
907 dinfo->first = 0;
908 spin_lock(&adap->win0_lock);
909 ret = t4_memory_rw(adap, adap->params.drv_memwin, dparams->memtype,
910 dparams->start, dparams->size, (__be32 *)dinfo->log,
911 T4_MEMORY_READ);
912 spin_unlock(&adap->win0_lock);
913 if (ret) {
914 seq_release_private(inode, file);
915 return ret;
916 }
917
918 /* Translate log multi-byte integral elements into host native format
919 * and determine where the first entry in the log is.
920 */
921 for (fseqno = ~((u32)0), index = 0; index < dinfo->nentries; index++) {
922 struct fw_devlog_e *e = &dinfo->log[index];
923 int i;
924 __u32 seqno;
925
926 if (e->timestamp == 0)
927 continue;
928
929 e->timestamp = (__force __be64)be64_to_cpu(e->timestamp);
930 seqno = be32_to_cpu(e->seqno);
931 for (i = 0; i < 8; i++)
932 e->params[i] =
933 (__force __be32)be32_to_cpu(e->params[i]);
934
935 if (seqno < fseqno) {
936 fseqno = seqno;
937 dinfo->first = index;
938 }
939 }
940 return 0;
941}
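In short, devlog_open() treats the firmware log as a ring buffer: after byte-swapping the entries it records the index of the entry with the smallest sequence number as dinfo->first, and devlog_show() then maps each display row onto the ring starting there, wrapping at nentries. A small stand-alone sketch of that index mapping (hypothetical sizes):

	#include <stdio.h>

	int main(void)
	{
		unsigned int nentries = 128;	/* entries in the log buffer */
		unsigned int first = 126;	/* index of the oldest entry */
		unsigned int fidx;

		/* same wrap-around arithmetic as devlog_show() */
		for (fidx = 0; fidx < 5; fidx++) {
			unsigned long index = first + fidx;

			if (index >= nentries)
				index -= nentries;
			printf("display row %u -> log[%lu]\n", fidx, index);
		}
		return 0;
	}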
942
943static const struct file_operations devlog_fops = {
944 .owner = THIS_MODULE,
945 .open = devlog_open,
946 .read = seq_read,
947 .llseek = seq_lseek,
948 .release = seq_release_private
949};
950
951static int mbox_show(struct seq_file *seq, void *v)
952{
953 static const char * const owner[] = { "none", "FW", "driver",
954 "unknown" };
955
956 int i;
957 unsigned int mbox = (uintptr_t)seq->private & 7;
958 struct adapter *adap = seq->private - mbox;
959 void __iomem *addr = adap->regs + PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
960 unsigned int ctrl_reg = (is_t4(adap->params.chip)
961 ? CIM_PF_MAILBOX_CTRL_A
962 : CIM_PF_MAILBOX_CTRL_SHADOW_COPY_A);
963 void __iomem *ctrl = adap->regs + PF_REG(mbox, ctrl_reg);
964
965 i = MBOWNER_G(readl(ctrl));
966 seq_printf(seq, "mailbox owned by %s\n\n", owner[i]);
967
968 for (i = 0; i < MBOX_LEN; i += 8)
969 seq_printf(seq, "%016llx\n",
970 (unsigned long long)readq(addr + i));
971 return 0;
972}
973
974static int mbox_open(struct inode *inode, struct file *file)
975{
976 return single_open(file, mbox_show, inode->i_private);
977}
978
979static ssize_t mbox_write(struct file *file, const char __user *buf,
980 size_t count, loff_t *pos)
981{
982 int i;
983 char c = '\n', s[256];
984 unsigned long long data[8];
985 const struct inode *ino;
986 unsigned int mbox;
987 struct adapter *adap;
988 void __iomem *addr;
989 void __iomem *ctrl;
990
991 if (count > sizeof(s) - 1 || !count)
992 return -EINVAL;
993 if (copy_from_user(s, buf, count))
994 return -EFAULT;
995 s[count] = '\0';
996
997 if (sscanf(s, "%llx %llx %llx %llx %llx %llx %llx %llx%c", &data[0],
998 &data[1], &data[2], &data[3], &data[4], &data[5], &data[6],
999 &data[7], &c) < 8 || c != '\n')
1000 return -EINVAL;
1001
1002 ino = FILE_DATA(file);
1003 mbox = (uintptr_t)ino->i_private & 7;
1004 adap = ino->i_private - mbox;
1005 addr = adap->regs + PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
1006 ctrl = addr + MBOX_LEN;
1007
1008 if (MBOWNER_G(readl(ctrl)) != X_MBOWNER_PL)
1009 return -EBUSY;
1010
1011 for (i = 0; i < 8; i++)
1012 writeq(data[i], addr + 8 * i);
1013
1014 writel(MBMSGVALID_F | MBOWNER_V(X_MBOWNER_FW), ctrl);
1015 return count;
1016}
1017
1018static const struct file_operations mbox_debugfs_fops = {
1019 .owner = THIS_MODULE,
1020 .open = mbox_open,
1021 .read = seq_read,
1022 .llseek = seq_lseek,
1023 .release = single_release,
1024 .write = mbox_write
1025};
1026
1027static ssize_t flash_read(struct file *file, char __user *buf, size_t count,
1028 loff_t *ppos)
1029{
1030 loff_t pos = *ppos;
1031 loff_t avail = FILE_DATA(file)->i_size;
1032 struct adapter *adap = file->private_data;
1033
1034 if (pos < 0)
1035 return -EINVAL;
1036 if (pos >= avail)
1037 return 0;
1038 if (count > avail - pos)
1039 count = avail - pos;
1040
1041 while (count) {
1042 size_t len;
1043 int ret, ofst;
1044 u8 data[256];
1045
1046 ofst = pos & 3;
1047 len = min(count + ofst, sizeof(data));
1048 ret = t4_read_flash(adap, pos - ofst, (len + 3) / 4,
1049 (u32 *)data, 1);
1050 if (ret)
1051 return ret;
1052
1053 len -= ofst;
1054 if (copy_to_user(buf, data + ofst, len))
1055 return -EFAULT;
1056
1057 buf += len;
1058 pos += len;
1059 count -= len;
1060 }
1061 count = pos - *ppos;
1062 *ppos = pos;
1063 return count;
1064}
1065
1066static const struct file_operations flash_debugfs_fops = {
1067 .owner = THIS_MODULE,
1068 .open = mem_open,
1069 .read = flash_read,
1070};
1071
1072static inline void tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
1073{
1074 *mask = x | y;
1075 y = (__force u64)cpu_to_be64(y);
1076 memcpy(addr, (char *)&y + 2, ETH_ALEN);
1077}
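tcamxy2valmask() reduces the MPS TCAM X/Y word pair to something printable: the reported mask is simply x | y, and the MAC address is taken from the low 48 bits of y after converting it to big-endian byte order (an entry with a bit set in both X and Y is invalid, which is why mps_tcam_show() prints "-" when tcamx & tcamy is non-zero). A stand-alone check of the byte extraction, with a made-up value:

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	int main(void)
	{
		uint64_t y = 0x0000112233445566ULL;	/* low 48 bits hold the MAC */
		unsigned char be[8], addr[6];
		int i;

		/* equivalent of cpu_to_be64(): most significant byte first */
		for (i = 0; i < 8; i++)
			be[i] = (unsigned char)(y >> (56 - 8 * i));

		memcpy(addr, be + 2, sizeof(addr));	/* skip the two high bytes */
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",	/* 11:22:33:44:55:66 */
		       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
		return 0;
	}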
1078
1079static int mps_tcam_show(struct seq_file *seq, void *v)
1080{
1081 if (v == SEQ_START_TOKEN)
1082 seq_puts(seq, "Idx Ethernet address Mask Vld Ports PF"
1083 " VF Replication "
1084 "P0 P1 P2 P3 ML\n");
1085 else {
1086 u64 mask;
1087 u8 addr[ETH_ALEN];
1088 struct adapter *adap = seq->private;
1089 unsigned int idx = (uintptr_t)v - 2;
1090 u64 tcamy = t4_read_reg64(adap, MPS_CLS_TCAM_Y_L(idx));
1091 u64 tcamx = t4_read_reg64(adap, MPS_CLS_TCAM_X_L(idx));
1092 u32 cls_lo = t4_read_reg(adap, MPS_CLS_SRAM_L(idx));
1093 u32 cls_hi = t4_read_reg(adap, MPS_CLS_SRAM_H(idx));
1094 u32 rplc[4] = {0, 0, 0, 0};
1095
1096 if (tcamx & tcamy) {
1097 seq_printf(seq, "%3u -\n", idx);
1098 goto out;
1099 }
1100
1101 if (cls_lo & REPLICATE_F) {
1102 struct fw_ldst_cmd ldst_cmd;
1103 int ret;
1104
1105 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
1106 ldst_cmd.op_to_addrspace =
1107 htonl(FW_CMD_OP_V(FW_LDST_CMD) |
1108 FW_CMD_REQUEST_F |
1109 FW_CMD_READ_F |
1110 FW_LDST_CMD_ADDRSPACE_V(
1111 FW_LDST_ADDRSPC_MPS));
1112 ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
1113 ldst_cmd.u.mps.fid_ctl =
1114 htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
1115 FW_LDST_CMD_CTL_V(idx));
1116 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd,
1117 sizeof(ldst_cmd), &ldst_cmd);
1118 if (ret)
1119 dev_warn(adap->pdev_dev, "Can't read MPS "
1120 "replication map for idx %d: %d\n",
1121 idx, -ret);
1122 else {
1123 rplc[0] = ntohl(ldst_cmd.u.mps.rplc31_0);
1124 rplc[1] = ntohl(ldst_cmd.u.mps.rplc63_32);
1125 rplc[2] = ntohl(ldst_cmd.u.mps.rplc95_64);
1126 rplc[3] = ntohl(ldst_cmd.u.mps.rplc127_96);
1127 }
1128 }
1129
1130 tcamxy2valmask(tcamx, tcamy, addr, &mask);
1131 seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x %012llx"
1132 "%3c %#x%4u%4d",
1133 idx, addr[0], addr[1], addr[2], addr[3], addr[4],
1134 addr[5], (unsigned long long)mask,
1135 (cls_lo & SRAM_VLD_F) ? 'Y' : 'N', PORTMAP_G(cls_hi),
1136 PF_G(cls_lo),
1137 (cls_lo & VF_VALID_F) ? VF_G(cls_lo) : -1);
1138 if (cls_lo & REPLICATE_F)
1139 seq_printf(seq, " %08x %08x %08x %08x",
1140 rplc[3], rplc[2], rplc[1], rplc[0]);
1141 else
1142 seq_printf(seq, "%36c", ' ');
1143 seq_printf(seq, "%4u%3u%3u%3u %#x\n",
1144 SRAM_PRIO0_G(cls_lo), SRAM_PRIO1_G(cls_lo),
1145 SRAM_PRIO2_G(cls_lo), SRAM_PRIO3_G(cls_lo),
1146 (cls_lo >> MULTILISTEN0_S) & 0xf);
1147 }
1148out: return 0;
1149}
1150
1151static inline void *mps_tcam_get_idx(struct seq_file *seq, loff_t pos)
1152{
1153 struct adapter *adap = seq->private;
1154 int max_mac_addr = is_t4(adap->params.chip) ?
1155 NUM_MPS_CLS_SRAM_L_INSTANCES :
1156 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
1157 return ((pos <= max_mac_addr) ? (void *)(uintptr_t)(pos + 1) : NULL);
1158}
1159
1160static void *mps_tcam_start(struct seq_file *seq, loff_t *pos)
1161{
1162 return *pos ? mps_tcam_get_idx(seq, *pos) : SEQ_START_TOKEN;
1163}
1164
1165static void *mps_tcam_next(struct seq_file *seq, void *v, loff_t *pos)
1166{
1167 ++*pos;
1168 return mps_tcam_get_idx(seq, *pos);
1169}
1170
1171static void mps_tcam_stop(struct seq_file *seq, void *v)
1172{
1173}
1174
1175static const struct seq_operations mps_tcam_seq_ops = {
1176 .start = mps_tcam_start,
1177 .next = mps_tcam_next,
1178 .stop = mps_tcam_stop,
1179 .show = mps_tcam_show
1180};
1181
1182static int mps_tcam_open(struct inode *inode, struct file *file)
1183{
1184 int res = seq_open(file, &mps_tcam_seq_ops);
1185
1186 if (!res) {
1187 struct seq_file *seq = file->private_data;
1188
1189 seq->private = inode->i_private;
1190 }
1191 return res;
1192}
1193
1194static const struct file_operations mps_tcam_debugfs_fops = {
1195 .owner = THIS_MODULE,
1196 .open = mps_tcam_open,
1197 .read = seq_read,
1198 .llseek = seq_lseek,
1199 .release = seq_release,
1200};
1201
1202/* Display various sensor information.
1203 */
1204static int sensors_show(struct seq_file *seq, void *v)
1205{
1206 struct adapter *adap = seq->private;
1207 u32 param[7], val[7];
1208 int ret;
1209
1210 /* Note that if the sensors haven't been initialized and turned on
1211 * we'll get values of 0, so treat those as "<unknown>" ...
1212 */
1213 param[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
1214 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) |
1215 FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_TMP));
1216 param[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
1217 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) |
1218 FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_VDD));
1219 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
1220 param, val);
1221
1222 if (ret < 0 || val[0] == 0)
1223 seq_puts(seq, "Temperature: <unknown>\n");
1224 else
1225 seq_printf(seq, "Temperature: %dC\n", val[0]);
1226
1227 if (ret < 0 || val[1] == 0)
1228 seq_puts(seq, "Core VDD: <unknown>\n");
1229 else
1230 seq_printf(seq, "Core VDD: %dmV\n", val[1]);
1231
1232 return 0;
1233}
1234
1235DEFINE_SIMPLE_DEBUGFS_FILE(sensors);
1236
1237#if IS_ENABLED(CONFIG_IPV6)
1238static int clip_tbl_open(struct inode *inode, struct file *file)
1239{
1240 return single_open(file, clip_tbl_show, inode->i_private);
1241}
1242
1243static const struct file_operations clip_tbl_debugfs_fops = {
1244 .owner = THIS_MODULE,
1245 .open = clip_tbl_open,
1246 .read = seq_read,
1247 .llseek = seq_lseek,
1248 .release = single_release
1249};
1250#endif
1251
1252/* RSS Table.
1253 */
1254
1255static int rss_show(struct seq_file *seq, void *v, int idx)
1256{
1257 u16 *entry = v;
1258
1259 seq_printf(seq, "%4d: %4u %4u %4u %4u %4u %4u %4u %4u\n",
1260 idx * 8, entry[0], entry[1], entry[2], entry[3], entry[4],
1261 entry[5], entry[6], entry[7]);
1262 return 0;
1263}
1264
1265static int rss_open(struct inode *inode, struct file *file)
1266{
1267 int ret;
1268 struct seq_tab *p;
1269 struct adapter *adap = inode->i_private;
1270
1271 p = seq_open_tab(file, RSS_NENTRIES / 8, 8 * sizeof(u16), 0, rss_show);
1272 if (!p)
1273 return -ENOMEM;
1274
1275 ret = t4_read_rss(adap, (u16 *)p->data);
1276 if (ret)
1277 seq_release_private(inode, file);
1278
1279 return ret;
1280}
1281
1282static const struct file_operations rss_debugfs_fops = {
1283 .owner = THIS_MODULE,
1284 .open = rss_open,
1285 .read = seq_read,
1286 .llseek = seq_lseek,
1287 .release = seq_release_private
1288};
1289
1290/* RSS Configuration.
1291 */
1292
1293/* Small utility function to return the string "yes" if the supplied
1294 * argument is non-zero and "no" otherwise.
1295 */
1296static const char *yesno(int x)
1297{
1298 static const char *yes = "yes";
1299 static const char *no = "no";
1300
1301 return x ? yes : no;
1302}
1303
1304static int rss_config_show(struct seq_file *seq, void *v)
1305{
1306 struct adapter *adapter = seq->private;
1307 static const char * const keymode[] = {
1308 "global",
1309 "global and per-VF scramble",
1310 "per-PF and per-VF scramble",
1311 "per-VF and per-VF scramble",
1312 };
1313 u32 rssconf;
1314
1315 rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_A);
1316 seq_printf(seq, "TP_RSS_CONFIG: %#x\n", rssconf);
1317 seq_printf(seq, " Tnl4TupEnIpv6: %3s\n", yesno(rssconf &
1318 TNL4TUPENIPV6_F));
1319 seq_printf(seq, " Tnl2TupEnIpv6: %3s\n", yesno(rssconf &
1320 TNL2TUPENIPV6_F));
1321 seq_printf(seq, " Tnl4TupEnIpv4: %3s\n", yesno(rssconf &
1322 TNL4TUPENIPV4_F));
1323 seq_printf(seq, " Tnl2TupEnIpv4: %3s\n", yesno(rssconf &
1324 TNL2TUPENIPV4_F));
1325 seq_printf(seq, " TnlTcpSel: %3s\n", yesno(rssconf & TNLTCPSEL_F));
1326 seq_printf(seq, " TnlIp6Sel: %3s\n", yesno(rssconf & TNLIP6SEL_F));
1327 seq_printf(seq, " TnlVrtSel: %3s\n", yesno(rssconf & TNLVRTSEL_F));
1328 seq_printf(seq, " TnlMapEn: %3s\n", yesno(rssconf & TNLMAPEN_F));
1329 seq_printf(seq, " OfdHashSave: %3s\n", yesno(rssconf &
1330 OFDHASHSAVE_F));
1331 seq_printf(seq, " OfdVrtSel: %3s\n", yesno(rssconf & OFDVRTSEL_F));
1332 seq_printf(seq, " OfdMapEn: %3s\n", yesno(rssconf & OFDMAPEN_F));
1333 seq_printf(seq, " OfdLkpEn: %3s\n", yesno(rssconf & OFDLKPEN_F));
1334 seq_printf(seq, " Syn4TupEnIpv6: %3s\n", yesno(rssconf &
1335 SYN4TUPENIPV6_F));
1336 seq_printf(seq, " Syn2TupEnIpv6: %3s\n", yesno(rssconf &
1337 SYN2TUPENIPV6_F));
1338 seq_printf(seq, " Syn4TupEnIpv4: %3s\n", yesno(rssconf &
1339 SYN4TUPENIPV4_F));
1340 seq_printf(seq, " Syn2TupEnIpv4: %3s\n", yesno(rssconf &
1341 SYN2TUPENIPV4_F));
1342 seq_printf(seq, " Syn4TupEnIpv6: %3s\n", yesno(rssconf &
1343 SYN4TUPENIPV6_F));
1344 seq_printf(seq, " SynIp6Sel: %3s\n", yesno(rssconf & SYNIP6SEL_F));
1345 seq_printf(seq, " SynVrt6Sel: %3s\n", yesno(rssconf & SYNVRTSEL_F));
1346 seq_printf(seq, " SynMapEn: %3s\n", yesno(rssconf & SYNMAPEN_F));
1347 seq_printf(seq, " SynLkpEn: %3s\n", yesno(rssconf & SYNLKPEN_F));
1348 seq_printf(seq, " ChnEn: %3s\n", yesno(rssconf &
1349 CHANNELENABLE_F));
1350 seq_printf(seq, " PrtEn: %3s\n", yesno(rssconf &
1351 PORTENABLE_F));
1352 seq_printf(seq, " TnlAllLkp: %3s\n", yesno(rssconf &
1353 TNLALLLOOKUP_F));
1354 seq_printf(seq, " VrtEn: %3s\n", yesno(rssconf &
1355 VIRTENABLE_F));
1356 seq_printf(seq, " CngEn: %3s\n", yesno(rssconf &
1357 CONGESTIONENABLE_F));
1358 seq_printf(seq, " HashToeplitz: %3s\n", yesno(rssconf &
1359 HASHTOEPLITZ_F));
1360 seq_printf(seq, " Udp4En: %3s\n", yesno(rssconf & UDPENABLE_F));
1361 seq_printf(seq, " Disable: %3s\n", yesno(rssconf & DISABLE_F));
1362
1363 seq_puts(seq, "\n");
1364
1365 rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_TNL_A);
1366 seq_printf(seq, "TP_RSS_CONFIG_TNL: %#x\n", rssconf);
1367 seq_printf(seq, " MaskSize: %3d\n", MASKSIZE_G(rssconf));
1368 seq_printf(seq, " MaskFilter: %3d\n", MASKFILTER_G(rssconf));
1369 if (CHELSIO_CHIP_VERSION(adapter->params.chip) > CHELSIO_T5) {
1370 seq_printf(seq, " HashAll: %3s\n",
1371 yesno(rssconf & HASHALL_F));
1372 seq_printf(seq, " HashEth: %3s\n",
1373 yesno(rssconf & HASHETH_F));
1374 }
1375 seq_printf(seq, " UseWireCh: %3s\n", yesno(rssconf & USEWIRECH_F));
1376
1377 seq_puts(seq, "\n");
1378
1379 rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_OFD_A);
1380 seq_printf(seq, "TP_RSS_CONFIG_OFD: %#x\n", rssconf);
1381 seq_printf(seq, " MaskSize: %3d\n", MASKSIZE_G(rssconf));
1382 seq_printf(seq, " RRCplMapEn: %3s\n", yesno(rssconf &
1383 RRCPLMAPEN_F));
1384 seq_printf(seq, " RRCplQueWidth: %3d\n", RRCPLQUEWIDTH_G(rssconf));
1385
1386 seq_puts(seq, "\n");
1387
1388 rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_SYN_A);
1389 seq_printf(seq, "TP_RSS_CONFIG_SYN: %#x\n", rssconf);
1390 seq_printf(seq, " MaskSize: %3d\n", MASKSIZE_G(rssconf));
1391 seq_printf(seq, " UseWireCh: %3s\n", yesno(rssconf & USEWIRECH_F));
1392
1393 seq_puts(seq, "\n");
1394
1395 rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A);
1396 seq_printf(seq, "TP_RSS_CONFIG_VRT: %#x\n", rssconf);
1397 if (CHELSIO_CHIP_VERSION(adapter->params.chip) > CHELSIO_T5) {
1398 seq_printf(seq, " KeyWrAddrX: %3d\n",
1399 KEYWRADDRX_G(rssconf));
1400 seq_printf(seq, " KeyExtend: %3s\n",
1401 yesno(rssconf & KEYEXTEND_F));
1402 }
1403 seq_printf(seq, " VfRdRg: %3s\n", yesno(rssconf & VFRDRG_F));
1404 seq_printf(seq, " VfRdEn: %3s\n", yesno(rssconf & VFRDEN_F));
1405 seq_printf(seq, " VfPerrEn: %3s\n", yesno(rssconf & VFPERREN_F));
1406 seq_printf(seq, " KeyPerrEn: %3s\n", yesno(rssconf & KEYPERREN_F));
1407 seq_printf(seq, " DisVfVlan: %3s\n", yesno(rssconf &
1408 DISABLEVLAN_F));
1409 seq_printf(seq, " EnUpSwt: %3s\n", yesno(rssconf & ENABLEUP0_F));
1410 seq_printf(seq, " HashDelay: %3d\n", HASHDELAY_G(rssconf));
1411 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
1412 seq_printf(seq, " VfWrAddr: %3d\n", VFWRADDR_G(rssconf));
1413 seq_printf(seq, " KeyMode: %s\n", keymode[KEYMODE_G(rssconf)]);
1414 seq_printf(seq, " VfWrEn: %3s\n", yesno(rssconf & VFWREN_F));
1415 seq_printf(seq, " KeyWrEn: %3s\n", yesno(rssconf & KEYWREN_F));
1416 seq_printf(seq, " KeyWrAddr: %3d\n", KEYWRADDR_G(rssconf));
1417
1418 seq_puts(seq, "\n");
1419
1420 rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_CNG_A);
1421 seq_printf(seq, "TP_RSS_CONFIG_CNG: %#x\n", rssconf);
1422 seq_printf(seq, " ChnCount3: %3s\n", yesno(rssconf & CHNCOUNT3_F));
1423 seq_printf(seq, " ChnCount2: %3s\n", yesno(rssconf & CHNCOUNT2_F));
1424 seq_printf(seq, " ChnCount1: %3s\n", yesno(rssconf & CHNCOUNT1_F));
1425 seq_printf(seq, " ChnCount0: %3s\n", yesno(rssconf & CHNCOUNT0_F));
1426 seq_printf(seq, " ChnUndFlow3: %3s\n", yesno(rssconf &
1427 CHNUNDFLOW3_F));
1428 seq_printf(seq, " ChnUndFlow2: %3s\n", yesno(rssconf &
1429 CHNUNDFLOW2_F));
1430 seq_printf(seq, " ChnUndFlow1: %3s\n", yesno(rssconf &
1431 CHNUNDFLOW1_F));
1432 seq_printf(seq, " ChnUndFlow0: %3s\n", yesno(rssconf &
1433 CHNUNDFLOW0_F));
1434 seq_printf(seq, " RstChn3: %3s\n", yesno(rssconf & RSTCHN3_F));
1435 seq_printf(seq, " RstChn2: %3s\n", yesno(rssconf & RSTCHN2_F));
1436 seq_printf(seq, " RstChn1: %3s\n", yesno(rssconf & RSTCHN1_F));
1437 seq_printf(seq, " RstChn0: %3s\n", yesno(rssconf & RSTCHN0_F));
1438 seq_printf(seq, " UpdVld: %3s\n", yesno(rssconf & UPDVLD_F));
1439 seq_printf(seq, " Xoff: %3s\n", yesno(rssconf & XOFF_F));
1440 seq_printf(seq, " UpdChn3: %3s\n", yesno(rssconf & UPDCHN3_F));
1441 seq_printf(seq, " UpdChn2: %3s\n", yesno(rssconf & UPDCHN2_F));
1442 seq_printf(seq, " UpdChn1: %3s\n", yesno(rssconf & UPDCHN1_F));
1443 seq_printf(seq, " UpdChn0: %3s\n", yesno(rssconf & UPDCHN0_F));
1444 seq_printf(seq, " Queue: %3d\n", QUEUE_G(rssconf));
1445
1446 return 0;
1447}
1448
1449DEFINE_SIMPLE_DEBUGFS_FILE(rss_config);
1450
1451/* RSS Secret Key.
1452 */
1453
1454static int rss_key_show(struct seq_file *seq, void *v)
1455{
1456 u32 key[10];
1457
1458 t4_read_rss_key(seq->private, key);
1459 seq_printf(seq, "%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x\n",
1460 key[9], key[8], key[7], key[6], key[5], key[4], key[3],
1461 key[2], key[1], key[0]);
1462 return 0;
1463}
1464
1465static int rss_key_open(struct inode *inode, struct file *file)
1466{
1467 return single_open(file, rss_key_show, inode->i_private);
1468}
1469
1470static ssize_t rss_key_write(struct file *file, const char __user *buf,
1471 size_t count, loff_t *pos)
1472{
1473 int i, j;
1474 u32 key[10];
1475 char s[100], *p;
1476 struct adapter *adap = FILE_DATA(file)->i_private;
1477
1478 if (count > sizeof(s) - 1)
1479 return -EINVAL;
1480 if (copy_from_user(s, buf, count))
1481 return -EFAULT;
1482 for (i = count; i > 0 && isspace(s[i - 1]); i--)
1483 ;
1484 s[i] = '\0';
1485
1486 for (p = s, i = 9; i >= 0; i--) {
1487 key[i] = 0;
1488 for (j = 0; j < 8; j++, p++) {
1489 if (!isxdigit(*p))
1490 return -EINVAL;
1491 key[i] = (key[i] << 4) | hex2val(*p);
1492 }
1493 }
1494
1495 t4_write_rss_key(adap, key, -1);
1496 return count;
1497}
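rss_key_write() expects 80 hex digits (320 bits) after trailing whitespace is stripped; it fills key[9] from the first eight digits down to key[0] from the last eight, rejecting any character that is not a hex digit. A stand-alone sketch of the same parse (user-space C, made-up key string; hex2val here re-implements the driver's small helper for illustration):

	#include <ctype.h>
	#include <stdio.h>
	#include <stdint.h>

	static unsigned int hex2val(char c)
	{
		return isdigit((unsigned char)c) ? c - '0'
						 : tolower((unsigned char)c) - 'a' + 10;
	}

	int main(void)
	{
		/* 80 hex digits, as written to the rss_key debugfs node */
		const char *s = "0123456789abcdef0123456789abcdef"
				"0123456789abcdef0123456789abcdef"
				"0123456789abcdef";
		const char *p = s;
		uint32_t key[10];
		int i, j;

		for (i = 9; i >= 0; i--) {
			key[i] = 0;
			for (j = 0; j < 8; j++, p++) {
				if (!isxdigit((unsigned char)*p))
					return 1;	/* reject bad input */
				key[i] = (key[i] << 4) | hex2val(*p);
			}
		}
		printf("key[9]=%08x key[0]=%08x\n", key[9], key[0]);
		return 0;
	}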
1498
1499static const struct file_operations rss_key_debugfs_fops = {
1500 .owner = THIS_MODULE,
1501 .open = rss_key_open,
1502 .read = seq_read,
1503 .llseek = seq_lseek,
1504 .release = single_release,
1505 .write = rss_key_write
1506};
1507
1508/* PF RSS Configuration.
1509 */
1510
1511struct rss_pf_conf {
1512 u32 rss_pf_map;
1513 u32 rss_pf_mask;
1514 u32 rss_pf_config;
1515};
1516
1517static int rss_pf_config_show(struct seq_file *seq, void *v, int idx)
1518{
1519 struct rss_pf_conf *pfconf;
1520
1521 if (v == SEQ_START_TOKEN) {
1522 /* use the 0th entry to dump the PF Map Index Size */
1523 pfconf = seq->private + offsetof(struct seq_tab, data);
1524 seq_printf(seq, "PF Map Index Size = %d\n\n",
1525 LKPIDXSIZE_G(pfconf->rss_pf_map));
1526
1527 seq_puts(seq, " RSS PF VF Hash Tuple Enable Default\n");
1528 seq_puts(seq, " Enable IPF Mask Mask IPv6 IPv4 UDP Queue\n");
1529 seq_puts(seq, " PF Map Chn Prt Map Size Size Four Two Four Two Four Ch1 Ch0\n");
1530 } else {
1531 #define G_PFnLKPIDX(map, n) \
1532 (((map) >> PF1LKPIDX_S*(n)) & PF0LKPIDX_M)
1533 #define G_PFnMSKSIZE(mask, n) \
1534 (((mask) >> PF1MSKSIZE_S*(n)) & PF1MSKSIZE_M)
1535
1536 pfconf = v;
1537 seq_printf(seq, "%3d %3s %3s %3s %3d %3d %3d %3s %3s %3s %3s %3s %3d %3d\n",
1538 idx,
1539 yesno(pfconf->rss_pf_config & MAPENABLE_F),
1540 yesno(pfconf->rss_pf_config & CHNENABLE_F),
1541 yesno(pfconf->rss_pf_config & PRTENABLE_F),
1542 G_PFnLKPIDX(pfconf->rss_pf_map, idx),
1543 G_PFnMSKSIZE(pfconf->rss_pf_mask, idx),
1544 IVFWIDTH_G(pfconf->rss_pf_config),
1545 yesno(pfconf->rss_pf_config & IP6FOURTUPEN_F),
1546 yesno(pfconf->rss_pf_config & IP6TWOTUPEN_F),
1547 yesno(pfconf->rss_pf_config & IP4FOURTUPEN_F),
1548 yesno(pfconf->rss_pf_config & IP4TWOTUPEN_F),
1549 yesno(pfconf->rss_pf_config & UDPFOURTUPEN_F),
1550 CH1DEFAULTQUEUE_G(pfconf->rss_pf_config),
1551 CH0DEFAULTQUEUE_G(pfconf->rss_pf_config));
1552
1553 #undef G_PFnLKPIDX
1554 #undef G_PFnMSKSIZE
1555 }
1556 return 0;
1557}
1558
1559static int rss_pf_config_open(struct inode *inode, struct file *file)
1560{
1561 struct adapter *adapter = inode->i_private;
1562 struct seq_tab *p;
1563 u32 rss_pf_map, rss_pf_mask;
1564 struct rss_pf_conf *pfconf;
1565 int pf;
1566
1567 p = seq_open_tab(file, 8, sizeof(*pfconf), 1, rss_pf_config_show);
1568 if (!p)
1569 return -ENOMEM;
1570
1571 pfconf = (struct rss_pf_conf *)p->data;
1572 rss_pf_map = t4_read_rss_pf_map(adapter);
1573 rss_pf_mask = t4_read_rss_pf_mask(adapter);
1574 for (pf = 0; pf < 8; pf++) {
1575 pfconf[pf].rss_pf_map = rss_pf_map;
1576 pfconf[pf].rss_pf_mask = rss_pf_mask;
1577 t4_read_rss_pf_config(adapter, pf, &pfconf[pf].rss_pf_config);
1578 }
1579 return 0;
1580}
1581
1582static const struct file_operations rss_pf_config_debugfs_fops = {
1583 .owner = THIS_MODULE,
1584 .open = rss_pf_config_open,
1585 .read = seq_read,
1586 .llseek = seq_lseek,
1587 .release = seq_release_private
1588};
1589
1590/* VF RSS Configuration.
1591 */
1592
1593struct rss_vf_conf {
1594 u32 rss_vf_vfl;
1595 u32 rss_vf_vfh;
1596};
1597
1598static int rss_vf_config_show(struct seq_file *seq, void *v, int idx)
1599{
1600 if (v == SEQ_START_TOKEN) {
1601 seq_puts(seq, " RSS Hash Tuple Enable\n");
1602 seq_puts(seq, " Enable IVF Dis Enb IPv6 IPv4 UDP Def Secret Key\n");
1603 seq_puts(seq, " VF Chn Prt Map VLAN uP Four Two Four Two Four Que Idx Hash\n");
1604 } else {
1605 struct rss_vf_conf *vfconf = v;
1606
1607 seq_printf(seq, "%3d %3s %3s %3d %3s %3s %3s %3s %3s %3s %3s %4d %3d %#10x\n",
1608 idx,
1609 yesno(vfconf->rss_vf_vfh & VFCHNEN_F),
1610 yesno(vfconf->rss_vf_vfh & VFPRTEN_F),
1611 VFLKPIDX_G(vfconf->rss_vf_vfh),
1612 yesno(vfconf->rss_vf_vfh & VFVLNEX_F),
1613 yesno(vfconf->rss_vf_vfh & VFUPEN_F),
1614 yesno(vfconf->rss_vf_vfh & VFIP6FOURTUPEN_F),
1615 yesno(vfconf->rss_vf_vfh & VFIP6TWOTUPEN_F),
1616 yesno(vfconf->rss_vf_vfh & VFIP4FOURTUPEN_F),
1617 yesno(vfconf->rss_vf_vfh & VFIP4TWOTUPEN_F),
1618 yesno(vfconf->rss_vf_vfh & ENABLEUDPHASH_F),
1619 DEFAULTQUEUE_G(vfconf->rss_vf_vfh),
1620 KEYINDEX_G(vfconf->rss_vf_vfh),
1621 vfconf->rss_vf_vfl);
1622 }
1623 return 0;
1624}
1625
1626static int rss_vf_config_open(struct inode *inode, struct file *file)
1627{
1628 struct adapter *adapter = inode->i_private;
1629 struct seq_tab *p;
1630 struct rss_vf_conf *vfconf;
1631 int vf;
1632
1633 p = seq_open_tab(file, 128, sizeof(*vfconf), 1, rss_vf_config_show);
1634 if (!p)
1635 return -ENOMEM;
1636
1637 vfconf = (struct rss_vf_conf *)p->data;
1638 for (vf = 0; vf < 128; vf++) {
1639 t4_read_rss_vf_config(adapter, vf, &vfconf[vf].rss_vf_vfl,
1640 &vfconf[vf].rss_vf_vfh);
1641 }
1642 return 0;
1643}
1644
1645static const struct file_operations rss_vf_config_debugfs_fops = {
1646 .owner = THIS_MODULE,
1647 .open = rss_vf_config_open,
1648 .read = seq_read,
1649 .llseek = seq_lseek,
1650 .release = seq_release_private
1651};
1652
1653/**
1654 * ethqset2pinfo - return port_info of an Ethernet Queue Set
1655 * @adap: the adapter
1656 * @qset: Ethernet Queue Set
1657 */
1658static inline struct port_info *ethqset2pinfo(struct adapter *adap, int qset)
1659{
1660 int pidx;
1661
1662 for_each_port(adap, pidx) {
1663 struct port_info *pi = adap2pinfo(adap, pidx);
1664
1665 if (qset >= pi->first_qset &&
1666 qset < pi->first_qset + pi->nqsets)
1667 return pi;
1668 }
1669
1670 /* should never happen! */
1671 BUG_ON(1);
1672 return NULL;
1673}
1674
1675static int sge_qinfo_show(struct seq_file *seq, void *v)
1676{
1677 struct adapter *adap = seq->private;
1678 int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
1679 int toe_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4);
1680 int rdma_entries = DIV_ROUND_UP(adap->sge.rdmaqs, 4);
1681 int ciq_entries = DIV_ROUND_UP(adap->sge.rdmaciqs, 4);
1682 int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
1683 int i, r = (uintptr_t)v - 1;
1684 int toe_idx = r - eth_entries;
1685 int rdma_idx = toe_idx - toe_entries;
1686 int ciq_idx = rdma_idx - rdma_entries;
1687 int ctrl_idx = ciq_idx - ciq_entries;
1688 int fq_idx = ctrl_idx - ctrl_entries;
1689
1690 if (r)
1691 seq_putc(seq, '\n');
1692
1693#define S3(fmt_spec, s, v) \
1694do { \
1695 seq_printf(seq, "%-12s", s); \
1696 for (i = 0; i < n; ++i) \
1697 seq_printf(seq, " %16" fmt_spec, v); \
1698 seq_putc(seq, '\n'); \
1699} while (0)
1700#define S(s, v) S3("s", s, v)
1701#define T(s, v) S3("u", s, tx[i].v)
1702#define R(s, v) S3("u", s, rx[i].v)
1703
1704 if (r < eth_entries) {
1705 int base_qset = r * 4;
1706 const struct sge_eth_rxq *rx = &adap->sge.ethrxq[base_qset];
1707 const struct sge_eth_txq *tx = &adap->sge.ethtxq[base_qset];
1708 int n = min(4, adap->sge.ethqsets - 4 * r);
1709
1710 S("QType:", "Ethernet");
1711 S("Interface:",
1712 rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
1713 T("TxQ ID:", q.cntxt_id);
1714 T("TxQ size:", q.size);
1715 T("TxQ inuse:", q.in_use);
1716 T("TxQ CIDX:", q.cidx);
1717 T("TxQ PIDX:", q.pidx);
1718#ifdef CONFIG_CHELSIO_T4_DCB
1719 T("DCB Prio:", dcb_prio);
1720 S3("u", "DCB PGID:",
1721 (ethqset2pinfo(adap, base_qset + i)->dcb.pgid >>
1722 4*(7-tx[i].dcb_prio)) & 0xf);
1723 S3("u", "DCB PFC:",
1724 (ethqset2pinfo(adap, base_qset + i)->dcb.pfcen >>
1725 1*(7-tx[i].dcb_prio)) & 0x1);
1726#endif
1727 R("RspQ ID:", rspq.abs_id);
1728 R("RspQ size:", rspq.size);
1729 R("RspQE size:", rspq.iqe_len);
1730 R("RspQ CIDX:", rspq.cidx);
1731 R("RspQ Gen:", rspq.gen);
1732 S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
1733 S3("u", "Intr pktcnt:",
1734 adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
1735 R("FL ID:", fl.cntxt_id);
1736 R("FL size:", fl.size - 8);
1737 R("FL pend:", fl.pend_cred);
1738 R("FL avail:", fl.avail);
1739 R("FL PIDX:", fl.pidx);
1740 R("FL CIDX:", fl.cidx);
1741 } else if (toe_idx < toe_entries) {
1742 const struct sge_ofld_rxq *rx = &adap->sge.ofldrxq[toe_idx * 4];
1743 const struct sge_ofld_txq *tx = &adap->sge.ofldtxq[toe_idx * 4];
1744 int n = min(4, adap->sge.ofldqsets - 4 * toe_idx);
1745
1746 S("QType:", "TOE");
1747 T("TxQ ID:", q.cntxt_id);
1748 T("TxQ size:", q.size);
1749 T("TxQ inuse:", q.in_use);
1750 T("TxQ CIDX:", q.cidx);
1751 T("TxQ PIDX:", q.pidx);
1752 R("RspQ ID:", rspq.abs_id);
1753 R("RspQ size:", rspq.size);
1754 R("RspQE size:", rspq.iqe_len);
1755 R("RspQ CIDX:", rspq.cidx);
1756 R("RspQ Gen:", rspq.gen);
1757 S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
1758 S3("u", "Intr pktcnt:",
1759 adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
1760 R("FL ID:", fl.cntxt_id);
1761 R("FL size:", fl.size - 8);
1762 R("FL pend:", fl.pend_cred);
1763 R("FL avail:", fl.avail);
1764 R("FL PIDX:", fl.pidx);
1765 R("FL CIDX:", fl.cidx);
1766 } else if (rdma_idx < rdma_entries) {
1767 const struct sge_ofld_rxq *rx =
1768 &adap->sge.rdmarxq[rdma_idx * 4];
1769 int n = min(4, adap->sge.rdmaqs - 4 * rdma_idx);
1770
1771 S("QType:", "RDMA-CPL");
1772 R("RspQ ID:", rspq.abs_id);
1773 R("RspQ size:", rspq.size);
1774 R("RspQE size:", rspq.iqe_len);
1775 R("RspQ CIDX:", rspq.cidx);
1776 R("RspQ Gen:", rspq.gen);
1777 S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
1778 S3("u", "Intr pktcnt:",
1779 adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
1780 R("FL ID:", fl.cntxt_id);
1781 R("FL size:", fl.size - 8);
1782 R("FL pend:", fl.pend_cred);
1783 R("FL avail:", fl.avail);
1784 R("FL PIDX:", fl.pidx);
1785 R("FL CIDX:", fl.cidx);
1786 } else if (ciq_idx < ciq_entries) {
1787 const struct sge_ofld_rxq *rx = &adap->sge.rdmaciq[ciq_idx * 4];
1788 int n = min(4, adap->sge.rdmaciqs - 4 * ciq_idx);
1789
1790 S("QType:", "RDMA-CIQ");
1791 R("RspQ ID:", rspq.abs_id);
1792 R("RspQ size:", rspq.size);
1793 R("RspQE size:", rspq.iqe_len);
1794 R("RspQ CIDX:", rspq.cidx);
1795 R("RspQ Gen:", rspq.gen);
1796 S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
1797 S3("u", "Intr pktcnt:",
1798 adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
1799 } else if (ctrl_idx < ctrl_entries) {
1800 const struct sge_ctrl_txq *tx = &adap->sge.ctrlq[ctrl_idx * 4];
1801 int n = min(4, adap->params.nports - 4 * ctrl_idx);
1802
1803 S("QType:", "Control");
1804 T("TxQ ID:", q.cntxt_id);
1805 T("TxQ size:", q.size);
1806 T("TxQ inuse:", q.in_use);
1807 T("TxQ CIDX:", q.cidx);
1808 T("TxQ PIDX:", q.pidx);
1809 } else if (fq_idx == 0) {
1810 const struct sge_rspq *evtq = &adap->sge.fw_evtq;
1811
1812 seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
1813 seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
1814 seq_printf(seq, "%-12s %16u\n", "RspQ size:", evtq->size);
1815 seq_printf(seq, "%-12s %16u\n", "RspQE size:", evtq->iqe_len);
1816 seq_printf(seq, "%-12s %16u\n", "RspQ CIDX:", evtq->cidx);
1817 seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen);
1818 seq_printf(seq, "%-12s %16u\n", "Intr delay:",
1819 qtimer_val(adap, evtq));
1820 seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
1821 adap->sge.counter_val[evtq->pktcnt_idx]);
1822 }
1823#undef R
1824#undef T
1825#undef S
1826#undef S3
1827return 0;
1828}
1829
1830static int sge_queue_entries(const struct adapter *adap)
1831{
1832 return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
1833 DIV_ROUND_UP(adap->sge.ofldqsets, 4) +
1834 DIV_ROUND_UP(adap->sge.rdmaqs, 4) +
1835 DIV_ROUND_UP(adap->sge.rdmaciqs, 4) +
1836 DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
1837}
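sge_qinfo_show() is handed a flat row number and peels off up to four queue sets per row, category by category, in the fixed order Ethernet, TOE, RDMA CPL, RDMA CIQ, control, and finally one row for the firmware event queue; sge_queue_entries() above computes exactly that total. A stand-alone sketch of the same bucketing with made-up queue counts:

	#include <stdio.h>

	#define GROUPS(n)	(((n) + 3) / 4)		/* DIV_ROUND_UP(n, 4) */

	int main(void)
	{
		/* hypothetical queue counts */
		int eth = GROUPS(10), toe = GROUPS(4), rdma = GROUPS(2);
		int ciq = GROUPS(2), ctrl = GROUPS(4);
		int entries = eth + toe + rdma + ciq + ctrl + 1, r;

		for (r = 0; r < entries; r++) {
			if (r < eth)
				printf("row %d: Ethernet (qsets from %d)\n", r, 4 * r);
			else if (r - eth < toe)
				printf("row %d: TOE\n", r);
			else if (r - eth - toe < rdma)
				printf("row %d: RDMA-CPL\n", r);
			else if (r - eth - toe - rdma < ciq)
				printf("row %d: RDMA-CIQ\n", r);
			else if (r - eth - toe - rdma - ciq < ctrl)
				printf("row %d: Control\n", r);
			else
				printf("row %d: FW event queue\n", r);
		}
		return 0;
	}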
1838
1839static void *sge_queue_start(struct seq_file *seq, loff_t *pos)
1840{
1841 int entries = sge_queue_entries(seq->private);
1842
1843 return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
1844}
1845
1846static void sge_queue_stop(struct seq_file *seq, void *v)
1847{
1848}
1849
1850static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos)
1851{
1852 int entries = sge_queue_entries(seq->private);
1853
1854 ++*pos;
1855 return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
1856}
1857
1858static const struct seq_operations sge_qinfo_seq_ops = {
1859 .start = sge_queue_start,
1860 .next = sge_queue_next,
1861 .stop = sge_queue_stop,
1862 .show = sge_qinfo_show
1863};
1864
1865static int sge_qinfo_open(struct inode *inode, struct file *file)
1866{
1867 int res = seq_open(file, &sge_qinfo_seq_ops);
1868
1869 if (!res) {
1870 struct seq_file *seq = file->private_data;
1871
1872 seq->private = inode->i_private;
1873 }
1874 return res;
1875}
1876
1877static const struct file_operations sge_qinfo_debugfs_fops = {
1878 .owner = THIS_MODULE,
1879 .open = sge_qinfo_open,
1880 .read = seq_read,
1881 .llseek = seq_lseek,
1882 .release = seq_release,
1883};
1884
1885int mem_open(struct inode *inode, struct file *file)
1886{
1887 unsigned int mem;
1888 struct adapter *adap;
1889
1890 file->private_data = inode->i_private;
1891
1892 mem = (uintptr_t)file->private_data & 0x3;
1893 adap = file->private_data - mem;
1894
1895 (void)t4_fwcache(adap, FW_PARAM_DEV_FWCACHE_FLUSH);
1896
1897 return 0;
1898}
1899
46 1900 static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
47 1901 		loff_t *ppos)
48 1902 {
@@ -80,7 +1934,6 @@ static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
80 1934 	*ppos = pos + count;
81 1935 	return count;
82 1936 }
83
84 1937 static const struct file_operations mem_debugfs_fops = {
85 1938 	.owner = THIS_MODULE,
86 1939 	.open = simple_open,
@@ -88,6 +1941,12 @@ static const struct file_operations mem_debugfs_fops = {
88 1941 	.llseek = default_llseek,
89 1942 };
90 1943
1944static void set_debugfs_file_size(struct dentry *de, loff_t size)
1945{
1946 if (!IS_ERR(de) && de->d_inode)
1947 de->d_inode->i_size = size;
1948}
1949
91 1950 static void add_debugfs_mem(struct adapter *adap, const char *name,
92 1951 			unsigned int idx, unsigned int size_mb)
93 1952 {
@@ -119,14 +1978,65 @@ int t4_setup_debugfs(struct adapter *adap)
119 1978 {
120 1979 	int i;
121 1980 	u32 size;
1981 struct dentry *de;
122 1982
123 1983 	static struct t4_debugfs_entry t4_debugfs_files[] = {
1984 { "cim_la", &cim_la_fops, S_IRUSR, 0 },
1985 { "cim_qcfg", &cim_qcfg_fops, S_IRUSR, 0 },
1986 { "clk", &clk_debugfs_fops, S_IRUSR, 0 },
1987 { "devlog", &devlog_fops, S_IRUSR, 0 },
1988 { "mbox0", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 0 },
1989 { "mbox1", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 1 },
1990 { "mbox2", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 2 },
1991 { "mbox3", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 3 },
1992 { "mbox4", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 4 },
1993 { "mbox5", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 5 },
1994 { "mbox6", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 6 },
1995 { "mbox7", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 7 },
124 1996 		{ "l2t", &t4_l2t_fops, S_IRUSR, 0},
1997 { "mps_tcam", &mps_tcam_debugfs_fops, S_IRUSR, 0 },
1998 { "rss", &rss_debugfs_fops, S_IRUSR, 0 },
1999 { "rss_config", &rss_config_debugfs_fops, S_IRUSR, 0 },
2000 { "rss_key", &rss_key_debugfs_fops, S_IRUSR, 0 },
2001 { "rss_pf_config", &rss_pf_config_debugfs_fops, S_IRUSR, 0 },
2002 { "rss_vf_config", &rss_vf_config_debugfs_fops, S_IRUSR, 0 },
2003 { "sge_qinfo", &sge_qinfo_debugfs_fops, S_IRUSR, 0 },
2004 { "ibq_tp0", &cim_ibq_fops, S_IRUSR, 0 },
2005 { "ibq_tp1", &cim_ibq_fops, S_IRUSR, 1 },
2006 { "ibq_ulp", &cim_ibq_fops, S_IRUSR, 2 },
2007 { "ibq_sge0", &cim_ibq_fops, S_IRUSR, 3 },
2008 { "ibq_sge1", &cim_ibq_fops, S_IRUSR, 4 },
2009 { "ibq_ncsi", &cim_ibq_fops, S_IRUSR, 5 },
2010 { "obq_ulp0", &cim_obq_fops, S_IRUSR, 0 },
2011 { "obq_ulp1", &cim_obq_fops, S_IRUSR, 1 },
2012 { "obq_ulp2", &cim_obq_fops, S_IRUSR, 2 },
2013 { "obq_ulp3", &cim_obq_fops, S_IRUSR, 3 },
2014 { "obq_sge", &cim_obq_fops, S_IRUSR, 4 },
2015 { "obq_ncsi", &cim_obq_fops, S_IRUSR, 5 },
2016 { "tp_la", &tp_la_fops, S_IRUSR, 0 },
2017 { "ulprx_la", &ulprx_la_fops, S_IRUSR, 0 },
2018 { "sensors", &sensors_debugfs_fops, S_IRUSR, 0 },
2019 { "pm_stats", &pm_stats_debugfs_fops, S_IRUSR, 0 },
2020 { "cctrl", &cctrl_tbl_debugfs_fops, S_IRUSR, 0 },
2021#if IS_ENABLED(CONFIG_IPV6)
2022 { "clip_tbl", &clip_tbl_debugfs_fops, S_IRUSR, 0 },
2023#endif
2024 };
2025
2026 /* Debug FS nodes common to all T5 and later adapters.
2027 */
2028 static struct t4_debugfs_entry t5_debugfs_files[] = {
2029 { "obq_sge_rx_q0", &cim_obq_fops, S_IRUSR, 6 },
2030 { "obq_sge_rx_q1", &cim_obq_fops, S_IRUSR, 7 },
125 2031 	};
126 2032
127 2033 	add_debugfs_files(adap,
128 2034 			  t4_debugfs_files,
129 2035 			  ARRAY_SIZE(t4_debugfs_files));
2036 if (!is_t4(adap->params.chip))
2037 add_debugfs_files(adap,
2038 t5_debugfs_files,
2039 ARRAY_SIZE(t5_debugfs_files));
130 2040
131 2041 	i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
132 2042 	if (i & EDRAM0_ENABLE_F) {
@@ -154,5 +2064,10 @@ int t4_setup_debugfs(struct adapter *adap)
154 2064 			EXT_MEM1_SIZE_G(size));
155 2065 	}
156 2066 	}
2067
2068 de = debugfs_create_file("flash", S_IRUSR, adap->debugfs_root, adap,
2069 &flash_debugfs_fops);
2070 set_debugfs_file_size(de, adap->params.sf_size);
2071
157 2072 	return 0;
158 2073 }
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
index a3d8867efd3d..b63cfee2d963 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
@@ -37,6 +37,21 @@
37 37
38 38 #include <linux/export.h>
39 39
40#define FILE_DATA(_file) ((_file)->f_path.dentry->d_inode)
41
42#define DEFINE_SIMPLE_DEBUGFS_FILE(name) \
43static int name##_open(struct inode *inode, struct file *file) \
44{ \
45 return single_open(file, name##_show, inode->i_private); \
46} \
47static const struct file_operations name##_debugfs_fops = { \
48 .owner = THIS_MODULE, \
49 .open = name##_open, \
50 .read = seq_read, \
51 .llseek = seq_lseek, \
52 .release = single_release \
53}
54
40struct t4_debugfs_entry { 55struct t4_debugfs_entry {
41 const char *name; 56 const char *name;
42 const struct file_operations *ops; 57 const struct file_operations *ops;
@@ -44,9 +59,27 @@ struct t4_debugfs_entry {
44 unsigned char data; 59 unsigned char data;
45}; 60};
46 61
62struct seq_tab {
63 int (*show)(struct seq_file *seq, void *v, int idx);
64 unsigned int rows; /* # of entries */
65 unsigned char width; /* size in bytes of each entry */
66 unsigned char skip_first; /* whether the first line is a header */
67 char data[0]; /* the table data */
68};
69
70static inline unsigned int hex2val(char c)
71{
72 return isdigit(c) ? c - '0' : tolower(c) - 'a' + 10;
73}
74
75struct seq_tab *seq_open_tab(struct file *f, unsigned int rows,
76 unsigned int width, unsigned int have_header,
77 int (*show)(struct seq_file *seq, void *v, int i));
78
47int t4_setup_debugfs(struct adapter *adap); 79int t4_setup_debugfs(struct adapter *adap);
48void add_debugfs_files(struct adapter *adap, 80void add_debugfs_files(struct adapter *adap,
49 struct t4_debugfs_entry *files, 81 struct t4_debugfs_entry *files,
50 unsigned int nfiles); 82 unsigned int nfiles);
83int mem_open(struct inode *inode, struct file *file);
51 84
52#endif 85#endif
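The DEFINE_SIMPLE_DEBUGFS_FILE() macro added here turns a single seq_file show routine into a complete single_open()-based node. A hedged usage sketch follows; the node name foo and its show routine are made-up examples, only the macro and the adapter field it prints come from this patch.

#include <linux/seq_file.h>

static int foo_show(struct seq_file *seq, void *v)
{
	struct adapter *adap = seq->private;	/* i_private handed over by single_open() */

	seq_printf(seq, "nports: %u\n", adap->params.nports);
	return 0;
}

DEFINE_SIMPLE_DEBUGFS_FILE(foo);	/* emits foo_open() and foo_debugfs_fops */

/* later, typically from t4_setup_debugfs():
 *	debugfs_create_file("foo", S_IRUSR, adap->debugfs_root, adap,
 *			    &foo_debugfs_fops);
 */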
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index ccf3436024bc..a22cf932ca35 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -62,14 +62,18 @@
62#include <net/netevent.h> 62#include <net/netevent.h>
63#include <net/addrconf.h> 63#include <net/addrconf.h>
64#include <net/bonding.h> 64#include <net/bonding.h>
65#include <net/addrconf.h>
65#include <asm/uaccess.h> 66#include <asm/uaccess.h>
66 67
67#include "cxgb4.h" 68#include "cxgb4.h"
68#include "t4_regs.h" 69#include "t4_regs.h"
70#include "t4_values.h"
69#include "t4_msg.h" 71#include "t4_msg.h"
70#include "t4fw_api.h" 72#include "t4fw_api.h"
73#include "t4fw_version.h"
71#include "cxgb4_dcb.h" 74#include "cxgb4_dcb.h"
72#include "cxgb4_debugfs.h" 75#include "cxgb4_debugfs.h"
76#include "clip_tbl.h"
73#include "l2t.h" 77#include "l2t.h"
74 78
75#ifdef DRV_VERSION 79#ifdef DRV_VERSION
@@ -78,99 +82,6 @@
78#define DRV_VERSION "2.0.0-ko" 82#define DRV_VERSION "2.0.0-ko"
79#define DRV_DESC "Chelsio T4/T5 Network Driver" 83#define DRV_DESC "Chelsio T4/T5 Network Driver"
80 84
81/*
82 * Max interrupt hold-off timer value in us. Queues fall back to this value
83 * under extreme memory pressure so it's largish to give the system time to
84 * recover.
85 */
86#define MAX_SGE_TIMERVAL 200U
87
88enum {
89 /*
90 * Physical Function provisioning constants.
91 */
92 PFRES_NVI = 4, /* # of Virtual Interfaces */
93 PFRES_NETHCTRL = 128, /* # of EQs used for ETH or CTRL Qs */
94 PFRES_NIQFLINT = 128, /* # of ingress Qs/w Free List(s)/intr
95 */
96 PFRES_NEQ = 256, /* # of egress queues */
97 PFRES_NIQ = 0, /* # of ingress queues */
98 PFRES_TC = 0, /* PCI-E traffic class */
99 PFRES_NEXACTF = 128, /* # of exact MPS filters */
100
101 PFRES_R_CAPS = FW_CMD_CAP_PF,
102 PFRES_WX_CAPS = FW_CMD_CAP_PF,
103
104#ifdef CONFIG_PCI_IOV
105 /*
106 * Virtual Function provisioning constants. We need two extra Ingress
107 * Queues with Interrupt capability to serve as the VF's Firmware
108 * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
109 * neither will have Free Lists associated with them). For each
110 * Ethernet/Control Egress Queue and for each Free List, we need an
111 * Egress Context.
112 */
113 VFRES_NPORTS = 1, /* # of "ports" per VF */
114 VFRES_NQSETS = 2, /* # of "Queue Sets" per VF */
115
116 VFRES_NVI = VFRES_NPORTS, /* # of Virtual Interfaces */
117 VFRES_NETHCTRL = VFRES_NQSETS, /* # of EQs used for ETH or CTRL Qs */
118 VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
119 VFRES_NEQ = VFRES_NQSETS*2, /* # of egress queues */
120 VFRES_NIQ = 0, /* # of non-fl/int ingress queues */
121 VFRES_TC = 0, /* PCI-E traffic class */
122 VFRES_NEXACTF = 16, /* # of exact MPS filters */
123
124 VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
125 VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
126#endif
127};
128
129/*
130 * Provide a Port Access Rights Mask for the specified PF/VF. This is very
131 * static and likely not to be useful in the long run. We really need to
132 * implement some form of persistent configuration which the firmware
133 * controls.
134 */
135static unsigned int pfvfres_pmask(struct adapter *adapter,
136 unsigned int pf, unsigned int vf)
137{
138 unsigned int portn, portvec;
139
140 /*
141 * Give PF's access to all of the ports.
142 */
143 if (vf == 0)
144 return FW_PFVF_CMD_PMASK_M;
145
146 /*
147 * For VFs, we'll assign them access to the ports based purely on the
148 * PF. We assign active ports in order, wrapping around if there are
149 * fewer active ports than PFs: e.g. active port[pf % nports].
150 * Unfortunately the adapter's port_info structs haven't been
151 * initialized yet so we have to compute this.
152 */
153 if (adapter->params.nports == 0)
154 return 0;
155
156 portn = pf % adapter->params.nports;
157 portvec = adapter->params.portvec;
158 for (;;) {
159 /*
160 * Isolate the lowest set bit in the port vector. If we're at
161 * the port number that we want, return that as the pmask.
162 * otherwise mask that bit out of the port vector and
163 * decrement our port number ...
164 */
165 unsigned int pmask = portvec ^ (portvec & (portvec-1));
166 if (portn == 0)
167 return pmask;
168 portn--;
169 portvec &= ~pmask;
170 }
171 /*NOTREACHED*/
172}
173
174enum { 85enum {
175 MAX_TXQ_ENTRIES = 16384, 86 MAX_TXQ_ENTRIES = 16384,
176 MAX_CTRL_TXQ_ENTRIES = 1024, 87 MAX_CTRL_TXQ_ENTRIES = 1024,
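The deleted pfvfres_pmask() walked the active ports by repeatedly isolating the lowest set bit of the port vector with portvec ^ (portvec & (portvec - 1)). A small standalone program showing that walk, with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned int portvec = 0x0b;	/* ports 0, 1 and 3 active */
	unsigned int portn = 2;		/* look for the third active port */

	for (;;) {
		/* isolate the lowest set bit, exactly as pfvfres_pmask() did */
		unsigned int pmask = portvec ^ (portvec & (portvec - 1));

		if (portn == 0) {
			printf("pmask = %#x\n", pmask);	/* prints 0x8, i.e. port 3 */
			return 0;
		}
		portn--;
		portvec &= ~pmask;	/* drop that port and keep scanning */
	}
}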
@@ -263,7 +174,8 @@ MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter")
263static uint force_old_init; 174static uint force_old_init;
264 175
265module_param(force_old_init, uint, 0644); 176module_param(force_old_init, uint, 0644);
266MODULE_PARM_DESC(force_old_init, "Force old initialization sequence"); 177MODULE_PARM_DESC(force_old_init, "Force old initialization sequence, deprecated"
178 " parameter");
267 179
268static int dflt_msg_enable = DFLT_MSG_ENABLE; 180static int dflt_msg_enable = DFLT_MSG_ENABLE;
269 181
@@ -292,13 +204,14 @@ static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
292 204
293module_param_array(intr_holdoff, uint, NULL, 0644); 205module_param_array(intr_holdoff, uint, NULL, 0644);
294MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers " 206MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
295 "0..4 in microseconds"); 207 "0..4 in microseconds, deprecated parameter");
296 208
297static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 }; 209static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
298 210
299module_param_array(intr_cnt, uint, NULL, 0644); 211module_param_array(intr_cnt, uint, NULL, 0644);
300MODULE_PARM_DESC(intr_cnt, 212MODULE_PARM_DESC(intr_cnt,
301 "thresholds 1..3 for queue interrupt packet counters"); 213 "thresholds 1..3 for queue interrupt packet counters, "
214 "deprecated parameter");
302 215
303/* 216/*
304 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers 217 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
@@ -318,7 +231,8 @@ static bool vf_acls;
318 231
319#ifdef CONFIG_PCI_IOV 232#ifdef CONFIG_PCI_IOV
320module_param(vf_acls, bool, 0644); 233module_param(vf_acls, bool, 0644);
321MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement"); 234MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement, "
235 "deprecated parameter");
322 236
323/* Configure the number of PCI-E Virtual Function which are to be instantiated 237/* Configure the number of PCI-E Virtual Function which are to be instantiated
324 * on SR-IOV Capable Physical Functions. 238 * on SR-IOV Capable Physical Functions.
@@ -340,32 +254,11 @@ module_param(select_queue, int, 0644);
340MODULE_PARM_DESC(select_queue, 254MODULE_PARM_DESC(select_queue,
341 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method."); 255 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
342 256
343/* 257static unsigned int tp_vlan_pri_map = HW_TPL_FR_MT_PR_IV_P_FC;
344 * The filter TCAM has a fixed portion and a variable portion. The fixed
345 * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
346 * ports. The variable portion is 36 bits which can include things like Exact
347 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
348 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
349 * far exceed the 36-bit budget for this "compressed" header portion of the
350 * filter. Thus, we have a scarce resource which must be carefully managed.
351 *
352 * By default we set this up to mostly match the set of filter matching
353 * capabilities of T3 but with accommodations for some of T4's more
354 * interesting features:
355 *
356 * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
357 * [Inner] VLAN (17), Port (3), FCoE (1) }
358 */
359enum {
360 TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
361 TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
362 TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
363};
364
365static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
366 258
367module_param(tp_vlan_pri_map, uint, 0644); 259module_param(tp_vlan_pri_map, uint, 0644);
368MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration"); 260MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration, "
261 "deprecated parameter");
369 262
370static struct dentry *cxgb4_debugfs_root; 263static struct dentry *cxgb4_debugfs_root;
371 264
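The comment removed above documented the 36-bit budget for the compressed filter tuple; the default field set it lists, IP fragment (1), MPS match type (3), IP protocol (8), inner VLAN (17), port (3) and FCoE (1), sums to 33 bits and so fits. A one-off standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	/* widths of the default compressed filter fields, per the old comment */
	static const int widths[] = { 1, 3, 8, 17, 3, 1 };
	unsigned int i;
	int bits = 0;

	for (i = 0; i < sizeof(widths) / sizeof(widths[0]); i++)
		bits += widths[i];
	printf("default filter fields use %d of 36 bits\n", bits);	/* 33 */
	return 0;
}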
@@ -671,7 +564,7 @@ static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
671 if (idx >= adap->tids.ftid_base && nidx < 564 if (idx >= adap->tids.ftid_base && nidx <
672 (adap->tids.nftids + adap->tids.nsftids)) { 565 (adap->tids.nftids + adap->tids.nsftids)) {
673 idx = nidx; 566 idx = nidx;
674 ret = GET_TCB_COOKIE(rpl->cookie); 567 ret = TCB_COOKIE_G(rpl->cookie);
675 f = &adap->tids.ftid_tab[idx]; 568 f = &adap->tids.ftid_tab[idx];
676 569
677 if (ret == FW_FILTER_WR_FLT_DELETED) { 570 if (ret == FW_FILTER_WR_FLT_DELETED) {
@@ -723,7 +616,7 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
723 616
724 if (likely(opcode == CPL_SGE_EGR_UPDATE)) { 617 if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
725 const struct cpl_sge_egr_update *p = (void *)rsp; 618 const struct cpl_sge_egr_update *p = (void *)rsp;
726 unsigned int qid = EGR_QID(ntohl(p->opcode_qid)); 619 unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
727 struct sge_txq *txq; 620 struct sge_txq *txq;
728 621
729 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start]; 622 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
@@ -833,11 +726,11 @@ static void disable_msi(struct adapter *adapter)
833static irqreturn_t t4_nondata_intr(int irq, void *cookie) 726static irqreturn_t t4_nondata_intr(int irq, void *cookie)
834{ 727{
835 struct adapter *adap = cookie; 728 struct adapter *adap = cookie;
729 u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));
836 730
837 u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE)); 731 if (v & PFSW_F) {
838 if (v & PFSW) {
839 adap->swintr = 1; 732 adap->swintr = 1;
840 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v); 733 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
841 } 734 }
842 t4_slow_intr_handler(adap); 735 t4_slow_intr_handler(adap);
843 return IRQ_HANDLED; 736 return IRQ_HANDLED;
@@ -1030,8 +923,14 @@ static void quiesce_rx(struct adapter *adap)
1030 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) { 923 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
1031 struct sge_rspq *q = adap->sge.ingr_map[i]; 924 struct sge_rspq *q = adap->sge.ingr_map[i];
1032 925
1033 if (q && q->handler) 926 if (q && q->handler) {
1034 napi_disable(&q->napi); 927 napi_disable(&q->napi);
928 local_bh_disable();
929 while (!cxgb_poll_lock_napi(q))
930 mdelay(1);
931 local_bh_enable();
932 }
933
1035 } 934 }
1036} 935}
1037 936
@@ -1047,12 +946,14 @@ static void enable_rx(struct adapter *adap)
1047 946
1048 if (!q) 947 if (!q)
1049 continue; 948 continue;
1050 if (q->handler) 949 if (q->handler) {
950 cxgb_busy_poll_init_lock(q);
1051 napi_enable(&q->napi); 951 napi_enable(&q->napi);
952 }
1052 /* 0-increment GTS to start the timer and enable interrupts */ 953 /* 0-increment GTS to start the timer and enable interrupts */
1053 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), 954 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
1054 SEINTARM(q->intr_params) | 955 SEINTARM_V(q->intr_params) |
1055 INGRESSQID(q->cntxt_id)); 956 INGRESSQID_V(q->cntxt_id));
1056 } 957 }
1057} 958}
1058 959
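The two hunks above tie the RX queues into busy polling: enable_rx() now initialises the per-queue poll lock before napi_enable(), and quiesce_rx() spins, with a 1 ms delay per try, until it owns that lock so no CPU can still be busy-polling the queue when it is torn down. A userspace model of that handshake, not the driver code; the flag and the simulated owner are made up.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag q_owned = ATOMIC_FLAG_INIT;	/* stands in for the per-queue poll lock */

static bool poll_lock(void)
{
	/* true only if neither NAPI nor a busy-polling CPU holds the queue */
	return !atomic_flag_test_and_set(&q_owned);
}

static void poll_unlock(void)
{
	atomic_flag_clear(&q_owned);
}

int main(void)
{
	int tries = 0;

	(void)atomic_flag_test_and_set(&q_owned);	/* pretend a busy poller owns the queue */

	while (!poll_lock()) {
		tries++;		/* the driver waits 1 ms (mdelay) per retry here */
		if (tries == 3)
			poll_unlock();	/* simulate the busy poller letting go */
	}
	printf("queue quiesced after %d retries\n", tries);
	return 0;
}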
@@ -1176,10 +1077,10 @@ freeout: t4_free_sge_resources(adap);
1176 } 1077 }
1177 1078
1178 t4_write_reg(adap, is_t4(adap->params.chip) ? 1079 t4_write_reg(adap, is_t4(adap->params.chip) ?
1179 MPS_TRC_RSS_CONTROL : 1080 MPS_TRC_RSS_CONTROL_A :
1180 MPS_T5_TRC_RSS_CONTROL, 1081 MPS_T5_TRC_RSS_CONTROL_A,
1181 RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) | 1082 RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
1182 QUEUENUMBER(s->ethrxq[0].rspq.abs_id)); 1083 QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
1183 return 0; 1084 return 0;
1184} 1085}
1185 1086
@@ -1518,6 +1419,7 @@ static int get_eeprom_len(struct net_device *dev)
1518static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 1419static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1519{ 1420{
1520 struct adapter *adapter = netdev2adap(dev); 1421 struct adapter *adapter = netdev2adap(dev);
1422 u32 exprom_vers;
1521 1423
1522 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); 1424 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1523 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 1425 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
@@ -1535,6 +1437,14 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1535 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers), 1437 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
1536 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers), 1438 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
1537 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers)); 1439 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
1440
1441 if (!t4_get_exprom_version(adapter, &exprom_vers))
1442 snprintf(info->erom_version, sizeof(info->erom_version),
1443 "%u.%u.%u.%u",
1444 FW_HDR_FW_VER_MAJOR_G(exprom_vers),
1445 FW_HDR_FW_VER_MINOR_G(exprom_vers),
1446 FW_HDR_FW_VER_MICRO_G(exprom_vers),
1447 FW_HDR_FW_VER_BUILD_G(exprom_vers));
1538} 1448}
1539 1449
1540static void get_strings(struct net_device *dev, u32 stringset, u8 *data) 1450static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
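get_drvinfo() now also reports the expansion ROM version read via t4_get_exprom_version(); the 32-bit word is unpacked with the same FW_HDR_FW_VER_*_G accessors as the firmware version. Assuming the usual one byte per component with the major number in the top byte, a standalone decode of a made-up value looks like this:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t vers = 0x01060200;	/* hypothetical 1.6.2.0 */

	printf("%u.%u.%u.%u\n",
	       (vers >> 24) & 0xff,	/* major */
	       (vers >> 16) & 0xff,	/* minor */
	       (vers >> 8) & 0xff,	/* micro */
	       vers & 0xff);		/* build */
	return 0;
}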
@@ -1589,9 +1499,9 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1589 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data); 1499 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1590 data += sizeof(struct queue_port_stats) / sizeof(u64); 1500 data += sizeof(struct queue_port_stats) / sizeof(u64);
1591 if (!is_t4(adapter->params.chip)) { 1501 if (!is_t4(adapter->params.chip)) {
1592 t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7)); 1502 t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7));
1593 val1 = t4_read_reg(adapter, SGE_STAT_TOTAL); 1503 val1 = t4_read_reg(adapter, SGE_STAT_TOTAL_A);
1594 val2 = t4_read_reg(adapter, SGE_STAT_MATCH); 1504 val2 = t4_read_reg(adapter, SGE_STAT_MATCH_A);
1595 *data = val1 - val2; 1505 *data = val1 - val2;
1596 data++; 1506 data++;
1597 *data = val2; 1507 *data = val2;
@@ -2608,8 +2518,8 @@ static int closest_thres(const struct sge *s, int thres)
2608/* 2518/*
2609 * Return a queue's interrupt hold-off time in us. 0 means no timer. 2519 * Return a queue's interrupt hold-off time in us. 0 means no timer.
2610 */ 2520 */
2611static unsigned int qtimer_val(const struct adapter *adap, 2521unsigned int qtimer_val(const struct adapter *adap,
2612 const struct sge_rspq *q) 2522 const struct sge_rspq *q)
2613{ 2523{
2614 unsigned int idx = q->intr_params >> 1; 2524 unsigned int idx = q->intr_params >> 1;
2615 2525
@@ -3346,40 +3256,6 @@ static int tid_init(struct tid_info *t)
3346 return 0; 3256 return 0;
3347} 3257}
3348 3258
3349int cxgb4_clip_get(const struct net_device *dev,
3350 const struct in6_addr *lip)
3351{
3352 struct adapter *adap;
3353 struct fw_clip_cmd c;
3354
3355 adap = netdev2adap(dev);
3356 memset(&c, 0, sizeof(c));
3357 c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
3358 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
3359 c.alloc_to_len16 = htonl(FW_CLIP_CMD_ALLOC_F | FW_LEN16(c));
3360 c.ip_hi = *(__be64 *)(lip->s6_addr);
3361 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3362 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3363}
3364EXPORT_SYMBOL(cxgb4_clip_get);
3365
3366int cxgb4_clip_release(const struct net_device *dev,
3367 const struct in6_addr *lip)
3368{
3369 struct adapter *adap;
3370 struct fw_clip_cmd c;
3371
3372 adap = netdev2adap(dev);
3373 memset(&c, 0, sizeof(c));
3374 c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
3375 FW_CMD_REQUEST_F | FW_CMD_READ_F);
3376 c.alloc_to_len16 = htonl(FW_CLIP_CMD_FREE_F | FW_LEN16(c));
3377 c.ip_hi = *(__be64 *)(lip->s6_addr);
3378 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3379 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3380}
3381EXPORT_SYMBOL(cxgb4_clip_release);
3382
3383/** 3259/**
3384 * cxgb4_create_server - create an IP server 3260 * cxgb4_create_server - create an IP server
3385 * @dev: the device 3261 * @dev: the device
@@ -3415,8 +3291,8 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
3415 req->peer_ip = htonl(0); 3291 req->peer_ip = htonl(0);
3416 chan = rxq_to_chan(&adap->sge, queue); 3292 chan = rxq_to_chan(&adap->sge, queue);
3417 req->opt0 = cpu_to_be64(TX_CHAN_V(chan)); 3293 req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
3418 req->opt1 = cpu_to_be64(CONN_POLICY_ASK | 3294 req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
3419 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); 3295 SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
3420 ret = t4_mgmt_tx(adap, skb); 3296 ret = t4_mgmt_tx(adap, skb);
3421 return net_xmit_eval(ret); 3297 return net_xmit_eval(ret);
3422} 3298}
@@ -3458,8 +3334,8 @@ int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
3458 req->peer_ip_lo = cpu_to_be64(0); 3334 req->peer_ip_lo = cpu_to_be64(0);
3459 chan = rxq_to_chan(&adap->sge, queue); 3335 chan = rxq_to_chan(&adap->sge, queue);
3460 req->opt0 = cpu_to_be64(TX_CHAN_V(chan)); 3336 req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
3461 req->opt1 = cpu_to_be64(CONN_POLICY_ASK | 3337 req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
3462 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); 3338 SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
3463 ret = t4_mgmt_tx(adap, skb); 3339 ret = t4_mgmt_tx(adap, skb);
3464 return net_xmit_eval(ret); 3340 return net_xmit_eval(ret);
3465} 3341}
@@ -3482,8 +3358,8 @@ int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
3482 req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req)); 3358 req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
3483 INIT_TP_WR(req, 0); 3359 INIT_TP_WR(req, 0);
3484 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid)); 3360 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
3485 req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) : 3361 req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
3486 LISTSVR_IPV6(0)) | QUEUENO(queue)); 3362 LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
3487 ret = t4_mgmt_tx(adap, skb); 3363 ret = t4_mgmt_tx(adap, skb);
3488 return net_xmit_eval(ret); 3364 return net_xmit_eval(ret);
3489} 3365}
@@ -3600,14 +3476,14 @@ unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3600 struct adapter *adap = netdev2adap(dev); 3476 struct adapter *adap = netdev2adap(dev);
3601 u32 v1, v2, lp_count, hp_count; 3477 u32 v1, v2, lp_count, hp_count;
3602 3478
3603 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); 3479 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
3604 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2); 3480 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
3605 if (is_t4(adap->params.chip)) { 3481 if (is_t4(adap->params.chip)) {
3606 lp_count = G_LP_COUNT(v1); 3482 lp_count = LP_COUNT_G(v1);
3607 hp_count = G_HP_COUNT(v1); 3483 hp_count = HP_COUNT_G(v1);
3608 } else { 3484 } else {
3609 lp_count = G_LP_COUNT_T5(v1); 3485 lp_count = LP_COUNT_T5_G(v1);
3610 hp_count = G_HP_COUNT_T5(v2); 3486 hp_count = HP_COUNT_T5_G(v2);
3611 } 3487 }
3612 return lpfifo ? lp_count : hp_count; 3488 return lpfifo ? lp_count : hp_count;
3613} 3489}
@@ -3653,10 +3529,10 @@ void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3653{ 3529{
3654 struct adapter *adap = netdev2adap(dev); 3530 struct adapter *adap = netdev2adap(dev);
3655 3531
3656 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask); 3532 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
3657 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) | 3533 t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
3658 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) | 3534 HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
3659 HPZ3(pgsz_order[3])); 3535 HPZ3_V(pgsz_order[3]));
3660} 3536}
3661EXPORT_SYMBOL(cxgb4_iscsi_init); 3537EXPORT_SYMBOL(cxgb4_iscsi_init);
3662 3538
@@ -3666,14 +3542,14 @@ int cxgb4_flush_eq_cache(struct net_device *dev)
3666 int ret; 3542 int ret;
3667 3543
3668 ret = t4_fwaddrspace_write(adap, adap->mbox, 3544 ret = t4_fwaddrspace_write(adap, adap->mbox,
3669 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000); 3545 0xe1000000 + SGE_CTXT_CMD_A, 0x20000000);
3670 return ret; 3546 return ret;
3671} 3547}
3672EXPORT_SYMBOL(cxgb4_flush_eq_cache); 3548EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3673 3549
3674static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx) 3550static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
3675{ 3551{
3676 u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8; 3552 u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
3677 __be64 indices; 3553 __be64 indices;
3678 int ret; 3554 int ret;
3679 3555
@@ -3702,14 +3578,20 @@ int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
3702 3578
3703 if (pidx != hw_pidx) { 3579 if (pidx != hw_pidx) {
3704 u16 delta; 3580 u16 delta;
3581 u32 val;
3705 3582
3706 if (pidx >= hw_pidx) 3583 if (pidx >= hw_pidx)
3707 delta = pidx - hw_pidx; 3584 delta = pidx - hw_pidx;
3708 else 3585 else
3709 delta = size - hw_pidx + pidx; 3586 delta = size - hw_pidx + pidx;
3587
3588 if (is_t4(adap->params.chip))
3589 val = PIDX_V(delta);
3590 else
3591 val = PIDX_T5_V(delta);
3710 wmb(); 3592 wmb();
3711 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), 3593 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
3712 QID(qid) | PIDX(delta)); 3594 QID_V(qid) | val);
3713 } 3595 }
3714out: 3596out:
3715 return ret; 3597 return ret;
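Both doorbell resync paths (here and in sync_txq_pidx() further down) compute how far the hardware producer index lags the software one, wrapping around the ring when needed, and then pick PIDX_V() or PIDX_T5_V() to encode it. The wrap arithmetic in isolation, with made-up numbers:

#include <stdio.h>

static unsigned int pidx_delta(unsigned int pidx, unsigned int hw_pidx,
			       unsigned int size)
{
	/* distance from the hardware index forward to the software index */
	return pidx >= hw_pidx ? pidx - hw_pidx : size - hw_pidx + pidx;
}

int main(void)
{
	printf("%u\n", pidx_delta(10, 4, 1024));	/* 6: no wrap */
	printf("%u\n", pidx_delta(2, 1020, 1024));	/* 6: wrapped past the end */
	return 0;
}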
@@ -3721,8 +3603,8 @@ void cxgb4_disable_db_coalescing(struct net_device *dev)
3721 struct adapter *adap; 3603 struct adapter *adap;
3722 3604
3723 adap = netdev2adap(dev); 3605 adap = netdev2adap(dev);
3724 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 3606 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F,
3725 F_NOCOALESCE); 3607 NOCOALESCE_F);
3726} 3608}
3727EXPORT_SYMBOL(cxgb4_disable_db_coalescing); 3609EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
3728 3610
@@ -3731,7 +3613,7 @@ void cxgb4_enable_db_coalescing(struct net_device *dev)
3731 struct adapter *adap; 3613 struct adapter *adap;
3732 3614
3733 adap = netdev2adap(dev); 3615 adap = netdev2adap(dev);
3734 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0); 3616 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F, 0);
3735} 3617}
3736EXPORT_SYMBOL(cxgb4_enable_db_coalescing); 3618EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
3737 3619
@@ -3809,8 +3691,8 @@ u64 cxgb4_read_sge_timestamp(struct net_device *dev)
3809 struct adapter *adap; 3691 struct adapter *adap;
3810 3692
3811 adap = netdev2adap(dev); 3693 adap = netdev2adap(dev);
3812 lo = t4_read_reg(adap, SGE_TIMESTAMP_LO); 3694 lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
3813 hi = GET_TSVAL(t4_read_reg(adap, SGE_TIMESTAMP_HI)); 3695 hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));
3814 3696
3815 return ((u64)hi << 32) | (u64)lo; 3697 return ((u64)hi << 32) | (u64)lo;
3816} 3698}
@@ -3870,14 +3752,14 @@ static void drain_db_fifo(struct adapter *adap, int usecs)
3870 u32 v1, v2, lp_count, hp_count; 3752 u32 v1, v2, lp_count, hp_count;
3871 3753
3872 do { 3754 do {
3873 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); 3755 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
3874 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2); 3756 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
3875 if (is_t4(adap->params.chip)) { 3757 if (is_t4(adap->params.chip)) {
3876 lp_count = G_LP_COUNT(v1); 3758 lp_count = LP_COUNT_G(v1);
3877 hp_count = G_HP_COUNT(v1); 3759 hp_count = HP_COUNT_G(v1);
3878 } else { 3760 } else {
3879 lp_count = G_LP_COUNT_T5(v1); 3761 lp_count = LP_COUNT_T5_G(v1);
3880 hp_count = G_HP_COUNT_T5(v2); 3762 hp_count = HP_COUNT_T5_G(v2);
3881 } 3763 }
3882 3764
3883 if (lp_count == 0 && hp_count == 0) 3765 if (lp_count == 0 && hp_count == 0)
@@ -3904,8 +3786,8 @@ static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
3904 * are committed before we tell HW about them. 3786 * are committed before we tell HW about them.
3905 */ 3787 */
3906 wmb(); 3788 wmb();
3907 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), 3789 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
3908 QID(q->cntxt_id) | PIDX(q->db_pidx_inc)); 3790 QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
3909 q->db_pidx_inc = 0; 3791 q->db_pidx_inc = 0;
3910 } 3792 }
3911 q->db_disabled = 0; 3793 q->db_disabled = 0;
@@ -3952,9 +3834,9 @@ static void process_db_full(struct work_struct *work)
3952 drain_db_fifo(adap, dbfifo_drain_delay); 3834 drain_db_fifo(adap, dbfifo_drain_delay);
3953 enable_dbs(adap); 3835 enable_dbs(adap);
3954 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); 3836 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
3955 t4_set_reg_field(adap, SGE_INT_ENABLE3, 3837 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
3956 DBFIFO_HP_INT | DBFIFO_LP_INT, 3838 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
3957 DBFIFO_HP_INT | DBFIFO_LP_INT); 3839 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
3958} 3840}
3959 3841
3960static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q) 3842static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
@@ -3968,14 +3850,20 @@ static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
3968 goto out; 3850 goto out;
3969 if (q->db_pidx != hw_pidx) { 3851 if (q->db_pidx != hw_pidx) {
3970 u16 delta; 3852 u16 delta;
3853 u32 val;
3971 3854
3972 if (q->db_pidx >= hw_pidx) 3855 if (q->db_pidx >= hw_pidx)
3973 delta = q->db_pidx - hw_pidx; 3856 delta = q->db_pidx - hw_pidx;
3974 else 3857 else
3975 delta = q->size - hw_pidx + q->db_pidx; 3858 delta = q->size - hw_pidx + q->db_pidx;
3859
3860 if (is_t4(adap->params.chip))
3861 val = PIDX_V(delta);
3862 else
3863 val = PIDX_T5_V(delta);
3976 wmb(); 3864 wmb();
3977 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), 3865 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
3978 QID(q->cntxt_id) | PIDX(delta)); 3866 QID_V(q->cntxt_id) | val);
3979 } 3867 }
3980out: 3868out:
3981 q->db_disabled = 0; 3869 q->db_disabled = 0;
@@ -4024,14 +3912,14 @@ static void process_db_drop(struct work_struct *work)
4024 dev_err(adap->pdev_dev, "doorbell drop recovery: " 3912 dev_err(adap->pdev_dev, "doorbell drop recovery: "
4025 "qid=%d, pidx_inc=%d\n", qid, pidx_inc); 3913 "qid=%d, pidx_inc=%d\n", qid, pidx_inc);
4026 else 3914 else
4027 writel(PIDX_T5(pidx_inc) | QID(bar2_qid), 3915 writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
4028 adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL); 3916 adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);
4029 3917
4030 /* Re-enable BAR2 WC */ 3918 /* Re-enable BAR2 WC */
4031 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15); 3919 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
4032 } 3920 }
4033 3921
4034 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0); 3922 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
4035} 3923}
4036 3924
4037void t4_db_full(struct adapter *adap) 3925void t4_db_full(struct adapter *adap)
@@ -4039,8 +3927,8 @@ void t4_db_full(struct adapter *adap)
4039 if (is_t4(adap->params.chip)) { 3927 if (is_t4(adap->params.chip)) {
4040 disable_dbs(adap); 3928 disable_dbs(adap);
4041 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); 3929 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
4042 t4_set_reg_field(adap, SGE_INT_ENABLE3, 3930 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
4043 DBFIFO_HP_INT | DBFIFO_LP_INT, 0); 3931 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
4044 queue_work(adap->workq, &adap->db_full_task); 3932 queue_work(adap->workq, &adap->db_full_task);
4045 } 3933 }
4046} 3934}
@@ -4081,7 +3969,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
4081 lli.nports = adap->params.nports; 3969 lli.nports = adap->params.nports;
4082 lli.wr_cred = adap->params.ofldq_wr_cred; 3970 lli.wr_cred = adap->params.ofldq_wr_cred;
4083 lli.adapter_type = adap->params.chip; 3971 lli.adapter_type = adap->params.chip;
4084 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2)); 3972 lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
4085 lli.cclk_ps = 1000000000 / adap->params.vpd.cclk; 3973 lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
4086 lli.udb_density = 1 << adap->params.sge.eq_qpp; 3974 lli.udb_density = 1 << adap->params.sge.eq_qpp;
4087 lli.ucq_density = 1 << adap->params.sge.iq_qpp; 3975 lli.ucq_density = 1 << adap->params.sge.iq_qpp;
@@ -4089,8 +3977,8 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
4089 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */ 3977 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
4090 for (i = 0; i < NCHAN; i++) 3978 for (i = 0; i < NCHAN; i++)
4091 lli.tx_modq[i] = i; 3979 lli.tx_modq[i] = i;
4092 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS); 3980 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
4093 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL); 3981 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
4094 lli.fw_vers = adap->params.fw_vers; 3982 lli.fw_vers = adap->params.fw_vers;
4095 lli.dbfifo_int_thresh = dbfifo_int_thresh; 3983 lli.dbfifo_int_thresh = dbfifo_int_thresh;
4096 lli.sge_ingpadboundary = adap->sge.fl_align; 3984 lli.sge_ingpadboundary = adap->sge.fl_align;
@@ -4220,148 +4108,61 @@ int cxgb4_unregister_uld(enum cxgb4_uld type)
4220} 4108}
4221EXPORT_SYMBOL(cxgb4_unregister_uld); 4109EXPORT_SYMBOL(cxgb4_unregister_uld);
4222 4110
4223/* Check if netdev on which event is occured belongs to us or not. Return
4223/* Check if netdev on which the event occurred belongs to us or not. Return
4224 * success (true) if it belongs otherwise failure (false).
4225 * Called with rcu_read_lock() held.
4226 */
4227#if IS_ENABLED(CONFIG_IPV6) 4111#if IS_ENABLED(CONFIG_IPV6)
4228static bool cxgb4_netdev(const struct net_device *netdev) 4112static int cxgb4_inet6addr_handler(struct notifier_block *this,
4113 unsigned long event, void *data)
4229{ 4114{
4115 struct inet6_ifaddr *ifa = data;
4116 struct net_device *event_dev = ifa->idev->dev;
4117 const struct device *parent = NULL;
4118#if IS_ENABLED(CONFIG_BONDING)
4230 struct adapter *adap; 4119 struct adapter *adap;
4231 int i; 4120#endif
4232 4121 if (event_dev->priv_flags & IFF_802_1Q_VLAN)
4233 list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node) 4122 event_dev = vlan_dev_real_dev(event_dev);
4234 for (i = 0; i < MAX_NPORTS; i++) 4123#if IS_ENABLED(CONFIG_BONDING)
4235 if (adap->port[i] == netdev) 4124 if (event_dev->flags & IFF_MASTER) {
4236 return true; 4125 list_for_each_entry(adap, &adapter_list, list_node) {
4237 return false; 4126 switch (event) {
4238} 4127 case NETDEV_UP:
4128 cxgb4_clip_get(adap->port[0],
4129 (const u32 *)ifa, 1);
4130 break;
4131 case NETDEV_DOWN:
4132 cxgb4_clip_release(adap->port[0],
4133 (const u32 *)ifa, 1);
4134 break;
4135 default:
4136 break;
4137 }
4138 }
4139 return NOTIFY_OK;
4140 }
4141#endif
4239 4142
4240static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa, 4143 if (event_dev)
4241 unsigned long event) 4144 parent = event_dev->dev.parent;
4242{
4243 int ret = NOTIFY_DONE;
4244 4145
4245 rcu_read_lock(); 4146 if (parent && parent->driver == &cxgb4_driver.driver) {
4246 if (cxgb4_netdev(event_dev)) {
4247 switch (event) { 4147 switch (event) {
4248 case NETDEV_UP: 4148 case NETDEV_UP:
4249 ret = cxgb4_clip_get(event_dev, &ifa->addr); 4149 cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
4250 if (ret < 0) {
4251 rcu_read_unlock();
4252 return ret;
4253 }
4254 ret = NOTIFY_OK;
4255 break; 4150 break;
4256 case NETDEV_DOWN: 4151 case NETDEV_DOWN:
4257 cxgb4_clip_release(event_dev, &ifa->addr); 4152 cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
4258 ret = NOTIFY_OK;
4259 break; 4153 break;
4260 default: 4154 default:
4261 break; 4155 break;
4262 } 4156 }
4263 } 4157 }
4264 rcu_read_unlock(); 4158 return NOTIFY_OK;
4265 return ret;
4266}
4267
4268static int cxgb4_inet6addr_handler(struct notifier_block *this,
4269 unsigned long event, void *data)
4270{
4271 struct inet6_ifaddr *ifa = data;
4272 struct net_device *event_dev;
4273 int ret = NOTIFY_DONE;
4274 struct bonding *bond = netdev_priv(ifa->idev->dev);
4275 struct list_head *iter;
4276 struct slave *slave;
4277 struct pci_dev *first_pdev = NULL;
4278
4279 if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) {
4280 event_dev = vlan_dev_real_dev(ifa->idev->dev);
4281 ret = clip_add(event_dev, ifa, event);
4282 } else if (ifa->idev->dev->flags & IFF_MASTER) {
4283 /* It is possible that two different adapters are bonded in one
4284 * bond. We need to find such different adapters and add clip
4285 * in all of them only once.
4286 */
4287 bond_for_each_slave(bond, slave, iter) {
4288 if (!first_pdev) {
4289 ret = clip_add(slave->dev, ifa, event);
4290 /* If clip_add is success then only initialize
4291 * first_pdev since it means it is our device
4292 */
4293 if (ret == NOTIFY_OK)
4294 first_pdev = to_pci_dev(
4295 slave->dev->dev.parent);
4296 } else if (first_pdev !=
4297 to_pci_dev(slave->dev->dev.parent))
4298 ret = clip_add(slave->dev, ifa, event);
4299 }
4300 } else
4301 ret = clip_add(ifa->idev->dev, ifa, event);
4302
4303 return ret;
4304} 4159}
4305 4160
4161static bool inet6addr_registered;
4306static struct notifier_block cxgb4_inet6addr_notifier = { 4162static struct notifier_block cxgb4_inet6addr_notifier = {
4307 .notifier_call = cxgb4_inet6addr_handler 4163 .notifier_call = cxgb4_inet6addr_handler
4308}; 4164};
4309 4165
4310/* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
4311 * a physical device.
4312 * The physical device reference is needed to send the actual CLIP command.
4313 */
4314static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
4315{
4316 struct inet6_dev *idev = NULL;
4317 struct inet6_ifaddr *ifa;
4318 int ret = 0;
4319
4320 idev = __in6_dev_get(root_dev);
4321 if (!idev)
4322 return ret;
4323
4324 read_lock_bh(&idev->lock);
4325 list_for_each_entry(ifa, &idev->addr_list, if_list) {
4326 ret = cxgb4_clip_get(dev, &ifa->addr);
4327 if (ret < 0)
4328 break;
4329 }
4330 read_unlock_bh(&idev->lock);
4331
4332 return ret;
4333}
4334
4335static int update_root_dev_clip(struct net_device *dev)
4336{
4337 struct net_device *root_dev = NULL;
4338 int i, ret = 0;
4339
4340 /* First populate the real net device's IPv6 addresses */
4341 ret = update_dev_clip(dev, dev);
4342 if (ret)
4343 return ret;
4344
4345 /* Parse all bond and vlan devices layered on top of the physical dev */
4346 root_dev = netdev_master_upper_dev_get_rcu(dev);
4347 if (root_dev) {
4348 ret = update_dev_clip(root_dev, dev);
4349 if (ret)
4350 return ret;
4351 }
4352
4353 for (i = 0; i < VLAN_N_VID; i++) {
4354 root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
4355 if (!root_dev)
4356 continue;
4357
4358 ret = update_dev_clip(root_dev, dev);
4359 if (ret)
4360 break;
4361 }
4362 return ret;
4363}
4364
4365static void update_clip(const struct adapter *adap) 4166static void update_clip(const struct adapter *adap)
4366{ 4167{
4367 int i; 4168 int i;
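The rewritten IPv6 address notifier above drops the old per-slave bonding walk: VLAN devices are first resolved to their real device, bonding masters are matched by walking adapter_list, and everything else is matched by comparing the parent device's driver, with cxgb4_clip_get()/cxgb4_clip_release() (now provided by the shared CLIP table code) called on NETDEV_UP and NETDEV_DOWN. A hedged sketch of how such a notifier block is normally hooked up at module init and exit; the function names here are illustrative, and the driver's own registration (guarded by inet6addr_registered) lives elsewhere in this patch.

#include <net/addrconf.h>

static void example_register_notifier(void)
{
	/* begin receiving IPv6 address NETDEV_UP/NETDEV_DOWN events */
	register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
}

static void example_unregister_notifier(void)
{
	unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
}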
@@ -4375,7 +4176,7 @@ static void update_clip(const struct adapter *adap)
4375 ret = 0; 4176 ret = 0;
4376 4177
4377 if (dev) 4178 if (dev)
4378 ret = update_root_dev_clip(dev); 4179 ret = cxgb4_update_root_dev_clip(dev);
4379 4180
4380 if (ret < 0) 4181 if (ret < 0)
4381 break; 4182 break;
@@ -4567,13 +4368,13 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
4567 f->fs.val.lip[i] = val[i]; 4368 f->fs.val.lip[i] = val[i];
4568 f->fs.mask.lip[i] = ~0; 4369 f->fs.mask.lip[i] = ~0;
4569 } 4370 }
4570 if (adap->params.tp.vlan_pri_map & F_PORT) { 4371 if (adap->params.tp.vlan_pri_map & PORT_F) {
4571 f->fs.val.iport = port; 4372 f->fs.val.iport = port;
4572 f->fs.mask.iport = mask; 4373 f->fs.mask.iport = mask;
4573 } 4374 }
4574 } 4375 }
4575 4376
4576 if (adap->params.tp.vlan_pri_map & F_PROTOCOL) { 4377 if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
4577 f->fs.val.proto = IPPROTO_TCP; 4378 f->fs.val.proto = IPPROTO_TCP;
4578 f->fs.mask.proto = ~0; 4379 f->fs.mask.proto = ~0;
4579 } 4380 }
@@ -4779,11 +4580,15 @@ static const struct net_device_ops cxgb4_netdev_ops = {
4779#ifdef CONFIG_NET_POLL_CONTROLLER 4580#ifdef CONFIG_NET_POLL_CONTROLLER
4780 .ndo_poll_controller = cxgb_netpoll, 4581 .ndo_poll_controller = cxgb_netpoll,
4781#endif 4582#endif
4583#ifdef CONFIG_NET_RX_BUSY_POLL
4584 .ndo_busy_poll = cxgb_busy_poll,
4585#endif
4586
4782}; 4587};
4783 4588
4784void t4_fatal_err(struct adapter *adap) 4589void t4_fatal_err(struct adapter *adap)
4785{ 4590{
4786 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0); 4591 t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0);
4787 t4_intr_disable(adap); 4592 t4_intr_disable(adap);
4788 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n"); 4593 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
4789} 4594}
@@ -4858,16 +4663,16 @@ static void setup_memwin(struct adapter *adap)
4858 mem_win2_base = MEMWIN2_BASE_T5; 4663 mem_win2_base = MEMWIN2_BASE_T5;
4859 mem_win2_aperture = MEMWIN2_APERTURE_T5; 4664 mem_win2_aperture = MEMWIN2_APERTURE_T5;
4860 } 4665 }
4861 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0), 4666 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 0),
4862 mem_win0_base | BIR(0) | 4667 mem_win0_base | BIR_V(0) |
4863 WINDOW(ilog2(MEMWIN0_APERTURE) - 10)); 4668 WINDOW_V(ilog2(MEMWIN0_APERTURE) - 10));
4864 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1), 4669 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 1),
4865 mem_win1_base | BIR(0) | 4670 mem_win1_base | BIR_V(0) |
4866 WINDOW(ilog2(MEMWIN1_APERTURE) - 10)); 4671 WINDOW_V(ilog2(MEMWIN1_APERTURE) - 10));
4867 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2), 4672 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2),
4868 mem_win2_base | BIR(0) | 4673 mem_win2_base | BIR_V(0) |
4869 WINDOW(ilog2(mem_win2_aperture) - 10)); 4674 WINDOW_V(ilog2(mem_win2_aperture) - 10));
4870 t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2)); 4675 t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2));
4871} 4676}
4872 4677
4873static void setup_memwin_rdma(struct adapter *adap) 4678static void setup_memwin_rdma(struct adapter *adap)
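setup_memwin() programs each PCIe memory access window with its base address plus a WINDOW field holding ilog2(aperture) - 10, i.e. the aperture expressed as a power of two relative to 1 KB. A quick standalone check of that encoding for a hypothetical 64 KB window:

#include <stdio.h>

static unsigned int ilog2_u32(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int aperture = 64 * 1024;	/* made-up window size */

	printf("WINDOW field = %u\n", ilog2_u32(aperture) - 10);	/* 6 */
	return 0;
}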
@@ -4881,13 +4686,13 @@ static void setup_memwin_rdma(struct adapter *adap)
4881 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres); 4686 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
4882 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10; 4687 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
4883 t4_write_reg(adap, 4688 t4_write_reg(adap,
4884 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3), 4689 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
4885 start | BIR(1) | WINDOW(ilog2(sz_kb))); 4690 start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
4886 t4_write_reg(adap, 4691 t4_write_reg(adap,
4887 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3), 4692 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
4888 adap->vres.ocq.start); 4693 adap->vres.ocq.start);
4889 t4_read_reg(adap, 4694 t4_read_reg(adap,
4890 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3)); 4695 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
4891 } 4696 }
4892} 4697}
4893 4698
@@ -4936,38 +4741,38 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4936 t4_sge_init(adap); 4741 t4_sge_init(adap);
4937 4742
4938 /* tweak some settings */ 4743 /* tweak some settings */
4939 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849); 4744 t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
4940 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12)); 4745 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
4941 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG); 4746 t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
4942 v = t4_read_reg(adap, TP_PIO_DATA); 4747 v = t4_read_reg(adap, TP_PIO_DATA_A);
4943 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR); 4748 t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);
4944 4749
4945 /* first 4 Tx modulation queues point to consecutive Tx channels */ 4750 /* first 4 Tx modulation queues point to consecutive Tx channels */
4946 adap->params.tp.tx_modq_map = 0xE4; 4751 adap->params.tp.tx_modq_map = 0xE4;
4947 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP, 4752 t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
4948 V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map)); 4753 TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));
4949 4754
4950 /* associate each Tx modulation queue with consecutive Tx channels */ 4755 /* associate each Tx modulation queue with consecutive Tx channels */
4951 v = 0x84218421; 4756 v = 0x84218421;
4952 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, 4757 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4953 &v, 1, A_TP_TX_SCHED_HDR); 4758 &v, 1, TP_TX_SCHED_HDR_A);
4954 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, 4759 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4955 &v, 1, A_TP_TX_SCHED_FIFO); 4760 &v, 1, TP_TX_SCHED_FIFO_A);
4956 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, 4761 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4957 &v, 1, A_TP_TX_SCHED_PCMD); 4762 &v, 1, TP_TX_SCHED_PCMD_A);
4958 4763
4959#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */ 4764#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
4960 if (is_offload(adap)) { 4765 if (is_offload(adap)) {
4961 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 4766 t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
4962 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | 4767 TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4963 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | 4768 TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4964 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | 4769 TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4965 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT)); 4770 TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4966 t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT, 4771 t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
4967 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | 4772 TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4968 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | 4773 TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4969 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | 4774 TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4970 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT)); 4775 TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4971 } 4776 }
4972 4777
4973 /* get basic stuff going */ 4778 /* get basic stuff going */
@@ -5013,16 +4818,16 @@ static int adap_init0_tweaks(struct adapter *adapter)
5013 rx_dma_offset); 4818 rx_dma_offset);
5014 rx_dma_offset = 2; 4819 rx_dma_offset = 2;
5015 } 4820 }
5016 t4_set_reg_field(adapter, SGE_CONTROL, 4821 t4_set_reg_field(adapter, SGE_CONTROL_A,
5017 PKTSHIFT_MASK, 4822 PKTSHIFT_V(PKTSHIFT_M),
5018 PKTSHIFT(rx_dma_offset)); 4823 PKTSHIFT_V(rx_dma_offset));
5019 4824
5020 /* 4825 /*
5021 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux 4826 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
5022 * adds the pseudo header itself. 4827 * adds the pseudo header itself.
5023 */ 4828 */
5024 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG, 4829 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
5025 CSUM_HAS_PSEUDO_HDR, 0); 4830 CSUM_HAS_PSEUDO_HDR_F, 0);
5026 4831
5027 return 0; 4832 return 0;
5028} 4833}
@@ -5046,7 +4851,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
5046 */ 4851 */
5047 if (reset) { 4852 if (reset) {
5048 ret = t4_fw_reset(adapter, adapter->mbox, 4853 ret = t4_fw_reset(adapter, adapter->mbox,
5049 PIORSTMODE | PIORST); 4854 PIORSTMODE_F | PIORST_F);
5050 if (ret < 0) 4855 if (ret < 0)
5051 goto bye; 4856 goto bye;
5052 } 4857 }
@@ -5212,12 +5017,9 @@ static int adap_init0_config(struct adapter *adapter, int reset)
5212 if (ret < 0) 5017 if (ret < 0)
5213 goto bye; 5018 goto bye;
5214 5019
5215 /* 5020 /* Emit Firmware Configuration File information and return
5216 * Return successfully and note that we're operating with parameters 5021 * successfully.
5217 * not supplied by the driver, rather than from hard-wired
5218 * initialization constants burried in the driver.
5219 */ 5022 */
5220 adapter->flags |= USING_SOFT_PARAMS;
5221 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\ 5023 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
5222 "Configuration File \"%s\", version %#x, computed checksum %#x\n", 5024 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
5223 config_name, finiver, cfcsum); 5025 config_name, finiver, cfcsum);
@@ -5235,249 +5037,6 @@ bye:
5235 return ret; 5037 return ret;
5236} 5038}
5237 5039
5238/*
5239 * Attempt to initialize the adapter via hard-coded, driver supplied
5240 * parameters ...
5241 */
5242static int adap_init0_no_config(struct adapter *adapter, int reset)
5243{
5244 struct sge *s = &adapter->sge;
5245 struct fw_caps_config_cmd caps_cmd;
5246 u32 v;
5247 int i, ret;
5248
5249 /*
5250 * Reset device if necessary
5251 */
5252 if (reset) {
5253 ret = t4_fw_reset(adapter, adapter->mbox,
5254 PIORSTMODE | PIORST);
5255 if (ret < 0)
5256 goto bye;
5257 }
5258
5259 /*
5260 * Get device capabilities and select which we'll be using.
5261 */
5262 memset(&caps_cmd, 0, sizeof(caps_cmd));
5263 caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
5264 FW_CMD_REQUEST_F | FW_CMD_READ_F);
5265 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5266 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5267 &caps_cmd);
5268 if (ret < 0)
5269 goto bye;
5270
5271 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
5272 if (!vf_acls)
5273 caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
5274 else
5275 caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
5276 } else if (vf_acls) {
5277 dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
5278 goto bye;
5279 }
5280 caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
5281 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
5282 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5283 NULL);
5284 if (ret < 0)
5285 goto bye;
5286
5287 /*
5288 * Tweak configuration based on system architecture, module
5289 * parameters, etc.
5290 */
5291 ret = adap_init0_tweaks(adapter);
5292 if (ret < 0)
5293 goto bye;
5294
5295 /*
5296 * Select RSS Global Mode we want to use. We use "Basic Virtual"
5297 * mode which maps each Virtual Interface to its own section of
5298 * the RSS Table and we turn on all map and hash enables ...
5299 */
5300 adapter->flags |= RSS_TNLALLLOOKUP;
5301 ret = t4_config_glbl_rss(adapter, adapter->mbox,
5302 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
5303 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
5304 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F |
5305 ((adapter->flags & RSS_TNLALLLOOKUP) ?
5306 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F : 0));
5307 if (ret < 0)
5308 goto bye;
5309
5310 /*
5311 * Set up our own fundamental resource provisioning ...
5312 */
5313 ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
5314 PFRES_NEQ, PFRES_NETHCTRL,
5315 PFRES_NIQFLINT, PFRES_NIQ,
5316 PFRES_TC, PFRES_NVI,
5317 FW_PFVF_CMD_CMASK_M,
5318 pfvfres_pmask(adapter, adapter->fn, 0),
5319 PFRES_NEXACTF,
5320 PFRES_R_CAPS, PFRES_WX_CAPS);
5321 if (ret < 0)
5322 goto bye;
5323
5324 /*
5325 * Perform low level SGE initialization. We need to do this before we
5326 * send the firmware the INITIALIZE command because that will cause
5327 * any other PF Drivers which are waiting for the Master
5328 * Initialization to proceed forward.
5329 */
5330 for (i = 0; i < SGE_NTIMERS - 1; i++)
5331 s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
5332 s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
5333 s->counter_val[0] = 1;
5334 for (i = 1; i < SGE_NCOUNTERS; i++)
5335 s->counter_val[i] = min(intr_cnt[i - 1],
5336 THRESHOLD_0_GET(THRESHOLD_0_MASK));
5337 t4_sge_init(adapter);
5338
5339#ifdef CONFIG_PCI_IOV
5340 /*
5341 * Provision resource limits for Virtual Functions. We currently
5342 * grant them all the same static resource limits except for the Port
5343 * Access Rights Mask which we're assigning based on the PF. All of
5344 * the static provisioning stuff for both the PF and VF really needs
5345 * to be managed in a persistent manner for each device which the
5346 * firmware controls.
5347 */
5348 {
5349 int pf, vf;
5350
5351 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
5352 if (num_vf[pf] <= 0)
5353 continue;
5354
5355 /* VF numbering starts at 1! */
5356 for (vf = 1; vf <= num_vf[pf]; vf++) {
5357 ret = t4_cfg_pfvf(adapter, adapter->mbox,
5358 pf, vf,
5359 VFRES_NEQ, VFRES_NETHCTRL,
5360 VFRES_NIQFLINT, VFRES_NIQ,
5361 VFRES_TC, VFRES_NVI,
5362 FW_PFVF_CMD_CMASK_M,
5363 pfvfres_pmask(
5364 adapter, pf, vf),
5365 VFRES_NEXACTF,
5366 VFRES_R_CAPS, VFRES_WX_CAPS);
5367 if (ret < 0)
5368 dev_warn(adapter->pdev_dev,
5369 "failed to "\
5370 "provision pf/vf=%d/%d; "
5371 "err=%d\n", pf, vf, ret);
5372 }
5373 }
5374 }
5375#endif
5376
5377 /*
5378 * Set up the default filter mode. Later we'll want to implement this
5379 * via a firmware command, etc. ... This needs to be done before the
5380 * firmare initialization command ... If the selected set of fields
5381 * isn't equal to the default value, we'll need to make sure that the
5382 * field selections will fit in the 36-bit budget.
5383 */
5384 if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
5385 int j, bits = 0;
5386
5387 for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
5388 switch (tp_vlan_pri_map & (1 << j)) {
5389 case 0:
5390 /* compressed filter field not enabled */
5391 break;
5392 case FCOE_MASK:
5393 bits += 1;
5394 break;
5395 case PORT_MASK:
5396 bits += 3;
5397 break;
5398 case VNIC_ID_MASK:
5399 bits += 17;
5400 break;
5401 case VLAN_MASK:
5402 bits += 17;
5403 break;
5404 case TOS_MASK:
5405 bits += 8;
5406 break;
5407 case PROTOCOL_MASK:
5408 bits += 8;
5409 break;
5410 case ETHERTYPE_MASK:
5411 bits += 16;
5412 break;
5413 case MACMATCH_MASK:
5414 bits += 9;
5415 break;
5416 case MPSHITTYPE_MASK:
5417 bits += 3;
5418 break;
5419 case FRAGMENTATION_MASK:
5420 bits += 1;
5421 break;
5422 }
5423
5424 if (bits > 36) {
5425 dev_err(adapter->pdev_dev,
5426 "tp_vlan_pri_map=%#x needs %d bits > 36;"\
5427 " using %#x\n", tp_vlan_pri_map, bits,
5428 TP_VLAN_PRI_MAP_DEFAULT);
5429 tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
5430 }
5431 }
5432 v = tp_vlan_pri_map;
5433 t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
5434 &v, 1, TP_VLAN_PRI_MAP);
5435
5436 /*
5437 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG order
5438 * to support any of the compressed filter fields above. Newer
5439 * versions of the firmware do this automatically but it doesn't hurt
5440 * to set it here. Meanwhile, we do _not_ need to set Lookup Every
5441 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
5442 * since the firmware automatically turns this on and off when we have
5443 * a non-zero number of filters active (since it does have a
5444 * performance impact).
5445 */
5446 if (tp_vlan_pri_map)
5447 t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
5448 FIVETUPLELOOKUP_MASK,
5449 FIVETUPLELOOKUP_MASK);
5450
5451 /*
5452 * Tweak some settings.
5453 */
5454 t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
5455 RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
5456 PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
5457 KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
5458
5459 /*
5460 * Get basic stuff going by issuing the Firmware Initialize command.
5461 * Note that this _must_ be after all PFVF commands ...
5462 */
5463 ret = t4_fw_initialize(adapter, adapter->mbox);
5464 if (ret < 0)
5465 goto bye;
5466
5467 /*
5468 * Return successfully!
5469 */
5470 dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
5471 "driver parameters\n");
5472 return 0;
5473
5474 /*
5475 * Something bad happened. Return the error ...
5476 */
5477bye:
5478 return ret;
5479}
5480
5481static struct fw_info fw_info_array[] = { 5040static struct fw_info fw_info_array[] = {
5482 { 5041 {
5483 .chip = CHELSIO_T4, 5042 .chip = CHELSIO_T4,
@@ -5529,6 +5088,8 @@ static int adap_init0(struct adapter *adap)
5529 enum dev_state state; 5088 enum dev_state state;
5530 u32 params[7], val[7]; 5089 u32 params[7], val[7];
5531 struct fw_caps_config_cmd caps_cmd; 5090 struct fw_caps_config_cmd caps_cmd;
5091 struct fw_devlog_cmd devlog_cmd;
5092 u32 devlog_meminfo;
5532 int reset = 1; 5093 int reset = 1;
5533 5094
5534 /* Contact FW, advertising Master capability */ 5095 /* Contact FW, advertising Master capability */
@@ -5590,8 +5151,7 @@ static int adap_init0(struct adapter *adap)
5590 state, &reset); 5151 state, &reset);
5591 5152
5592 /* Cleaning up */ 5153 /* Cleaning up */
5593 if (fw != NULL) 5154 release_firmware(fw);
5594 release_firmware(fw);
5595 t4_free_mem(card_fw); 5155 t4_free_mem(card_fw);
5596 5156
5597 if (ret < 0) 5157 if (ret < 0)
@@ -5609,6 +5169,30 @@ static int adap_init0(struct adapter *adap)
5609 if (ret < 0) 5169 if (ret < 0)
5610 goto bye; 5170 goto bye;
5611 5171
5172 /* Read firmware device log parameters. We really need to find a way
5173 * to get these parameters initialized with some default values (which
5174 * are likely to be correct) for the case where we either don't
5175	 * attach to the firmware or it's crashed when we probe the adapter.
5176 * That way we'll still be able to perform early firmware startup
5177 * debugging ... If the request to get the Firmware's Device Log
5178 * parameters fails, we'll live so we don't make that a fatal error.
5179 */
5180 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
5181 devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
5182 FW_CMD_REQUEST_F | FW_CMD_READ_F);
5183 devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
5184 ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
5185 &devlog_cmd);
5186 if (ret == 0) {
5187 devlog_meminfo =
5188 ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
5189 adap->params.devlog.memtype =
5190 FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
5191 adap->params.devlog.start =
5192 FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
5193 adap->params.devlog.size = ntohl(devlog_cmd.memsize_devlog);
5194 }
5195
5612 /* 5196 /*
5613 * Find out what ports are available to us. Note that we need to do 5197 * Find out what ports are available to us. Note that we need to do
5614 * this before calling adap_init0_no_config() since it needs nports 5198 * this before calling adap_init0_no_config() since it needs nports
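The new devlog query stashes where the firmware keeps its device log so its contents can be dumped later; as the comment notes, this helps early firmware startup debugging even when the request is made on a best-effort basis. The start address comes back in 16-byte units, hence the << 4 when it is stored. That conversion in isolation, with a made-up field value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t memaddr16 = 0x123;	/* hypothetical FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G() result */
	uint32_t start = memaddr16 << 4;	/* convert 16-byte units to a byte offset */

	printf("devlog starts at byte offset %#x\n", start);	/* 0x1230 */
	return 0;
}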
@@ -5624,88 +5208,58 @@ static int adap_init0(struct adapter *adap)
5624 adap->params.nports = hweight32(port_vec); 5208 adap->params.nports = hweight32(port_vec);
5625 adap->params.portvec = port_vec; 5209 adap->params.portvec = port_vec;
5626 5210
5627	/* 5211	/* If the firmware is initialized already, emit a simple note to that
5628 * If the firmware is initialized already (and we're not forcing a 5212 * effect. Otherwise, it's time to try initializing the adapter.
5629 * master initialization), note that we're living with existing
5630 * adapter parameters. Otherwise, it's time to try initializing the
5631 * adapter ...
5632 */ 5213 */
5633 if (state == DEV_STATE_INIT) { 5214 if (state == DEV_STATE_INIT) {
5634 dev_info(adap->pdev_dev, "Coming up as %s: "\ 5215 dev_info(adap->pdev_dev, "Coming up as %s: "\
5635 "Adapter already initialized\n", 5216 "Adapter already initialized\n",
5636 adap->flags & MASTER_PF ? "MASTER" : "SLAVE"); 5217 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
5637 adap->flags |= USING_SOFT_PARAMS;
5638 } else { 5218 } else {
5639 dev_info(adap->pdev_dev, "Coming up as MASTER: "\ 5219 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
5640 "Initializing adapter\n"); 5220 "Initializing adapter\n");
5641 /* 5221
5642 * If the firmware doesn't support Configuration 5222 /* Find out whether we're dealing with a version of the
5643 * Files warn user and exit, 5223 * firmware which has configuration file support.
5644 */ 5224 */
5645 if (ret < 0) 5225 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
5646 dev_warn(adap->pdev_dev, "Firmware doesn't support " 5226 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
5647 "configuration file.\n"); 5227 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
5648 if (force_old_init) 5228 params, val);
5649 ret = adap_init0_no_config(adap, reset);
5650 else {
5651 /*
5652 * Find out whether we're dealing with a version of
5653 * the firmware which has configuration file support.
5654 */
5655 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
5656 FW_PARAMS_PARAM_X_V(
5657 FW_PARAMS_PARAM_DEV_CF));
5658 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
5659 params, val);
5660
5661 /*
5662 * If the firmware doesn't support Configuration
5663 * Files, use the old Driver-based, hard-wired
5664 * initialization. Otherwise, try using the
5665 * Configuration File support and fall back to the
5666 * Driver-based initialization if there's no
5667 * Configuration File found.
5668 */
5669 if (ret < 0)
5670 ret = adap_init0_no_config(adap, reset);
5671 else {
5672 /*
5673 * The firmware provides us with a memory
5674 * buffer where we can load a Configuration
5675 * File from the host if we want to override
5676 * the Configuration File in flash.
5677 */
5678 5229
5679 ret = adap_init0_config(adap, reset); 5230 /* If the firmware doesn't support Configuration Files,
5680 if (ret == -ENOENT) { 5231 * return an error.
5681 dev_info(adap->pdev_dev, 5232 */
5682 "No Configuration File present " 5233 if (ret < 0) {
5683 "on adapter. Using hard-wired " 5234 dev_err(adap->pdev_dev, "firmware doesn't support "
5684 "configuration parameters.\n"); 5235 "Firmware Configuration Files\n");
5685 ret = adap_init0_no_config(adap, reset); 5236 goto bye;
5686 } 5237 }
5687 } 5238
5239 /* The firmware provides us with a memory buffer where we can
5240 * load a Configuration File from the host if we want to
5241 * override the Configuration File in flash.
5242 */
5243 ret = adap_init0_config(adap, reset);
5244 if (ret == -ENOENT) {
5245 dev_err(adap->pdev_dev, "no Configuration File "
5246 "present on adapter.\n");
5247 goto bye;
5688 } 5248 }
5689 if (ret < 0) { 5249 if (ret < 0) {
5690 dev_err(adap->pdev_dev, 5250 dev_err(adap->pdev_dev, "could not initialize "
5691 "could not initialize adapter, error %d\n", 5251 "adapter, error %d\n", -ret);
5692 -ret);
5693 goto bye; 5252 goto bye;
5694 } 5253 }
5695 } 5254 }
5696 5255
5697 /* 5256 /* Give the SGE code a chance to pull in anything that it needs ...
5698 * If we're living with non-hard-coded parameters (either from a 5257 * Note that this must be called after we retrieve our VPD parameters
5699 * Firmware Configuration File or values programmed by a different PF 5258 * in order to know how to convert core ticks to seconds, etc.
5700 * Driver), give the SGE code a chance to pull in anything that it
5701 * needs ... Note that this must be called after we retrieve our VPD
5702 * parameters in order to know how to convert core ticks to seconds.
5703 */ 5259 */
5704 if (adap->flags & USING_SOFT_PARAMS) { 5260 ret = t4_sge_init(adap);
5705 ret = t4_sge_init(adap); 5261 if (ret < 0)
5706 if (ret < 0) 5262 goto bye;
5707 goto bye;
5708 }
5709 5263
5710 if (is_bypass_device(adap->pdev->device)) 5264 if (is_bypass_device(adap->pdev->device))
5711 adap->params.bypass = 1; 5265 adap->params.bypass = 1;
@@ -5739,6 +5293,14 @@ static int adap_init0(struct adapter *adap)
5739 adap->tids.nftids = val[4] - val[3] + 1; 5293 adap->tids.nftids = val[4] - val[3] + 1;
5740 adap->sge.ingr_start = val[5]; 5294 adap->sge.ingr_start = val[5];
5741 5295
5296 params[0] = FW_PARAM_PFVF(CLIP_START);
5297 params[1] = FW_PARAM_PFVF(CLIP_END);
5298 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
5299 if (ret < 0)
5300 goto bye;
5301 adap->clipt_start = val[0];
5302 adap->clipt_end = val[1];
5303
5742 /* query params related to active filter region */ 5304 /* query params related to active filter region */
5743 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START); 5305 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
5744 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END); 5306 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
@@ -6401,7 +5963,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6401 goto out_unmap_bar0; 5963 goto out_unmap_bar0;
6402 5964
6403 /* We control everything through one PF */ 5965 /* We control everything through one PF */
6404 func = SOURCEPF_GET(readl(regs + PL_WHOAMI)); 5966 func = SOURCEPF_G(readl(regs + PL_WHOAMI_A));
6405 if (func != ent->driver_data) { 5967 if (func != ent->driver_data) {
6406 iounmap(regs); 5968 iounmap(regs);
6407 pci_disable_device(pdev); 5969 pci_disable_device(pdev);
@@ -6467,9 +6029,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6467 6029
6468 6030
6469 if (!is_t4(adapter->params.chip)) { 6031 if (!is_t4(adapter->params.chip)) {
6470 s_qpp = QUEUESPERPAGEPF1 * adapter->fn; 6032 s_qpp = (QUEUESPERPAGEPF0_S +
6471 qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter, 6033 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
6472 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp); 6034 adapter->fn);
6035 qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
6036 SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
6473 num_seg = PAGE_SIZE / SEGMENT_SIZE; 6037 num_seg = PAGE_SIZE / SEGMENT_SIZE;
6474 6038
6475 /* Each segment size is 128B. Write coalescing is enabled only 6039 /* Each segment size is 128B. Write coalescing is enabled only
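
The s_qpp computation above works because the per-PF QUEUESPERPAGE fields sit at a fixed stride inside a single register, so the shift for PF n is the PF0 shift plus n times that stride. A minimal sketch of the same indexing idea, with assumed shift and stride values rather than the real QUEUESPERPAGEPF0_S/QUEUESPERPAGEPF1_S definitions:

/* Hedged sketch: per-PF bit fields packed at a fixed stride in one register.
 * The shift and stride values are assumptions for illustration; the driver
 * takes them from QUEUESPERPAGEPF0_S and QUEUESPERPAGEPF1_S in t4_regs.h.
 */
#include <stdint.h>

#define EX_PF0_SHIFT	0	/* assumed shift of the PF0 field */
#define EX_PF_STRIDE	4	/* assumed distance between consecutive PF fields */
#define EX_FIELD_MASK	0xfu

static unsigned int queues_per_page(uint32_t reg, unsigned int pf)
{
	unsigned int shift = EX_PF0_SHIFT + EX_PF_STRIDE * pf;

	/* The hardware field holds log2 of the queues-per-page value. */
	return 1u << ((reg >> shift) & EX_FIELD_MASK);
}

With a stride of 4, PF 2 would read its field from bits 8..11, which mirrors the s_qpp arithmetic in the hunk.
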
@@ -6557,6 +6121,18 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6557 adapter->params.offload = 0; 6121 adapter->params.offload = 0;
6558 } 6122 }
6559 6123
6124#if IS_ENABLED(CONFIG_IPV6)
6125 adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
6126 adapter->clipt_end);
6127 if (!adapter->clipt) {
6128 /* We tolerate a lack of clip_table, giving up
6129 * some functionality
6130 */
6131 dev_warn(&pdev->dev,
6132 "could not allocate Clip table, continuing\n");
6133 adapter->params.offload = 0;
6134 }
6135#endif
6560 if (is_offload(adapter) && tid_init(&adapter->tids) < 0) { 6136 if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
6561 dev_warn(&pdev->dev, "could not allocate TID table, " 6137 dev_warn(&pdev->dev, "could not allocate TID table, "
6562 "continuing\n"); 6138 "continuing\n");
@@ -6682,6 +6258,9 @@ static void remove_one(struct pci_dev *pdev)
6682 cxgb_down(adapter); 6258 cxgb_down(adapter);
6683 6259
6684 free_some_resources(adapter); 6260 free_some_resources(adapter);
6261#if IS_ENABLED(CONFIG_IPV6)
6262 t4_cleanup_clip_tbl(adapter);
6263#endif
6685 iounmap(adapter->regs); 6264 iounmap(adapter->regs);
6686 if (!is_t4(adapter->params.chip)) 6265 if (!is_t4(adapter->params.chip))
6687 iounmap(adapter->bar2); 6266 iounmap(adapter->bar2);
@@ -6720,7 +6299,10 @@ static int __init cxgb4_init_module(void)
6720 debugfs_remove(cxgb4_debugfs_root); 6299 debugfs_remove(cxgb4_debugfs_root);
6721 6300
6722#if IS_ENABLED(CONFIG_IPV6) 6301#if IS_ENABLED(CONFIG_IPV6)
6723 register_inet6addr_notifier(&cxgb4_inet6addr_notifier); 6302 if (!inet6addr_registered) {
6303 register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6304 inet6addr_registered = true;
6305 }
6724#endif 6306#endif
6725 6307
6726 return ret; 6308 return ret;
@@ -6729,7 +6311,10 @@ static int __init cxgb4_init_module(void)
6729static void __exit cxgb4_cleanup_module(void) 6311static void __exit cxgb4_cleanup_module(void)
6730{ 6312{
6731#if IS_ENABLED(CONFIG_IPV6) 6313#if IS_ENABLED(CONFIG_IPV6)
6732 unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier); 6314 if (inet6addr_registered) {
6315 unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6316 inet6addr_registered = false;
6317 }
6733#endif 6318#endif
6734 pci_unregister_driver(&cxgb4_driver); 6319 pci_unregister_driver(&cxgb4_driver);
6735 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */ 6320 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
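
The inet6addr_registered flag introduced above makes notifier registration and unregistration idempotent, so the module init and exit paths cannot double-register or double-unregister. The pattern in isolation, with stand-ins for the real inet6addr helpers, looks roughly like this:

/* Hedged sketch of the guarded-registration pattern; register_notifier()
 * and unregister_notifier() are stand-ins for the inet6addr helpers.
 */
#include <stdbool.h>
#include <stdio.h>

static bool notifier_registered;

static void register_notifier(void)   { puts("registered"); }
static void unregister_notifier(void) { puts("unregistered"); }

static void do_register(void)
{
	if (!notifier_registered) {
		register_notifier();
		notifier_registered = true;
	}
}

static void do_unregister(void)
{
	if (notifier_registered) {
		unregister_notifier();
		notifier_registered = false;
	}
}

int main(void)
{
	do_register();
	do_register();		/* second call is a no-op */
	do_unregister();
	do_unregister();	/* second call is a no-op */
	return 0;
}
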
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 152b4c4c7809..78ab4d406ce2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -173,9 +173,6 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
173 unsigned char port, unsigned char mask); 173 unsigned char port, unsigned char mask);
174int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid, 174int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
175 unsigned int queue, bool ipv6); 175 unsigned int queue, bool ipv6);
176int cxgb4_clip_get(const struct net_device *dev, const struct in6_addr *lip);
177int cxgb4_clip_release(const struct net_device *dev,
178 const struct in6_addr *lip);
179 176
180static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue) 177static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
181{ 178{
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index a047baa9fd04..252efc29321f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -46,6 +46,7 @@
46#include "t4_msg.h" 46#include "t4_msg.h"
47#include "t4fw_api.h" 47#include "t4fw_api.h"
48#include "t4_regs.h" 48#include "t4_regs.h"
49#include "t4_values.h"
49 50
50#define VLAN_NONE 0xfff 51#define VLAN_NONE 0xfff
51 52
@@ -150,8 +151,8 @@ static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
150 151
151 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, 152 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
152 e->idx | (sync ? F_SYNC_WR : 0) | 153 e->idx | (sync ? F_SYNC_WR : 0) |
153 TID_QID(adap->sge.fw_evtq.abs_id))); 154 TID_QID_V(adap->sge.fw_evtq.abs_id)));
154 req->params = htons(L2T_W_PORT(e->lport) | L2T_W_NOREPLY(!sync)); 155 req->params = htons(L2T_W_PORT_V(e->lport) | L2T_W_NOREPLY_V(!sync));
155 req->l2t_idx = htons(e->idx); 156 req->l2t_idx = htons(e->idx);
156 req->vlan = htons(e->vlan); 157 req->vlan = htons(e->vlan);
157 if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK)) 158 if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
@@ -425,7 +426,7 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
425 * in the Compressed Filter Tuple. 426 * in the Compressed Filter Tuple.
426 */ 427 */
427 if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE) 428 if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE)
428 ntuple |= (u64)(F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift; 429 ntuple |= (u64)(FT_VLAN_VLD_F | l2t->vlan) << tp->vlan_shift;
429 430
430 if (tp->port_shift >= 0) 431 if (tp->port_shift >= 0)
431 ntuple |= (u64)l2t->lport << tp->port_shift; 432 ntuple |= (u64)l2t->lport << tp->port_shift;
@@ -439,9 +440,9 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
439 u32 pf = FW_VIID_PFN_G(viid); 440 u32 pf = FW_VIID_PFN_G(viid);
440 u32 vld = FW_VIID_VIVLD_G(viid); 441 u32 vld = FW_VIID_VIVLD_G(viid);
441 442
442 ntuple |= (u64)(V_FT_VNID_ID_VF(vf) | 443 ntuple |= (u64)(FT_VNID_ID_VF_V(vf) |
443 V_FT_VNID_ID_PF(pf) | 444 FT_VNID_ID_PF_V(pf) |
444 V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift; 445 FT_VNID_ID_VLD_V(vld)) << tp->vnic_shift;
445 } 446 }
446 447
447 return ntuple; 448 return ntuple;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index ebf935a1e352..b4b9f6048fe7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -43,8 +43,12 @@
43#include <linux/export.h> 43#include <linux/export.h>
44#include <net/ipv6.h> 44#include <net/ipv6.h>
45#include <net/tcp.h> 45#include <net/tcp.h>
46#ifdef CONFIG_NET_RX_BUSY_POLL
47#include <net/busy_poll.h>
48#endif /* CONFIG_NET_RX_BUSY_POLL */
46#include "cxgb4.h" 49#include "cxgb4.h"
47#include "t4_regs.h" 50#include "t4_regs.h"
51#include "t4_values.h"
48#include "t4_msg.h" 52#include "t4_msg.h"
49#include "t4fw_api.h" 53#include "t4fw_api.h"
50 54
@@ -521,10 +525,12 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
521{ 525{
522 u32 val; 526 u32 val;
523 if (q->pend_cred >= 8) { 527 if (q->pend_cred >= 8) {
524 val = PIDX(q->pend_cred / 8); 528 if (is_t4(adap->params.chip))
525 if (!is_t4(adap->params.chip)) 529 val = PIDX_V(q->pend_cred / 8);
526 val |= DBTYPE(1); 530 else
527 val |= DBPRIO(1); 531 val = PIDX_T5_V(q->pend_cred / 8) |
532 DBTYPE_F;
533 val |= DBPRIO_F;
528 wmb(); 534 wmb();
529 535
530 /* If we don't have access to the new User Doorbell (T5+), use 536 /* If we don't have access to the new User Doorbell (T5+), use
@@ -532,10 +538,10 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
532 * mechanism. 538 * mechanism.
533 */ 539 */
534 if (unlikely(q->bar2_addr == NULL)) { 540 if (unlikely(q->bar2_addr == NULL)) {
535 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), 541 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
536 val | QID(q->cntxt_id)); 542 val | QID_V(q->cntxt_id));
537 } else { 543 } else {
538 writel(val | QID(q->bar2_qid), 544 writel(val | QID_V(q->bar2_qid),
539 q->bar2_addr + SGE_UDB_KDOORBELL); 545 q->bar2_addr + SGE_UDB_KDOORBELL);
540 546
541 /* This Write memory Barrier will force the write to 547 /* This Write memory Barrier will force the write to
@@ -818,7 +824,8 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
818 sgl->addr0 = cpu_to_be64(addr[1]); 824 sgl->addr0 = cpu_to_be64(addr[1]);
819 } 825 }
820 826
821 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags)); 827 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
828 ULPTX_NSGE_V(nfrags));
822 if (likely(--nfrags == 0)) 829 if (likely(--nfrags == 0))
823 return; 830 return;
824 /* 831 /*
@@ -884,7 +891,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
884 * doorbell mechanism; otherwise use the new BAR2 mechanism. 891 * doorbell mechanism; otherwise use the new BAR2 mechanism.
885 */ 892 */
886 if (unlikely(q->bar2_addr == NULL)) { 893 if (unlikely(q->bar2_addr == NULL)) {
887 u32 val = PIDX(n); 894 u32 val = PIDX_V(n);
888 unsigned long flags; 895 unsigned long flags;
889 896
890 /* For T4 we need to participate in the Doorbell Recovery 897 /* For T4 we need to participate in the Doorbell Recovery
@@ -892,14 +899,14 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
892 */ 899 */
893 spin_lock_irqsave(&q->db_lock, flags); 900 spin_lock_irqsave(&q->db_lock, flags);
894 if (!q->db_disabled) 901 if (!q->db_disabled)
895 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), 902 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
896 QID(q->cntxt_id) | val); 903 QID_V(q->cntxt_id) | val);
897 else 904 else
898 q->db_pidx_inc += n; 905 q->db_pidx_inc += n;
899 q->db_pidx = q->pidx; 906 q->db_pidx = q->pidx;
900 spin_unlock_irqrestore(&q->db_lock, flags); 907 spin_unlock_irqrestore(&q->db_lock, flags);
901 } else { 908 } else {
902 u32 val = PIDX_T5(n); 909 u32 val = PIDX_T5_V(n);
903 910
904 /* T4 and later chips share the same PIDX field offset within 911 /* T4 and later chips share the same PIDX field offset within
905 * the doorbell, but T5 and later shrank the field in order to 912 * the doorbell, but T5 and later shrank the field in order to
@@ -907,7 +914,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
907 * large in the first place (14 bits) so we just use the T5 914 * large in the first place (14 bits) so we just use the T5
908 * and later limits and warn if a Queue ID is too large. 915 * and later limits and warn if a Queue ID is too large.
909 */ 916 */
910 WARN_ON(val & DBPRIO(1)); 917 WARN_ON(val & DBPRIO_F);
911 918
912 /* If we're only writing a single TX Descriptor and we can use 919 /* If we're only writing a single TX Descriptor and we can use
913 * Inferred QID registers, we can use the Write Combining 920 * Inferred QID registers, we can use the Write Combining
@@ -923,7 +930,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
923 (q->bar2_addr + SGE_UDB_WCDOORBELL), 930 (q->bar2_addr + SGE_UDB_WCDOORBELL),
924 wr); 931 wr);
925 } else { 932 } else {
926 writel(val | QID(q->bar2_qid), 933 writel(val | QID_V(q->bar2_qid),
927 q->bar2_addr + SGE_UDB_KDOORBELL); 934 q->bar2_addr + SGE_UDB_KDOORBELL);
928 } 935 }
929 936
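
The BAR2 doorbell path above prefers a single write-combined copy of the whole 64-byte work request when exactly one descriptor is posted to a queue with an inferred QID, and otherwise falls back to a plain producer-index write. A hedged sketch of that decision, using placeholder offsets and types instead of the real SGE definitions:

/* Hedged sketch of the BAR2 doorbell choice.  Offsets, the queue type and
 * the raw stores below are placeholders, not the driver's cxgb_pio_copy()
 * or writel() paths.
 */
#include <stdint.h>
#include <string.h>

#define EX_UDB_KDOORBELL	0x0	/* assumed offsets within the BAR2 doorbell page */
#define EX_UDB_WCDOORBELL	0x40

struct ex_txq {
	volatile uint8_t *bar2_addr;	/* mapped user doorbell page */
	uint32_t bar2_qid;		/* 0 when the queue ID is inferred from the page */
};

static void ring_doorbell(struct ex_txq *q, const void *wr, size_t wr_len,
			  unsigned int ndesc, uint32_t pidx_val)
{
	if (ndesc == 1 && q->bar2_qid == 0 && wr_len == 64) {
		/* One descriptor, inferred QID: push the whole work request
		 * through the write-combining doorbell in one burst.
		 */
		memcpy((void *)(q->bar2_addr + EX_UDB_WCDOORBELL), wr, wr_len);
	} else {
		/* Otherwise just tell the SGE how many descriptors were added. */
		*(volatile uint32_t *)(q->bar2_addr + EX_UDB_KDOORBELL) =
			pidx_val | q->bar2_qid;
	}
}
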
@@ -1150,9 +1157,9 @@ out_free: dev_kfree_skb_any(skb);
1150 cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS; 1157 cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
1151 } 1158 }
1152 1159
1153 if (vlan_tx_tag_present(skb)) { 1160 if (skb_vlan_tag_present(skb)) {
1154 q->vlan_ins++; 1161 q->vlan_ins++;
1155 cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb)); 1162 cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb));
1156 } 1163 }
1157 1164
1158 cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) | 1165 cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
@@ -1716,6 +1723,7 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1716 skb->truesize += skb->data_len; 1723 skb->truesize += skb->data_len;
1717 skb->ip_summed = CHECKSUM_UNNECESSARY; 1724 skb->ip_summed = CHECKSUM_UNNECESSARY;
1718 skb_record_rx_queue(skb, rxq->rspq.idx); 1725 skb_record_rx_queue(skb, rxq->rspq.idx);
1726 skb_mark_napi_id(skb, &rxq->rspq.napi);
1719 if (rxq->rspq.netdev->features & NETIF_F_RXHASH) 1727 if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
1720 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val, 1728 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
1721 PKT_HASH_TYPE_L3); 1729 PKT_HASH_TYPE_L3);
@@ -1758,7 +1766,8 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1758 pkt = (const struct cpl_rx_pkt *)rsp; 1766 pkt = (const struct cpl_rx_pkt *)rsp;
1759 csum_ok = pkt->csum_calc && !pkt->err_vec && 1767 csum_ok = pkt->csum_calc && !pkt->err_vec &&
1760 (q->netdev->features & NETIF_F_RXCSUM); 1768 (q->netdev->features & NETIF_F_RXCSUM);
1761 if ((pkt->l2info & htonl(RXF_TCP)) && 1769 if ((pkt->l2info & htonl(RXF_TCP_F)) &&
1770 !(cxgb_poll_busy_polling(q)) &&
1762 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { 1771 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
1763 do_gro(rxq, si, pkt); 1772 do_gro(rxq, si, pkt);
1764 return 0; 1773 return 0;
@@ -1780,11 +1789,11 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1780 1789
1781 rxq->stats.pkts++; 1790 rxq->stats.pkts++;
1782 1791
1783 if (csum_ok && (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) { 1792 if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) {
1784 if (!pkt->ip_frag) { 1793 if (!pkt->ip_frag) {
1785 skb->ip_summed = CHECKSUM_UNNECESSARY; 1794 skb->ip_summed = CHECKSUM_UNNECESSARY;
1786 rxq->stats.rx_cso++; 1795 rxq->stats.rx_cso++;
1787 } else if (pkt->l2info & htonl(RXF_IP)) { 1796 } else if (pkt->l2info & htonl(RXF_IP_F)) {
1788 __sum16 c = (__force __sum16)pkt->csum; 1797 __sum16 c = (__force __sum16)pkt->csum;
1789 skb->csum = csum_unfold(c); 1798 skb->csum = csum_unfold(c);
1790 skb->ip_summed = CHECKSUM_COMPLETE; 1799 skb->ip_summed = CHECKSUM_COMPLETE;
@@ -1797,6 +1806,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1797 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan)); 1806 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
1798 rxq->stats.vlan_ex++; 1807 rxq->stats.vlan_ex++;
1799 } 1808 }
1809 skb_mark_napi_id(skb, &q->napi);
1800 netif_receive_skb(skb); 1810 netif_receive_skb(skb);
1801 return 0; 1811 return 0;
1802} 1812}
@@ -1959,6 +1969,38 @@ static int process_responses(struct sge_rspq *q, int budget)
1959 return budget - budget_left; 1969 return budget - budget_left;
1960} 1970}
1961 1971
1972#ifdef CONFIG_NET_RX_BUSY_POLL
1973int cxgb_busy_poll(struct napi_struct *napi)
1974{
1975 struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
1976 unsigned int params, work_done;
1977 u32 val;
1978
1979 if (!cxgb_poll_lock_poll(q))
1980 return LL_FLUSH_BUSY;
1981
1982 work_done = process_responses(q, 4);
1983 params = QINTR_TIMER_IDX(TIMERREG_COUNTER0_X) | QINTR_CNT_EN;
1984 q->next_intr_params = params;
1985 val = CIDXINC_V(work_done) | SEINTARM_V(params);
1986
1987 /* If we don't have access to the new User GTS (T5+), use the old
1988 * doorbell mechanism; otherwise use the new BAR2 mechanism.
1989 */
1990 if (unlikely(!q->bar2_addr))
1991 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
1992 val | INGRESSQID_V((u32)q->cntxt_id));
1993 else {
1994 writel(val | INGRESSQID_V(q->bar2_qid),
1995 q->bar2_addr + SGE_UDB_GTS);
1996 wmb();
1997 }
1998
1999 cxgb_poll_unlock_poll(q);
2000 return work_done;
2001}
2002#endif /* CONFIG_NET_RX_BUSY_POLL */
2003
1962/** 2004/**
1963 * napi_rx_handler - the NAPI handler for Rx processing 2005 * napi_rx_handler - the NAPI handler for Rx processing
1964 * @napi: the napi instance 2006 * @napi: the napi instance
@@ -1974,9 +2016,13 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
1974{ 2016{
1975 unsigned int params; 2017 unsigned int params;
1976 struct sge_rspq *q = container_of(napi, struct sge_rspq, napi); 2018 struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
1977 int work_done = process_responses(q, budget); 2019 int work_done;
1978 u32 val; 2020 u32 val;
1979 2021
2022 if (!cxgb_poll_lock_napi(q))
2023 return budget;
2024
2025 work_done = process_responses(q, budget);
1980 if (likely(work_done < budget)) { 2026 if (likely(work_done < budget)) {
1981 int timer_index; 2027 int timer_index;
1982 2028
@@ -2001,19 +2047,20 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
2001 } else 2047 } else
2002 params = QINTR_TIMER_IDX(7); 2048 params = QINTR_TIMER_IDX(7);
2003 2049
2004 val = CIDXINC(work_done) | SEINTARM(params); 2050 val = CIDXINC_V(work_done) | SEINTARM_V(params);
2005 2051
2006 /* If we don't have access to the new User GTS (T5+), use the old 2052 /* If we don't have access to the new User GTS (T5+), use the old
2007 * doorbell mechanism; otherwise use the new BAR2 mechanism. 2053 * doorbell mechanism; otherwise use the new BAR2 mechanism.
2008 */ 2054 */
2009 if (unlikely(q->bar2_addr == NULL)) { 2055 if (unlikely(q->bar2_addr == NULL)) {
2010 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), 2056 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
2011 val | INGRESSQID((u32)q->cntxt_id)); 2057 val | INGRESSQID_V((u32)q->cntxt_id));
2012 } else { 2058 } else {
2013 writel(val | INGRESSQID(q->bar2_qid), 2059 writel(val | INGRESSQID_V(q->bar2_qid),
2014 q->bar2_addr + SGE_UDB_GTS); 2060 q->bar2_addr + SGE_UDB_GTS);
2015 wmb(); 2061 wmb();
2016 } 2062 }
2063 cxgb_poll_unlock_napi(q);
2017 return work_done; 2064 return work_done;
2018} 2065}
2019 2066
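
The napi_rx_handler() change above ties NAPI into the busy-poll locking: if a busy-polling socket currently owns the queue, the handler reports the full budget as consumed so the core simply polls again later. A small self-contained sketch of that control flow; the queue type and lock helpers are stand-ins, not the driver's cxgb_poll_* implementation:

/* Hedged sketch of NAPI yielding to busy poll. */
#include <stdbool.h>
#include <stdio.h>

struct ex_rspq {
	bool owned_by_busy_poll;	/* stand-in for the real queue state bits */
	int pending;			/* fake backlog of responses */
};

static bool ex_poll_lock_napi(struct ex_rspq *q)   { return !q->owned_by_busy_poll; }
static void ex_poll_unlock_napi(struct ex_rspq *q) { (void)q; }

static int ex_process_responses(struct ex_rspq *q, int budget)
{
	int done = q->pending < budget ? q->pending : budget;

	q->pending -= done;
	return done;
}

static int example_napi_poll(struct ex_rspq *q, int budget)
{
	int work_done;

	if (!ex_poll_lock_napi(q))
		return budget;		/* busy poll owns the queue; poll again later */

	work_done = ex_process_responses(q, budget);
	/* Interrupts would only be re-armed here when work_done < budget. */
	ex_poll_unlock_napi(q);
	return work_done;
}

int main(void)
{
	struct ex_rspq q = { .owned_by_busy_poll = false, .pending = 3 };

	printf("napi poll did %d of 64\n", example_napi_poll(&q, 64));
	q.owned_by_busy_poll = true;
	printf("napi poll did %d of 64 (queue busy)\n", example_napi_poll(&q, 64));
	return 0;
}
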
@@ -2056,16 +2103,16 @@ static unsigned int process_intrq(struct adapter *adap)
2056 rspq_next(q); 2103 rspq_next(q);
2057 } 2104 }
2058 2105
2059 val = CIDXINC(credits) | SEINTARM(q->intr_params); 2106 val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params);
2060 2107
2061 /* If we don't have access to the new User GTS (T5+), use the old 2108 /* If we don't have access to the new User GTS (T5+), use the old
2062 * doorbell mechanism; otherwise use the new BAR2 mechanism. 2109 * doorbell mechanism; otherwise use the new BAR2 mechanism.
2063 */ 2110 */
2064 if (unlikely(q->bar2_addr == NULL)) { 2111 if (unlikely(q->bar2_addr == NULL)) {
2065 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), 2112 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
2066 val | INGRESSQID(q->cntxt_id)); 2113 val | INGRESSQID_V(q->cntxt_id));
2067 } else { 2114 } else {
2068 writel(val | INGRESSQID(q->bar2_qid), 2115 writel(val | INGRESSQID_V(q->bar2_qid),
2069 q->bar2_addr + SGE_UDB_GTS); 2116 q->bar2_addr + SGE_UDB_GTS);
2070 wmb(); 2117 wmb();
2071 } 2118 }
@@ -2095,7 +2142,7 @@ static irqreturn_t t4_intr_intx(int irq, void *cookie)
2095{ 2142{
2096 struct adapter *adap = cookie; 2143 struct adapter *adap = cookie;
2097 2144
2098 t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI), 0); 2145 t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
2099 if (t4_slow_intr_handler(adap) | process_intrq(adap)) 2146 if (t4_slow_intr_handler(adap) | process_intrq(adap))
2100 return IRQ_HANDLED; 2147 return IRQ_HANDLED;
2101 return IRQ_NONE; /* probably shared interrupt */ 2148 return IRQ_NONE; /* probably shared interrupt */
@@ -2142,9 +2189,9 @@ static void sge_rx_timer_cb(unsigned long data)
2142 } 2189 }
2143 } 2190 }
2144 2191
2145 t4_write_reg(adap, SGE_DEBUG_INDEX, 13); 2192 t4_write_reg(adap, SGE_DEBUG_INDEX_A, 13);
2146 idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH); 2193 idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH_A);
2147 idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW); 2194 idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
2148 2195
2149 for (i = 0; i < 2; i++) { 2196 for (i = 0; i < 2; i++) {
2150 u32 debug0, debug11; 2197 u32 debug0, debug11;
@@ -2188,12 +2235,12 @@ static void sge_rx_timer_cb(unsigned long data)
2188 /* Read and save the SGE IDMA State and Queue ID information. 2235 /* Read and save the SGE IDMA State and Queue ID information.
2189 * We do this every time in case it changes across time ... 2236 * We do this every time in case it changes across time ...
2190 */ 2237 */
2191 t4_write_reg(adap, SGE_DEBUG_INDEX, 0); 2238 t4_write_reg(adap, SGE_DEBUG_INDEX_A, 0);
2192 debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW); 2239 debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
2193 s->idma_state[i] = (debug0 >> (i * 9)) & 0x3f; 2240 s->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
2194 2241
2195 t4_write_reg(adap, SGE_DEBUG_INDEX, 11); 2242 t4_write_reg(adap, SGE_DEBUG_INDEX_A, 11);
2196 debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW); 2243 debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
2197 s->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff; 2244 s->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
2198 2245
2199 CH_WARN(adap, "SGE idma%u, queue%u, maybe stuck state%u %dsecs (debug0=%#x, debug11=%#x)\n", 2246 CH_WARN(adap, "SGE idma%u, queue%u, maybe stuck state%u %dsecs (debug0=%#x, debug11=%#x)\n",
@@ -2337,6 +2384,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
2337 goto err; 2384 goto err;
2338 2385
2339 netif_napi_add(dev, &iq->napi, napi_rx_handler, 64); 2386 netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
2387 napi_hash_add(&iq->napi);
2340 iq->cur_desc = iq->desc; 2388 iq->cur_desc = iq->desc;
2341 iq->cidx = 0; 2389 iq->cidx = 0;
2342 iq->gen = 1; 2390 iq->gen = 1;
@@ -2594,6 +2642,7 @@ static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
2594 rq->cntxt_id, fl_id, 0xffff); 2642 rq->cntxt_id, fl_id, 0xffff);
2595 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, 2643 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
2596 rq->desc, rq->phys_addr); 2644 rq->desc, rq->phys_addr);
2645 napi_hash_del(&rq->napi);
2597 netif_napi_del(&rq->napi); 2646 netif_napi_del(&rq->napi);
2598 rq->netdev = NULL; 2647 rq->netdev = NULL;
2599 rq->cntxt_id = rq->abs_id = 0; 2648 rq->cntxt_id = rq->abs_id = 0;
@@ -2738,24 +2787,11 @@ void t4_sge_stop(struct adapter *adap)
2738} 2787}
2739 2788
2740/** 2789/**
2741 * t4_sge_init - initialize SGE 2790 * t4_sge_init_soft - grab core SGE values needed by SGE code
2742 * @adap: the adapter 2791 * @adap: the adapter
2743 * 2792 *
2744 * Performs SGE initialization needed every time after a chip reset. 2793 * We need to grab the SGE operating parameters that we need to have
2745 * We do not initialize any of the queues here, instead the driver 2794 * in order to do our job and make sure we can live with them.
2746 * top-level must request them individually.
2747 *
2748 * Called in two different modes:
2749 *
2750 * 1. Perform actual hardware initialization and record hard-coded
2751 * parameters which were used. This gets used when we're the
2752 * Master PF and the Firmware Configuration File support didn't
2753 * work for some reason.
2754 *
2755 * 2. We're not the Master PF or initialization was performed with
2756 * a Firmware Configuration File. In this case we need to grab
2757 * any of the SGE operating parameters that we need to have in
2758 * order to do our job and make sure we can live with them ...
2759 */ 2795 */
2760 2796
2761static int t4_sge_init_soft(struct adapter *adap) 2797static int t4_sge_init_soft(struct adapter *adap)
@@ -2770,8 +2806,8 @@ static int t4_sge_init_soft(struct adapter *adap)
2770 * process_responses() and that only packet data is going to the 2806 * process_responses() and that only packet data is going to the
2771 * Free Lists. 2807 * Free Lists.
2772 */ 2808 */
2773 if ((t4_read_reg(adap, SGE_CONTROL) & RXPKTCPLMODE_MASK) != 2809 if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) !=
2774 RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) { 2810 RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
2775 dev_err(adap->pdev_dev, "bad SGE CPL MODE\n"); 2811 dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
2776 return -EINVAL; 2812 return -EINVAL;
2777 } 2813 }
@@ -2785,7 +2821,7 @@ static int t4_sge_init_soft(struct adapter *adap)
2785 * XXX meet our needs! 2821 * XXX meet our needs!
2786 */ 2822 */
2787 #define READ_FL_BUF(x) \ 2823 #define READ_FL_BUF(x) \
2788 t4_read_reg(adap, SGE_FL_BUFFER_SIZE0+(x)*sizeof(u32)) 2824 t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32))
2789 2825
2790 fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF); 2826 fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
2791 fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF); 2827 fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
@@ -2823,99 +2859,38 @@ static int t4_sge_init_soft(struct adapter *adap)
2823 * Retrieve our RX interrupt holdoff timer values and counter 2859 * Retrieve our RX interrupt holdoff timer values and counter
2824 * threshold values from the SGE parameters. 2860 * threshold values from the SGE parameters.
2825 */ 2861 */
2826 timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1); 2862 timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A);
2827 timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3); 2863 timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A);
2828 timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5); 2864 timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A);
2829 s->timer_val[0] = core_ticks_to_us(adap, 2865 s->timer_val[0] = core_ticks_to_us(adap,
2830 TIMERVALUE0_GET(timer_value_0_and_1)); 2866 TIMERVALUE0_G(timer_value_0_and_1));
2831 s->timer_val[1] = core_ticks_to_us(adap, 2867 s->timer_val[1] = core_ticks_to_us(adap,
2832 TIMERVALUE1_GET(timer_value_0_and_1)); 2868 TIMERVALUE1_G(timer_value_0_and_1));
2833 s->timer_val[2] = core_ticks_to_us(adap, 2869 s->timer_val[2] = core_ticks_to_us(adap,
2834 TIMERVALUE2_GET(timer_value_2_and_3)); 2870 TIMERVALUE2_G(timer_value_2_and_3));
2835 s->timer_val[3] = core_ticks_to_us(adap, 2871 s->timer_val[3] = core_ticks_to_us(adap,
2836 TIMERVALUE3_GET(timer_value_2_and_3)); 2872 TIMERVALUE3_G(timer_value_2_and_3));
2837 s->timer_val[4] = core_ticks_to_us(adap, 2873 s->timer_val[4] = core_ticks_to_us(adap,
2838 TIMERVALUE4_GET(timer_value_4_and_5)); 2874 TIMERVALUE4_G(timer_value_4_and_5));
2839 s->timer_val[5] = core_ticks_to_us(adap, 2875 s->timer_val[5] = core_ticks_to_us(adap,
2840 TIMERVALUE5_GET(timer_value_4_and_5)); 2876 TIMERVALUE5_G(timer_value_4_and_5));
2841 2877
2842 ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD); 2878 ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A);
2843 s->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold); 2879 s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
2844 s->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold); 2880 s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
2845 s->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold); 2881 s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
2846 s->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold); 2882 s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);
2847
2848 return 0;
2849}
2850
2851static int t4_sge_init_hard(struct adapter *adap)
2852{
2853 struct sge *s = &adap->sge;
2854
2855 /*
2856 * Set up our basic SGE mode to deliver CPL messages to our Ingress
2857	 * Queue and Packet Data to the Free List.
2858 */
2859 t4_set_reg_field(adap, SGE_CONTROL, RXPKTCPLMODE_MASK,
2860 RXPKTCPLMODE_MASK);
2861
2862 /*
2863 * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
2864 * and generate an interrupt when this occurs so we can recover.
2865 */
2866 if (is_t4(adap->params.chip)) {
2867 t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
2868 V_HP_INT_THRESH(M_HP_INT_THRESH) |
2869 V_LP_INT_THRESH(M_LP_INT_THRESH),
2870 V_HP_INT_THRESH(dbfifo_int_thresh) |
2871 V_LP_INT_THRESH(dbfifo_int_thresh));
2872 } else {
2873 t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
2874 V_LP_INT_THRESH_T5(M_LP_INT_THRESH_T5),
2875 V_LP_INT_THRESH_T5(dbfifo_int_thresh));
2876 t4_set_reg_field(adap, SGE_DBFIFO_STATUS2,
2877 V_HP_INT_THRESH_T5(M_HP_INT_THRESH_T5),
2878 V_HP_INT_THRESH_T5(dbfifo_int_thresh));
2879 }
2880 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP,
2881 F_ENABLE_DROP);
2882
2883 /*
2884 * SGE_FL_BUFFER_SIZE0 (RX_SMALL_PG_BUF) is set up by
2885 * t4_fixup_host_params().
2886 */
2887 s->fl_pg_order = FL_PG_ORDER;
2888 if (s->fl_pg_order)
2889 t4_write_reg(adap,
2890 SGE_FL_BUFFER_SIZE0+RX_LARGE_PG_BUF*sizeof(u32),
2891 PAGE_SIZE << FL_PG_ORDER);
2892 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_SMALL_MTU_BUF*sizeof(u32),
2893 FL_MTU_SMALL_BUFSIZE(adap));
2894 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_LARGE_MTU_BUF*sizeof(u32),
2895 FL_MTU_LARGE_BUFSIZE(adap));
2896
2897 /*
2898 * Note that the SGE Ingress Packet Count Interrupt Threshold and
2899 * Timer Holdoff values must be supplied by our caller.
2900 */
2901 t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD,
2902 THRESHOLD_0(s->counter_val[0]) |
2903 THRESHOLD_1(s->counter_val[1]) |
2904 THRESHOLD_2(s->counter_val[2]) |
2905 THRESHOLD_3(s->counter_val[3]));
2906 t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1,
2907 TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) |
2908 TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1])));
2909 t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3,
2910 TIMERVALUE2(us_to_core_ticks(adap, s->timer_val[2])) |
2911 TIMERVALUE3(us_to_core_ticks(adap, s->timer_val[3])));
2912 t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5,
2913 TIMERVALUE4(us_to_core_ticks(adap, s->timer_val[4])) |
2914 TIMERVALUE5(us_to_core_ticks(adap, s->timer_val[5])));
2915 2883
2916 return 0; 2884 return 0;
2917} 2885}
2918 2886
2887/**
2888 * t4_sge_init - initialize SGE
2889 * @adap: the adapter
2890 *
2891 * Perform low-level SGE code initialization needed every time after a
2892 * chip reset.
2893 */
2919int t4_sge_init(struct adapter *adap) 2894int t4_sge_init(struct adapter *adap)
2920{ 2895{
2921 struct sge *s = &adap->sge; 2896 struct sge *s = &adap->sge;
@@ -2927,9 +2902,9 @@ int t4_sge_init(struct adapter *adap)
2927 * Ingress Padding Boundary and Egress Status Page Size are set up by 2902 * Ingress Padding Boundary and Egress Status Page Size are set up by
2928 * t4_fixup_host_params(). 2903 * t4_fixup_host_params().
2929 */ 2904 */
2930 sge_control = t4_read_reg(adap, SGE_CONTROL); 2905 sge_control = t4_read_reg(adap, SGE_CONTROL_A);
2931 s->pktshift = PKTSHIFT_GET(sge_control); 2906 s->pktshift = PKTSHIFT_G(sge_control);
2932 s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64; 2907 s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
2933 2908
2934 /* T4 uses a single control field to specify both the PCIe Padding and 2909 /* T4 uses a single control field to specify both the PCIe Padding and
2935 * Packing Boundary. T5 introduced the ability to specify these 2910 * Packing Boundary. T5 introduced the ability to specify these
@@ -2937,8 +2912,8 @@ int t4_sge_init(struct adapter *adap)
2937 * within Packed Buffer Mode is the maximum of these two 2912 * within Packed Buffer Mode is the maximum of these two
2938 * specifications. 2913 * specifications.
2939 */ 2914 */
2940 ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_control) + 2915 ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) +
2941 X_INGPADBOUNDARY_SHIFT); 2916 INGPADBOUNDARY_SHIFT_X);
2942 if (is_t4(adap->params.chip)) { 2917 if (is_t4(adap->params.chip)) {
2943 s->fl_align = ingpadboundary; 2918 s->fl_align = ingpadboundary;
2944 } else { 2919 } else {
@@ -2956,10 +2931,7 @@ int t4_sge_init(struct adapter *adap)
2956 s->fl_align = max(ingpadboundary, ingpackboundary); 2931 s->fl_align = max(ingpadboundary, ingpackboundary);
2957 } 2932 }
2958 2933
2959 if (adap->flags & USING_SOFT_PARAMS) 2934 ret = t4_sge_init_soft(adap);
2960 ret = t4_sge_init_soft(adap);
2961 else
2962 ret = t4_sge_init_hard(adap);
2963 if (ret < 0) 2935 if (ret < 0)
2964 return ret; 2936 return ret;
2965 2937
@@ -2975,11 +2947,11 @@ int t4_sge_init(struct adapter *adap)
2975 * buffers and a new field which only applies to Packed Mode Free List 2947 * buffers and a new field which only applies to Packed Mode Free List
2976 * buffers. 2948 * buffers.
2977 */ 2949 */
2978 sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL); 2950 sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
2979 if (is_t4(adap->params.chip)) 2951 if (is_t4(adap->params.chip))
2980 egress_threshold = EGRTHRESHOLD_GET(sge_conm_ctrl); 2952 egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl);
2981 else 2953 else
2982 egress_threshold = EGRTHRESHOLDPACKING_GET(sge_conm_ctrl); 2954 egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
2983 s->fl_starve_thres = 2*egress_threshold + 1; 2955 s->fl_starve_thres = 2*egress_threshold + 1;
2984 2956
2985 setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap); 2957 setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index c132d9030729..4d643b65265e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -35,6 +35,7 @@
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include "cxgb4.h" 36#include "cxgb4.h"
37#include "t4_regs.h" 37#include "t4_regs.h"
38#include "t4_values.h"
38#include "t4fw_api.h" 39#include "t4fw_api.h"
39 40
40/** 41/**
@@ -149,20 +150,20 @@ void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
149 */ 150 */
150void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val) 151void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
151{ 152{
152 u32 req = ENABLE | FUNCTION(adap->fn) | reg; 153 u32 req = ENABLE_F | FUNCTION_V(adap->fn) | REGISTER_V(reg);
153 154
154 if (is_t4(adap->params.chip)) 155 if (is_t4(adap->params.chip))
155 req |= F_LOCALCFG; 156 req |= LOCALCFG_F;
156 157
157 t4_write_reg(adap, PCIE_CFG_SPACE_REQ, req); 158 t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, req);
158 *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA); 159 *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA_A);
159 160
160 /* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a 161 /* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
161 * Configuration Space read. (None of the other fields matter when 162 * Configuration Space read. (None of the other fields matter when
162 * ENABLE is 0 so a simple register write is easier than a 163 * ENABLE is 0 so a simple register write is easier than a
163 * read-modify-write via t4_set_reg_field().) 164 * read-modify-write via t4_set_reg_field().)
164 */ 165 */
165 t4_write_reg(adap, PCIE_CFG_SPACE_REQ, 0); 166 t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, 0);
166} 167}
167 168
168/* 169/*
@@ -187,8 +188,8 @@ static void t4_report_fw_error(struct adapter *adap)
187 }; 188 };
188 u32 pcie_fw; 189 u32 pcie_fw;
189 190
190 pcie_fw = t4_read_reg(adap, MA_PCIE_FW); 191 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
191 if (pcie_fw & PCIE_FW_ERR) 192 if (pcie_fw & PCIE_FW_ERR_F)
192 dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n", 193 dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
193 reason[PCIE_FW_EVAL_G(pcie_fw)]); 194 reason[PCIE_FW_EVAL_G(pcie_fw)]);
194} 195}
@@ -264,8 +265,8 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
264 u64 res; 265 u64 res;
265 int i, ms, delay_idx; 266 int i, ms, delay_idx;
266 const __be64 *p = cmd; 267 const __be64 *p = cmd;
267 u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA); 268 u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
268 u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL); 269 u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);
269 270
270 if ((size & 15) || size > MBOX_LEN) 271 if ((size & 15) || size > MBOX_LEN)
271 return -EINVAL; 272 return -EINVAL;
@@ -277,9 +278,9 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
277 if (adap->pdev->error_state != pci_channel_io_normal) 278 if (adap->pdev->error_state != pci_channel_io_normal)
278 return -EIO; 279 return -EIO;
279 280
280 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg)); 281 v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
281 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++) 282 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
282 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg)); 283 v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
283 284
284 if (v != MBOX_OWNER_DRV) 285 if (v != MBOX_OWNER_DRV)
285 return v ? -EBUSY : -ETIMEDOUT; 286 return v ? -EBUSY : -ETIMEDOUT;
@@ -287,7 +288,7 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
287 for (i = 0; i < size; i += 8) 288 for (i = 0; i < size; i += 8)
288 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++)); 289 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
289 290
290 t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW)); 291 t4_write_reg(adap, ctl_reg, MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
291 t4_read_reg(adap, ctl_reg); /* flush write */ 292 t4_read_reg(adap, ctl_reg); /* flush write */
292 293
293 delay_idx = 0; 294 delay_idx = 0;
@@ -303,8 +304,8 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
303 mdelay(ms); 304 mdelay(ms);
304 305
305 v = t4_read_reg(adap, ctl_reg); 306 v = t4_read_reg(adap, ctl_reg);
306 if (MBOWNER_GET(v) == MBOX_OWNER_DRV) { 307 if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
307 if (!(v & MBMSGVALID)) { 308 if (!(v & MBMSGVALID_F)) {
308 t4_write_reg(adap, ctl_reg, 0); 309 t4_write_reg(adap, ctl_reg, 0);
309 continue; 310 continue;
310 } 311 }
@@ -350,27 +351,27 @@ int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
350 u32 mc_bist_status_rdata, mc_bist_data_pattern; 351 u32 mc_bist_status_rdata, mc_bist_data_pattern;
351 352
352 if (is_t4(adap->params.chip)) { 353 if (is_t4(adap->params.chip)) {
353 mc_bist_cmd = MC_BIST_CMD; 354 mc_bist_cmd = MC_BIST_CMD_A;
354 mc_bist_cmd_addr = MC_BIST_CMD_ADDR; 355 mc_bist_cmd_addr = MC_BIST_CMD_ADDR_A;
355 mc_bist_cmd_len = MC_BIST_CMD_LEN; 356 mc_bist_cmd_len = MC_BIST_CMD_LEN_A;
356 mc_bist_status_rdata = MC_BIST_STATUS_RDATA; 357 mc_bist_status_rdata = MC_BIST_STATUS_RDATA_A;
357 mc_bist_data_pattern = MC_BIST_DATA_PATTERN; 358 mc_bist_data_pattern = MC_BIST_DATA_PATTERN_A;
358 } else { 359 } else {
359 mc_bist_cmd = MC_REG(MC_P_BIST_CMD, idx); 360 mc_bist_cmd = MC_REG(MC_P_BIST_CMD_A, idx);
360 mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR, idx); 361 mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR_A, idx);
361 mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN, idx); 362 mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN_A, idx);
362 mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA, idx); 363 mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA_A, idx);
363 mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN, idx); 364 mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN_A, idx);
364 } 365 }
365 366
366 if (t4_read_reg(adap, mc_bist_cmd) & START_BIST) 367 if (t4_read_reg(adap, mc_bist_cmd) & START_BIST_F)
367 return -EBUSY; 368 return -EBUSY;
368 t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU); 369 t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
369 t4_write_reg(adap, mc_bist_cmd_len, 64); 370 t4_write_reg(adap, mc_bist_cmd_len, 64);
370 t4_write_reg(adap, mc_bist_data_pattern, 0xc); 371 t4_write_reg(adap, mc_bist_data_pattern, 0xc);
371 t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE(1) | START_BIST | 372 t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE_V(1) | START_BIST_F |
372 BIST_CMD_GAP(1)); 373 BIST_CMD_GAP_V(1));
373 i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST, 0, 10, 1); 374 i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST_F, 0, 10, 1);
374 if (i) 375 if (i)
375 return i; 376 return i;
376 377
@@ -403,31 +404,31 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
403 u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata; 404 u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;
404 405
405 if (is_t4(adap->params.chip)) { 406 if (is_t4(adap->params.chip)) {
406 edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx); 407 edc_bist_cmd = EDC_REG(EDC_BIST_CMD_A, idx);
407 edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx); 408 edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR_A, idx);
408 edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx); 409 edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN_A, idx);
409 edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN, 410 edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN_A,
410 idx);
411 edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA,
412 idx); 411 idx);
412 edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA_A,
413 idx);
413 } else { 414 } else {
414 edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD, idx); 415 edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD_A, idx);
415 edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx); 416 edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR_A, idx);
416 edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx); 417 edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN_A, idx);
417 edc_bist_cmd_data_pattern = 418 edc_bist_cmd_data_pattern =
418 EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx); 419 EDC_REG_T5(EDC_H_BIST_DATA_PATTERN_A, idx);
419 edc_bist_status_rdata = 420 edc_bist_status_rdata =
420 EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx); 421 EDC_REG_T5(EDC_H_BIST_STATUS_RDATA_A, idx);
421 } 422 }
422 423
423 if (t4_read_reg(adap, edc_bist_cmd) & START_BIST) 424 if (t4_read_reg(adap, edc_bist_cmd) & START_BIST_F)
424 return -EBUSY; 425 return -EBUSY;
425 t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU); 426 t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
426 t4_write_reg(adap, edc_bist_cmd_len, 64); 427 t4_write_reg(adap, edc_bist_cmd_len, 64);
427 t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc); 428 t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
428 t4_write_reg(adap, edc_bist_cmd, 429 t4_write_reg(adap, edc_bist_cmd,
429 BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST); 430 BIST_OPCODE_V(1) | BIST_CMD_GAP_V(1) | START_BIST_F);
430 i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST, 0, 10, 1); 431 i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST_F, 0, 10, 1);
431 if (i) 432 if (i)
432 return i; 433 return i;
433 434
@@ -505,13 +506,13 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
505 * the address is relative to BAR0. 506 * the address is relative to BAR0.
506 */ 507 */
507 mem_reg = t4_read_reg(adap, 508 mem_reg = t4_read_reg(adap,
508 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 509 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A,
509 win)); 510 win));
510 mem_aperture = 1 << (GET_WINDOW(mem_reg) + 10); 511 mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X);
511 mem_base = GET_PCIEOFST(mem_reg) << 10; 512 mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
512 if (is_t4(adap->params.chip)) 513 if (is_t4(adap->params.chip))
513 mem_base -= adap->t4_bar0; 514 mem_base -= adap->t4_bar0;
514 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn); 515 win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->fn);
515 516
516 /* Calculate our initial PCI-E Memory Window Position and Offset into 517 /* Calculate our initial PCI-E Memory Window Position and Offset into
517 * that Window. 518 * that Window.
@@ -524,10 +525,10 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
524 * attempt to use the new value.) 525 * attempt to use the new value.)
525 */ 526 */
526 t4_write_reg(adap, 527 t4_write_reg(adap,
527 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win), 528 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win),
528 pos | win_pf); 529 pos | win_pf);
529 t4_read_reg(adap, 530 t4_read_reg(adap,
530 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win)); 531 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
531 532
532 /* Transfer data to/from the adapter as long as there's an integral 533 /* Transfer data to/from the adapter as long as there's an integral
533 * number of 32-bit transfers to complete. 534 * number of 32-bit transfers to complete.
@@ -552,11 +553,11 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
552 pos += mem_aperture; 553 pos += mem_aperture;
553 offset = 0; 554 offset = 0;
554 t4_write_reg(adap, 555 t4_write_reg(adap,
555 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 556 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
556 win), pos | win_pf); 557 win), pos | win_pf);
557 t4_read_reg(adap, 558 t4_read_reg(adap,
558 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 559 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
559 win)); 560 win));
560 } 561 }
561 } 562 }
562 563
@@ -760,14 +761,13 @@ static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
760 761
761 if (!byte_cnt || byte_cnt > 4) 762 if (!byte_cnt || byte_cnt > 4)
762 return -EINVAL; 763 return -EINVAL;
763 if (t4_read_reg(adapter, SF_OP) & SF_BUSY) 764 if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
764 return -EBUSY; 765 return -EBUSY;
765 cont = cont ? SF_CONT : 0; 766 t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
766 lock = lock ? SF_LOCK : 0; 767 SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1));
767 t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1)); 768 ret = t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
768 ret = t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
769 if (!ret) 769 if (!ret)
770 *valp = t4_read_reg(adapter, SF_DATA); 770 *valp = t4_read_reg(adapter, SF_DATA_A);
771 return ret; 771 return ret;
772} 772}
773 773
@@ -788,14 +788,12 @@ static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
788{ 788{
789 if (!byte_cnt || byte_cnt > 4) 789 if (!byte_cnt || byte_cnt > 4)
790 return -EINVAL; 790 return -EINVAL;
791 if (t4_read_reg(adapter, SF_OP) & SF_BUSY) 791 if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
792 return -EBUSY; 792 return -EBUSY;
793 cont = cont ? SF_CONT : 0; 793 t4_write_reg(adapter, SF_DATA_A, val);
794 lock = lock ? SF_LOCK : 0; 794 t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
795 t4_write_reg(adapter, SF_DATA, val); 795 SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | OP_V(1));
796 t4_write_reg(adapter, SF_OP, lock | 796 return t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
797 cont | BYTECNT(byte_cnt - 1) | OP_WR);
798 return t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
799} 797}
800 798
801/** 799/**
@@ -837,8 +835,8 @@ static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
837 * (i.e., big-endian), otherwise as 32-bit words in the platform's 835 * (i.e., big-endian), otherwise as 32-bit words in the platform's
838	 * natural endianness. 836	 * natural endianness.
839 */ 837 */
840static int t4_read_flash(struct adapter *adapter, unsigned int addr, 838int t4_read_flash(struct adapter *adapter, unsigned int addr,
841 unsigned int nwords, u32 *data, int byte_oriented) 839 unsigned int nwords, u32 *data, int byte_oriented)
842{ 840{
843 int ret; 841 int ret;
844 842
@@ -854,7 +852,7 @@ static int t4_read_flash(struct adapter *adapter, unsigned int addr,
854 for ( ; nwords; nwords--, data++) { 852 for ( ; nwords; nwords--, data++) {
855 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data); 853 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
856 if (nwords == 1) 854 if (nwords == 1)
857 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ 855 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
858 if (ret) 856 if (ret)
859 return ret; 857 return ret;
860 if (byte_oriented) 858 if (byte_oriented)
@@ -902,7 +900,7 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
902 if (ret) 900 if (ret)
903 goto unlock; 901 goto unlock;
904 902
905 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ 903 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
906 904
907 /* Read the page to verify the write succeeded */ 905 /* Read the page to verify the write succeeded */
908 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1); 906 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
@@ -918,7 +916,7 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
918 return 0; 916 return 0;
919 917
920unlock: 918unlock:
921 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ 919 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
922 return ret; 920 return ret;
923} 921}
924 922
@@ -950,6 +948,43 @@ int t4_get_tp_version(struct adapter *adapter, u32 *vers)
950 1, vers, 0); 948 1, vers, 0);
951} 949}
952 950
951/**
952 * t4_get_exprom_version - return the Expansion ROM version (if any)
953 * @adapter: the adapter
954 * @vers: where to place the version
955 *
956 * Reads the Expansion ROM header from FLASH and returns the version
957 * number (if present) through the @vers return value pointer. We return
958 * this in the Firmware Version Format since it's convenient. Return
959 * 0 on success, -ENOENT if no Expansion ROM is present.
960 */
961int t4_get_exprom_version(struct adapter *adap, u32 *vers)
962{
963 struct exprom_header {
964 unsigned char hdr_arr[16]; /* must start with 0x55aa */
965 unsigned char hdr_ver[4]; /* Expansion ROM version */
966 } *hdr;
967 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
968 sizeof(u32))];
969 int ret;
970
971 ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
972 ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
973 0);
974 if (ret)
975 return ret;
976
977 hdr = (struct exprom_header *)exprom_header_buf;
978 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
979 return -ENOENT;
980
981 *vers = (FW_HDR_FW_VER_MAJOR_V(hdr->hdr_ver[0]) |
982 FW_HDR_FW_VER_MINOR_V(hdr->hdr_ver[1]) |
983 FW_HDR_FW_VER_MICRO_V(hdr->hdr_ver[2]) |
984 FW_HDR_FW_VER_BUILD_V(hdr->hdr_ver[3]));
985 return 0;
986}
987
953/* Is the given firmware API compatible with the one the driver was compiled 988/* Is the given firmware API compatible with the one the driver was compiled
954 * with? 989 * with?
955 */ 990 */
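
t4_get_exprom_version() above checks the standard PCI option ROM signature (0x55 0xAA) and then packs the four version bytes into the firmware version format. A standalone sketch of that check and packing; the bit positions used for packing are assumptions, the driver's real layout comes from the FW_HDR_FW_VER_*_V() helpers:

/* Hedged sketch: validate an option ROM signature and pack a 4-byte version. */
#include <stdint.h>
#include <stdio.h>

static int exprom_version(const unsigned char *hdr, uint32_t *vers)
{
	if (hdr[0] != 0x55 || hdr[1] != 0xaa)
		return -1;			/* no expansion ROM present */

	/* hdr[16..19] hold major/minor/micro/build in this example layout */
	*vers = ((uint32_t)hdr[16] << 24) | ((uint32_t)hdr[17] << 16) |
		((uint32_t)hdr[18] << 8)  |  (uint32_t)hdr[19];
	return 0;
}

int main(void)
{
	unsigned char hdr[20] = { 0x55, 0xaa };
	uint32_t vers;

	hdr[16] = 1; hdr[17] = 14; hdr[18] = 4; hdr[19] = 0;	/* e.g. version 1.14.4.0 */
	if (!exprom_version(hdr, &vers))
		printf("expansion ROM version 0x%08x\n", (unsigned int)vers);
	return 0;
}
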
@@ -1113,7 +1148,7 @@ static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
1113 } 1148 }
1114 start++; 1149 start++;
1115 } 1150 }
1116 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ 1151 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
1117 return ret; 1152 return ret;
1118} 1153}
1119 1154
@@ -1241,6 +1276,45 @@ out:
1241 return ret; 1276 return ret;
1242} 1277}
1243 1278
1279/**
1280 * t4_fwcache - firmware cache operation
1281 * @adap: the adapter
1282 * @op : the operation (flush or flush and invalidate)
1283 */
1284int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
1285{
1286 struct fw_params_cmd c;
1287
1288 memset(&c, 0, sizeof(c));
1289 c.op_to_vfn =
1290 cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
1291 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
1292 FW_PARAMS_CMD_PFN_V(adap->fn) |
1293 FW_PARAMS_CMD_VFN_V(0));
1294 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
1295 c.param[0].mnem =
1296 cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
1297 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE));
1298 c.param[0].val = (__force __be32)op;
1299
1300 return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
1301}
1302
1303void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
1304{
1305 unsigned int i, j;
1306
1307 for (i = 0; i < 8; i++) {
1308 u32 *p = la_buf + i;
1309
1310 t4_write_reg(adap, ULP_RX_LA_CTL_A, i);
1311 j = t4_read_reg(adap, ULP_RX_LA_WRPTR_A);
1312 t4_write_reg(adap, ULP_RX_LA_RDPTR_A, j);
1313 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
1314 *p = t4_read_reg(adap, ULP_RX_LA_RDDATA_A);
1315 }
1316}
1317
1244#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ 1318#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
1245 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \ 1319 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
1246 FW_PORT_CAP_ANEG) 1320 FW_PORT_CAP_ANEG)
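
The t4_ulprx_read_la() loop added above reads eight LA columns and stores each column's words eight apart, so the caller's buffer comes out row-major with one word per column per row. A small sketch of the same stride pattern against a stand-in data source (ULPRX_LA_SIZE is shortened here and the register accesses are not modelled):

#include <stdio.h>

#define NCOLS 8		/* the hardware exposes eight LA columns */
#define NROWS 16	/* stand-in for ULPRX_LA_SIZE, which is larger */

/* Stand-in data source: one word per (column, row) pair. */
static unsigned int fake_read(int col, int row)
{
	return (unsigned int)(col * 100 + row);
}

/* Mirror of the loop structure in t4_ulprx_read_la(): for each column,
 * successive words are stored NCOLS apart, so la_buf ends up row-major
 * with one entry per column per row.
 */
static void read_la(unsigned int *la_buf)
{
	for (int i = 0; i < NCOLS; i++) {
		unsigned int *p = la_buf + i;

		for (int j = 0; j < NROWS; j++, p += NCOLS)
			*p = fake_read(i, j);
	}
}

int main(void)
{
	unsigned int buf[NROWS * NCOLS];

	read_la(buf);
	printf("row 3: %u %u ... %u\n", buf[3 * NCOLS], buf[3 * NCOLS + 1],
	       buf[3 * NCOLS + NCOLS - 1]);
	return 0;
}
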
@@ -1365,95 +1439,97 @@ static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1365static void pcie_intr_handler(struct adapter *adapter) 1439static void pcie_intr_handler(struct adapter *adapter)
1366{ 1440{
1367 static const struct intr_info sysbus_intr_info[] = { 1441 static const struct intr_info sysbus_intr_info[] = {
1368 { RNPP, "RXNP array parity error", -1, 1 }, 1442 { RNPP_F, "RXNP array parity error", -1, 1 },
1369 { RPCP, "RXPC array parity error", -1, 1 }, 1443 { RPCP_F, "RXPC array parity error", -1, 1 },
1370 { RCIP, "RXCIF array parity error", -1, 1 }, 1444 { RCIP_F, "RXCIF array parity error", -1, 1 },
1371 { RCCP, "Rx completions control array parity error", -1, 1 }, 1445 { RCCP_F, "Rx completions control array parity error", -1, 1 },
1372 { RFTP, "RXFT array parity error", -1, 1 }, 1446 { RFTP_F, "RXFT array parity error", -1, 1 },
1373 { 0 } 1447 { 0 }
1374 }; 1448 };
1375 static const struct intr_info pcie_port_intr_info[] = { 1449 static const struct intr_info pcie_port_intr_info[] = {
1376 { TPCP, "TXPC array parity error", -1, 1 }, 1450 { TPCP_F, "TXPC array parity error", -1, 1 },
1377 { TNPP, "TXNP array parity error", -1, 1 }, 1451 { TNPP_F, "TXNP array parity error", -1, 1 },
1378 { TFTP, "TXFT array parity error", -1, 1 }, 1452 { TFTP_F, "TXFT array parity error", -1, 1 },
1379 { TCAP, "TXCA array parity error", -1, 1 }, 1453 { TCAP_F, "TXCA array parity error", -1, 1 },
1380 { TCIP, "TXCIF array parity error", -1, 1 }, 1454 { TCIP_F, "TXCIF array parity error", -1, 1 },
1381 { RCAP, "RXCA array parity error", -1, 1 }, 1455 { RCAP_F, "RXCA array parity error", -1, 1 },
1382 { OTDD, "outbound request TLP discarded", -1, 1 }, 1456 { OTDD_F, "outbound request TLP discarded", -1, 1 },
1383 { RDPE, "Rx data parity error", -1, 1 }, 1457 { RDPE_F, "Rx data parity error", -1, 1 },
1384 { TDUE, "Tx uncorrectable data error", -1, 1 }, 1458 { TDUE_F, "Tx uncorrectable data error", -1, 1 },
1385 { 0 } 1459 { 0 }
1386 }; 1460 };
1387 static const struct intr_info pcie_intr_info[] = { 1461 static const struct intr_info pcie_intr_info[] = {
1388 { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 }, 1462 { MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
1389 { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 }, 1463 { MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
1390 { MSIDATAPERR, "MSI data parity error", -1, 1 }, 1464 { MSIDATAPERR_F, "MSI data parity error", -1, 1 },
1391 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 }, 1465 { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
1392 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 }, 1466 { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
1393 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 }, 1467 { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
1394 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 }, 1468 { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
1395 { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 }, 1469 { PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
1396 { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 }, 1470 { PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
1397 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 }, 1471 { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
1398 { CCNTPERR, "PCI CMD channel count parity error", -1, 1 }, 1472 { CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
1399 { CREQPERR, "PCI CMD channel request parity error", -1, 1 }, 1473 { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
1400 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 }, 1474 { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
1401 { DCNTPERR, "PCI DMA channel count parity error", -1, 1 }, 1475 { DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
1402 { DREQPERR, "PCI DMA channel request parity error", -1, 1 }, 1476 { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
1403 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 }, 1477 { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
1404 { HCNTPERR, "PCI HMA channel count parity error", -1, 1 }, 1478 { HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
1405 { HREQPERR, "PCI HMA channel request parity error", -1, 1 }, 1479 { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
1406 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 }, 1480 { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
1407 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 }, 1481 { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
1408 { FIDPERR, "PCI FID parity error", -1, 1 }, 1482 { FIDPERR_F, "PCI FID parity error", -1, 1 },
1409 { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 }, 1483 { INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
1410 { MATAGPERR, "PCI MA tag parity error", -1, 1 }, 1484 { MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
1411 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 }, 1485 { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
1412 { RXCPLPERR, "PCI Rx completion parity error", -1, 1 }, 1486 { RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
1413 { RXWRPERR, "PCI Rx write parity error", -1, 1 }, 1487 { RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
1414 { RPLPERR, "PCI replay buffer parity error", -1, 1 }, 1488 { RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
1415 { PCIESINT, "PCI core secondary fault", -1, 1 }, 1489 { PCIESINT_F, "PCI core secondary fault", -1, 1 },
1416 { PCIEPINT, "PCI core primary fault", -1, 1 }, 1490 { PCIEPINT_F, "PCI core primary fault", -1, 1 },
1417 { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 }, 1491 { UNXSPLCPLERR_F, "PCI unexpected split completion error",
1492 -1, 0 },
1418 { 0 } 1493 { 0 }
1419 }; 1494 };
1420 1495
1421 static struct intr_info t5_pcie_intr_info[] = { 1496 static struct intr_info t5_pcie_intr_info[] = {
1422 { MSTGRPPERR, "Master Response Read Queue parity error", 1497 { MSTGRPPERR_F, "Master Response Read Queue parity error",
1423 -1, 1 }, 1498 -1, 1 },
1424 { MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 }, 1499 { MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
1425 { MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 }, 1500 { MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
1426 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 }, 1501 { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
1427 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 }, 1502 { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
1428 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 }, 1503 { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
1429 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 }, 1504 { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
1430 { PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error", 1505 { PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
1431 -1, 1 }, 1506 -1, 1 },
1432 { PIOREQGRPPERR, "PCI PIO request Group FIFO parity error", 1507 { PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
1433 -1, 1 }, 1508 -1, 1 },
1434 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 }, 1509 { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
1435 { MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 }, 1510 { MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
1436 { CREQPERR, "PCI CMD channel request parity error", -1, 1 }, 1511 { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
1437 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 }, 1512 { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
1438 { DREQWRPERR, "PCI DMA channel write request parity error", 1513 { DREQWRPERR_F, "PCI DMA channel write request parity error",
1439 -1, 1 }, 1514 -1, 1 },
1440 { DREQPERR, "PCI DMA channel request parity error", -1, 1 }, 1515 { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
1441 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 }, 1516 { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
1442 { HREQWRPERR, "PCI HMA channel count parity error", -1, 1 }, 1517 { HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 },
1443 { HREQPERR, "PCI HMA channel request parity error", -1, 1 }, 1518 { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
1444 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 }, 1519 { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
1445 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 }, 1520 { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
1446 { FIDPERR, "PCI FID parity error", -1, 1 }, 1521 { FIDPERR_F, "PCI FID parity error", -1, 1 },
1447 { VFIDPERR, "PCI INTx clear parity error", -1, 1 }, 1522 { VFIDPERR_F, "PCI INTx clear parity error", -1, 1 },
1448 { MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 }, 1523 { MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
1449 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 }, 1524 { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
1450 { IPRXHDRGRPPERR, "PCI IP Rx header group parity error", 1525 { IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
1451 -1, 1 }, 1526 -1, 1 },
1452 { IPRXDATAGRPPERR, "PCI IP Rx data group parity error", -1, 1 }, 1527 { IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
1453 { RPLPERR, "PCI IP replay buffer parity error", -1, 1 }, 1528 -1, 1 },
1454 { IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 }, 1529 { RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
1455 { TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 }, 1530 { IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
1456 { READRSPERR, "Outbound read error", -1, 0 }, 1531 { TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
1532 { READRSPERR_F, "Outbound read error", -1, 0 },
1457 { 0 } 1533 { 0 }
1458 }; 1534 };
1459 1535
@@ -1461,15 +1537,15 @@ static void pcie_intr_handler(struct adapter *adapter)
1461 1537
1462 if (is_t4(adapter->params.chip)) 1538 if (is_t4(adapter->params.chip))
1463 fat = t4_handle_intr_status(adapter, 1539 fat = t4_handle_intr_status(adapter,
1464 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, 1540 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
1465 sysbus_intr_info) + 1541 sysbus_intr_info) +
1466 t4_handle_intr_status(adapter, 1542 t4_handle_intr_status(adapter,
1467 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, 1543 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
1468 pcie_port_intr_info) + 1544 pcie_port_intr_info) +
1469 t4_handle_intr_status(adapter, PCIE_INT_CAUSE, 1545 t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
1470 pcie_intr_info); 1546 pcie_intr_info);
1471 else 1547 else
1472 fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE, 1548 fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
1473 t5_pcie_intr_info); 1549 t5_pcie_intr_info);
1474 1550
1475 if (fat) 1551 if (fat)
@@ -1483,11 +1559,11 @@ static void tp_intr_handler(struct adapter *adapter)
1483{ 1559{
1484 static const struct intr_info tp_intr_info[] = { 1560 static const struct intr_info tp_intr_info[] = {
1485 { 0x3fffffff, "TP parity error", -1, 1 }, 1561 { 0x3fffffff, "TP parity error", -1, 1 },
1486 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 }, 1562 { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
1487 { 0 } 1563 { 0 }
1488 }; 1564 };
1489 1565
1490 if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info)) 1566 if (t4_handle_intr_status(adapter, TP_INT_CAUSE_A, tp_intr_info))
1491 t4_fatal_err(adapter); 1567 t4_fatal_err(adapter);
1492} 1568}
1493 1569
@@ -1499,102 +1575,107 @@ static void sge_intr_handler(struct adapter *adapter)
1499 u64 v; 1575 u64 v;
1500 1576
1501 static const struct intr_info sge_intr_info[] = { 1577 static const struct intr_info sge_intr_info[] = {
1502 { ERR_CPL_EXCEED_IQE_SIZE, 1578 { ERR_CPL_EXCEED_IQE_SIZE_F,
1503 "SGE received CPL exceeding IQE size", -1, 1 }, 1579 "SGE received CPL exceeding IQE size", -1, 1 },
1504 { ERR_INVALID_CIDX_INC, 1580 { ERR_INVALID_CIDX_INC_F,
1505 "SGE GTS CIDX increment too large", -1, 0 }, 1581 "SGE GTS CIDX increment too large", -1, 0 },
1506 { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 }, 1582 { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
1507 { DBFIFO_LP_INT, NULL, -1, 0, t4_db_full }, 1583 { DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
1508 { DBFIFO_HP_INT, NULL, -1, 0, t4_db_full }, 1584 { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
1509 { ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped }, 1585 { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
1510 { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0, 1586 { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
1511 "SGE IQID > 1023 received CPL for FL", -1, 0 }, 1587 "SGE IQID > 1023 received CPL for FL", -1, 0 },
1512 { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1, 1588 { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
1513 0 }, 1589 0 },
1514 { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1, 1590 { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
1515 0 }, 1591 0 },
1516 { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1, 1592 { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
1517 0 }, 1593 0 },
1518 { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1, 1594 { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
1519 0 }, 1595 0 },
1520 { ERR_ING_CTXT_PRIO, 1596 { ERR_ING_CTXT_PRIO_F,
1521 "SGE too many priority ingress contexts", -1, 0 }, 1597 "SGE too many priority ingress contexts", -1, 0 },
1522 { ERR_EGR_CTXT_PRIO, 1598 { ERR_EGR_CTXT_PRIO_F,
1523 "SGE too many priority egress contexts", -1, 0 }, 1599 "SGE too many priority egress contexts", -1, 0 },
1524 { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 }, 1600 { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
1525 { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 }, 1601 { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
1526 { 0 } 1602 { 0 }
1527 }; 1603 };
1528 1604
1529 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) | 1605 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
1530 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32); 1606 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
1531 if (v) { 1607 if (v) {
1532 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n", 1608 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
1533 (unsigned long long)v); 1609 (unsigned long long)v);
1534 t4_write_reg(adapter, SGE_INT_CAUSE1, v); 1610 t4_write_reg(adapter, SGE_INT_CAUSE1_A, v);
1535 t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32); 1611 t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
1536 } 1612 }
1537 1613
1538 if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) || 1614 if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info) ||
1539 v != 0) 1615 v != 0)
1540 t4_fatal_err(adapter); 1616 t4_fatal_err(adapter);
1541} 1617}
1542 1618
1619#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
1620 OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
1621#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
1622 IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
1623
1543/* 1624/*
1544 * CIM interrupt handler. 1625 * CIM interrupt handler.
1545 */ 1626 */
1546static void cim_intr_handler(struct adapter *adapter) 1627static void cim_intr_handler(struct adapter *adapter)
1547{ 1628{
1548 static const struct intr_info cim_intr_info[] = { 1629 static const struct intr_info cim_intr_info[] = {
1549 { PREFDROPINT, "CIM control register prefetch drop", -1, 1 }, 1630 { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
1550 { OBQPARERR, "CIM OBQ parity error", -1, 1 }, 1631 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
1551 { IBQPARERR, "CIM IBQ parity error", -1, 1 }, 1632 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
1552 { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 }, 1633 { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
1553 { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 }, 1634 { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
1554 { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 }, 1635 { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
1555 { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 }, 1636 { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
1556 { 0 } 1637 { 0 }
1557 }; 1638 };
1558 static const struct intr_info cim_upintr_info[] = { 1639 static const struct intr_info cim_upintr_info[] = {
1559 { RSVDSPACEINT, "CIM reserved space access", -1, 1 }, 1640 { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
1560 { ILLTRANSINT, "CIM illegal transaction", -1, 1 }, 1641 { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
1561 { ILLWRINT, "CIM illegal write", -1, 1 }, 1642 { ILLWRINT_F, "CIM illegal write", -1, 1 },
1562 { ILLRDINT, "CIM illegal read", -1, 1 }, 1643 { ILLRDINT_F, "CIM illegal read", -1, 1 },
1563 { ILLRDBEINT, "CIM illegal read BE", -1, 1 }, 1644 { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
1564 { ILLWRBEINT, "CIM illegal write BE", -1, 1 }, 1645 { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
1565 { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 }, 1646 { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
1566 { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 }, 1647 { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
1567 { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 }, 1648 { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
1568 { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 }, 1649 { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
1569 { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 }, 1650 { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
1570 { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 }, 1651 { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
1571 { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 }, 1652 { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
1572 { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 }, 1653 { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
1573 { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 }, 1654 { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
1574 { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 }, 1655 { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
1575 { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 }, 1656 { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
1576 { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 }, 1657 { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
1577 { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 }, 1658 { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
1578 { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 }, 1659 { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
1579 { SGLRDPLINT , "CIM single read from PL space", -1, 1 }, 1660 { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
1580 { SGLWRPLINT , "CIM single write to PL space", -1, 1 }, 1661 { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
1581 { BLKRDPLINT , "CIM block read from PL space", -1, 1 }, 1662 { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
1582 { BLKWRPLINT , "CIM block write to PL space", -1, 1 }, 1663 { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
1583 { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 }, 1664 { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
1584 { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 }, 1665 { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
1585 { TIMEOUTINT , "CIM PIF timeout", -1, 1 }, 1666 { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
1586 { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 }, 1667 { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
1587 { 0 } 1668 { 0 }
1588 }; 1669 };
1589 1670
1590 int fat; 1671 int fat;
1591 1672
1592 if (t4_read_reg(adapter, MA_PCIE_FW) & PCIE_FW_ERR) 1673 if (t4_read_reg(adapter, PCIE_FW_A) & PCIE_FW_ERR_F)
1593 t4_report_fw_error(adapter); 1674 t4_report_fw_error(adapter);
1594 1675
1595 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE, 1676 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A,
1596 cim_intr_info) + 1677 cim_intr_info) +
1597 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE, 1678 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A,
1598 cim_upintr_info); 1679 cim_upintr_info);
1599 if (fat) 1680 if (fat)
1600 t4_fatal_err(adapter); 1681 t4_fatal_err(adapter);
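
All of the interrupt handlers touched in these hunks share one pattern: a zero-terminated table of { mask, message, stat index, fatal } entries handed to t4_handle_intr_status(). That helper is not part of this diff; assuming it walks the table, reports every matching bit and counts the fatal conditions, its shape is roughly the sketch below (simplified — the driver's struct also carries an extra handler hook used by the doorbell entries).

#include <stdio.h>

/* Shape of the intr_info tables used by these handlers. */
struct intr_info {
	unsigned int mask;	/* bits of interest in the cause register */
	const char *msg;	/* message to log, NULL to stay silent */
	short stat_idx;		/* statistics index, unused here */
	unsigned short fatal;	/* whether the condition is fatal */
};

/* Assumed behaviour of t4_handle_intr_status(): walk the zero-terminated
 * table, report every entry whose bits are set in @status, and return how
 * many fatal conditions were seen.
 */
static int handle_intr_status(unsigned int status, const struct intr_info *acts)
{
	int fatal = 0;

	for (; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal)
			fatal++;
		if (acts->msg)
			fprintf(stderr, "%s (%#x)\n", acts->msg,
				status & acts->mask);
	}
	return fatal;
}

int main(void)
{
	static const struct intr_info demo[] = {
		{ 0x1, "demo parity error", -1, 1 },
		{ 0x2, "demo non-fatal event", -1, 0 },
		{ 0 }
	};

	printf("fatal conditions: %d\n", handle_intr_status(0x3, demo));
	return 0;
}
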
@@ -1611,7 +1692,7 @@ static void ulprx_intr_handler(struct adapter *adapter)
1611 { 0 } 1692 { 0 }
1612 }; 1693 };
1613 1694
1614 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info)) 1695 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
1615 t4_fatal_err(adapter); 1696 t4_fatal_err(adapter);
1616} 1697}
1617 1698
@@ -1621,19 +1702,19 @@ static void ulprx_intr_handler(struct adapter *adapter)
1621static void ulptx_intr_handler(struct adapter *adapter) 1702static void ulptx_intr_handler(struct adapter *adapter)
1622{ 1703{
1623 static const struct intr_info ulptx_intr_info[] = { 1704 static const struct intr_info ulptx_intr_info[] = {
1624 { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1, 1705 { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
1625 0 }, 1706 0 },
1626 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1, 1707 { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
1627 0 }, 1708 0 },
1628 { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1, 1709 { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
1629 0 }, 1710 0 },
1630 { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1, 1711 { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
1631 0 }, 1712 0 },
1632 { 0xfffffff, "ULPTX parity error", -1, 1 }, 1713 { 0xfffffff, "ULPTX parity error", -1, 1 },
1633 { 0 } 1714 { 0 }
1634 }; 1715 };
1635 1716
1636 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info)) 1717 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
1637 t4_fatal_err(adapter); 1718 t4_fatal_err(adapter);
1638} 1719}
1639 1720
@@ -1643,19 +1724,20 @@ static void ulptx_intr_handler(struct adapter *adapter)
1643static void pmtx_intr_handler(struct adapter *adapter) 1724static void pmtx_intr_handler(struct adapter *adapter)
1644{ 1725{
1645 static const struct intr_info pmtx_intr_info[] = { 1726 static const struct intr_info pmtx_intr_info[] = {
1646 { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 }, 1727 { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
1647 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 }, 1728 { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
1648 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 }, 1729 { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
1649 { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 }, 1730 { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
1650 { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 }, 1731 { PMTX_FRAMING_ERROR_F, "PMTX framing error", -1, 1 },
1651 { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 }, 1732 { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
1652 { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 }, 1733 { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error",
1653 { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 }, 1734 -1, 1 },
1654 { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1}, 1735 { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
1736 { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
1655 { 0 } 1737 { 0 }
1656 }; 1738 };
1657 1739
1658 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info)) 1740 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE_A, pmtx_intr_info))
1659 t4_fatal_err(adapter); 1741 t4_fatal_err(adapter);
1660} 1742}
1661 1743
@@ -1665,16 +1747,17 @@ static void pmtx_intr_handler(struct adapter *adapter)
1665static void pmrx_intr_handler(struct adapter *adapter) 1747static void pmrx_intr_handler(struct adapter *adapter)
1666{ 1748{
1667 static const struct intr_info pmrx_intr_info[] = { 1749 static const struct intr_info pmrx_intr_info[] = {
1668 { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 }, 1750 { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
1669 { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 }, 1751 { PMRX_FRAMING_ERROR_F, "PMRX framing error", -1, 1 },
1670 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 }, 1752 { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
1671 { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 }, 1753 { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error",
1672 { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 }, 1754 -1, 1 },
1673 { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1}, 1755 { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
1756 { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
1674 { 0 } 1757 { 0 }
1675 }; 1758 };
1676 1759
1677 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info)) 1760 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE_A, pmrx_intr_info))
1678 t4_fatal_err(adapter); 1761 t4_fatal_err(adapter);
1679} 1762}
1680 1763
@@ -1684,16 +1767,16 @@ static void pmrx_intr_handler(struct adapter *adapter)
1684static void cplsw_intr_handler(struct adapter *adapter) 1767static void cplsw_intr_handler(struct adapter *adapter)
1685{ 1768{
1686 static const struct intr_info cplsw_intr_info[] = { 1769 static const struct intr_info cplsw_intr_info[] = {
1687 { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 }, 1770 { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
1688 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 }, 1771 { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
1689 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 }, 1772 { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
1690 { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 }, 1773 { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
1691 { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 }, 1774 { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
1692 { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 }, 1775 { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
1693 { 0 } 1776 { 0 }
1694 }; 1777 };
1695 1778
1696 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info)) 1779 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE_A, cplsw_intr_info))
1697 t4_fatal_err(adapter); 1780 t4_fatal_err(adapter);
1698} 1781}
1699 1782
@@ -1703,15 +1786,15 @@ static void cplsw_intr_handler(struct adapter *adapter)
1703static void le_intr_handler(struct adapter *adap) 1786static void le_intr_handler(struct adapter *adap)
1704{ 1787{
1705 static const struct intr_info le_intr_info[] = { 1788 static const struct intr_info le_intr_info[] = {
1706 { LIPMISS, "LE LIP miss", -1, 0 }, 1789 { LIPMISS_F, "LE LIP miss", -1, 0 },
1707 { LIP0, "LE 0 LIP error", -1, 0 }, 1790 { LIP0_F, "LE 0 LIP error", -1, 0 },
1708 { PARITYERR, "LE parity error", -1, 1 }, 1791 { PARITYERR_F, "LE parity error", -1, 1 },
1709 { UNKNOWNCMD, "LE unknown command", -1, 1 }, 1792 { UNKNOWNCMD_F, "LE unknown command", -1, 1 },
1710 { REQQPARERR, "LE request queue parity error", -1, 1 }, 1793 { REQQPARERR_F, "LE request queue parity error", -1, 1 },
1711 { 0 } 1794 { 0 }
1712 }; 1795 };
1713 1796
1714 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info)) 1797 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A, le_intr_info))
1715 t4_fatal_err(adap); 1798 t4_fatal_err(adap);
1716} 1799}
1717 1800
@@ -1725,19 +1808,22 @@ static void mps_intr_handler(struct adapter *adapter)
1725 { 0 } 1808 { 0 }
1726 }; 1809 };
1727 static const struct intr_info mps_tx_intr_info[] = { 1810 static const struct intr_info mps_tx_intr_info[] = {
1728 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 }, 1811 { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
1729 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 }, 1812 { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1730 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 }, 1813 { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
1731 { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 }, 1814 -1, 1 },
1732 { BUBBLE, "MPS Tx underflow", -1, 1 }, 1815 { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
1733 { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 }, 1816 -1, 1 },
1734 { FRMERR, "MPS Tx framing error", -1, 1 }, 1817 { BUBBLE_F, "MPS Tx underflow", -1, 1 },
1818 { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
1819 { FRMERR_F, "MPS Tx framing error", -1, 1 },
1735 { 0 } 1820 { 0 }
1736 }; 1821 };
1737 static const struct intr_info mps_trc_intr_info[] = { 1822 static const struct intr_info mps_trc_intr_info[] = {
1738 { FILTMEM, "MPS TRC filter parity error", -1, 1 }, 1823 { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
1739 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 }, 1824 { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
1740 { MISCPERR, "MPS TRC misc parity error", -1, 1 }, 1825 -1, 1 },
1826 { MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
1741 { 0 } 1827 { 0 }
1742 }; 1828 };
1743 static const struct intr_info mps_stat_sram_intr_info[] = { 1829 static const struct intr_info mps_stat_sram_intr_info[] = {
@@ -1753,37 +1839,37 @@ static void mps_intr_handler(struct adapter *adapter)
1753 { 0 } 1839 { 0 }
1754 }; 1840 };
1755 static const struct intr_info mps_cls_intr_info[] = { 1841 static const struct intr_info mps_cls_intr_info[] = {
1756 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 }, 1842 { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
1757 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 }, 1843 { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
1758 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 }, 1844 { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
1759 { 0 } 1845 { 0 }
1760 }; 1846 };
1761 1847
1762 int fat; 1848 int fat;
1763 1849
1764 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE, 1850 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE_A,
1765 mps_rx_intr_info) + 1851 mps_rx_intr_info) +
1766 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE, 1852 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE_A,
1767 mps_tx_intr_info) + 1853 mps_tx_intr_info) +
1768 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE, 1854 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE_A,
1769 mps_trc_intr_info) + 1855 mps_trc_intr_info) +
1770 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM, 1856 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
1771 mps_stat_sram_intr_info) + 1857 mps_stat_sram_intr_info) +
1772 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO, 1858 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
1773 mps_stat_tx_intr_info) + 1859 mps_stat_tx_intr_info) +
1774 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO, 1860 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
1775 mps_stat_rx_intr_info) + 1861 mps_stat_rx_intr_info) +
1776 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE, 1862 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE_A,
1777 mps_cls_intr_info); 1863 mps_cls_intr_info);
1778 1864
1779 t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT | 1865 t4_write_reg(adapter, MPS_INT_CAUSE_A, 0);
1780 RXINT | TXINT | STATINT); 1866 t4_read_reg(adapter, MPS_INT_CAUSE_A); /* flush */
1781 t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */
1782 if (fat) 1867 if (fat)
1783 t4_fatal_err(adapter); 1868 t4_fatal_err(adapter);
1784} 1869}
1785 1870
1786#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE) 1871#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
1872 ECC_UE_INT_CAUSE_F)
1787 1873
1788/* 1874/*
1789 * EDC/MC interrupt handler. 1875 * EDC/MC interrupt handler.
@@ -1795,40 +1881,40 @@ static void mem_intr_handler(struct adapter *adapter, int idx)
1795 unsigned int addr, cnt_addr, v; 1881 unsigned int addr, cnt_addr, v;
1796 1882
1797 if (idx <= MEM_EDC1) { 1883 if (idx <= MEM_EDC1) {
1798 addr = EDC_REG(EDC_INT_CAUSE, idx); 1884 addr = EDC_REG(EDC_INT_CAUSE_A, idx);
1799 cnt_addr = EDC_REG(EDC_ECC_STATUS, idx); 1885 cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
1800 } else if (idx == MEM_MC) { 1886 } else if (idx == MEM_MC) {
1801 if (is_t4(adapter->params.chip)) { 1887 if (is_t4(adapter->params.chip)) {
1802 addr = MC_INT_CAUSE; 1888 addr = MC_INT_CAUSE_A;
1803 cnt_addr = MC_ECC_STATUS; 1889 cnt_addr = MC_ECC_STATUS_A;
1804 } else { 1890 } else {
1805 addr = MC_P_INT_CAUSE; 1891 addr = MC_P_INT_CAUSE_A;
1806 cnt_addr = MC_P_ECC_STATUS; 1892 cnt_addr = MC_P_ECC_STATUS_A;
1807 } 1893 }
1808 } else { 1894 } else {
1809 addr = MC_REG(MC_P_INT_CAUSE, 1); 1895 addr = MC_REG(MC_P_INT_CAUSE_A, 1);
1810 cnt_addr = MC_REG(MC_P_ECC_STATUS, 1); 1896 cnt_addr = MC_REG(MC_P_ECC_STATUS_A, 1);
1811 } 1897 }
1812 1898
1813 v = t4_read_reg(adapter, addr) & MEM_INT_MASK; 1899 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
1814 if (v & PERR_INT_CAUSE) 1900 if (v & PERR_INT_CAUSE_F)
1815 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n", 1901 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
1816 name[idx]); 1902 name[idx]);
1817 if (v & ECC_CE_INT_CAUSE) { 1903 if (v & ECC_CE_INT_CAUSE_F) {
1818 u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr)); 1904 u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));
1819 1905
1820 t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK); 1906 t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
1821 if (printk_ratelimit()) 1907 if (printk_ratelimit())
1822 dev_warn(adapter->pdev_dev, 1908 dev_warn(adapter->pdev_dev,
1823 "%u %s correctable ECC data error%s\n", 1909 "%u %s correctable ECC data error%s\n",
1824 cnt, name[idx], cnt > 1 ? "s" : ""); 1910 cnt, name[idx], cnt > 1 ? "s" : "");
1825 } 1911 }
1826 if (v & ECC_UE_INT_CAUSE) 1912 if (v & ECC_UE_INT_CAUSE_F)
1827 dev_alert(adapter->pdev_dev, 1913 dev_alert(adapter->pdev_dev,
1828 "%s uncorrectable ECC data error\n", name[idx]); 1914 "%s uncorrectable ECC data error\n", name[idx]);
1829 1915
1830 t4_write_reg(adapter, addr, v); 1916 t4_write_reg(adapter, addr, v);
1831 if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE)) 1917 if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
1832 t4_fatal_err(adapter); 1918 t4_fatal_err(adapter);
1833} 1919}
1834 1920
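
mem_intr_handler() above extracts the correctable-ECC count with ECC_CECNT_G() and clears it by writing the field's all-ones value back. A sketch of that get/clear idiom with a placeholder shift and width (the real ECC_CECNT_* layout lives in t4_regs.h and is not reproduced here):

#include <stdint.h>
#include <stdio.h>

/* Placeholder position and width of the correctable-error count field. */
#define CECNT_SHIFT	16
#define CECNT_MASK	0xffffu

/* What ECC_CECNT_G() does: pull the count out of the ECC status word. */
static uint32_t ecc_cecnt_get(uint32_t status)
{
	return (status >> CECNT_SHIFT) & CECNT_MASK;
}

/* What ECC_CECNT_V(ECC_CECNT_M) produces: all ones in the count field,
 * written back so the hardware clears the counter.
 */
static uint32_t ecc_cecnt_clear_value(void)
{
	return CECNT_MASK << CECNT_SHIFT;
}

int main(void)
{
	uint32_t status = (42u << CECNT_SHIFT) | 0x3;

	printf("correctable ECC errors: %u\n", (unsigned)ecc_cecnt_get(status));
	printf("value written back to clear: %#x\n",
	       (unsigned)ecc_cecnt_clear_value());
	return 0;
}
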
@@ -1837,26 +1923,26 @@ static void mem_intr_handler(struct adapter *adapter, int idx)
1837 */ 1923 */
1838static void ma_intr_handler(struct adapter *adap) 1924static void ma_intr_handler(struct adapter *adap)
1839{ 1925{
1840 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE); 1926 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE_A);
1841 1927
1842 if (status & MEM_PERR_INT_CAUSE) { 1928 if (status & MEM_PERR_INT_CAUSE_F) {
1843 dev_alert(adap->pdev_dev, 1929 dev_alert(adap->pdev_dev,
1844 "MA parity error, parity status %#x\n", 1930 "MA parity error, parity status %#x\n",
1845 t4_read_reg(adap, MA_PARITY_ERROR_STATUS)); 1931 t4_read_reg(adap, MA_PARITY_ERROR_STATUS1_A));
1846 if (is_t5(adap->params.chip)) 1932 if (is_t5(adap->params.chip))
1847 dev_alert(adap->pdev_dev, 1933 dev_alert(adap->pdev_dev,
1848 "MA parity error, parity status %#x\n", 1934 "MA parity error, parity status %#x\n",
1849 t4_read_reg(adap, 1935 t4_read_reg(adap,
1850 MA_PARITY_ERROR_STATUS2)); 1936 MA_PARITY_ERROR_STATUS2_A));
1851 } 1937 }
1852 if (status & MEM_WRAP_INT_CAUSE) { 1938 if (status & MEM_WRAP_INT_CAUSE_F) {
1853 v = t4_read_reg(adap, MA_INT_WRAP_STATUS); 1939 v = t4_read_reg(adap, MA_INT_WRAP_STATUS_A);
1854 dev_alert(adap->pdev_dev, "MA address wrap-around error by " 1940 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
1855 "client %u to address %#x\n", 1941 "client %u to address %#x\n",
1856 MEM_WRAP_CLIENT_NUM_GET(v), 1942 MEM_WRAP_CLIENT_NUM_G(v),
1857 MEM_WRAP_ADDRESS_GET(v) << 4); 1943 MEM_WRAP_ADDRESS_G(v) << 4);
1858 } 1944 }
1859 t4_write_reg(adap, MA_INT_CAUSE, status); 1945 t4_write_reg(adap, MA_INT_CAUSE_A, status);
1860 t4_fatal_err(adap); 1946 t4_fatal_err(adap);
1861} 1947}
1862 1948
@@ -1866,13 +1952,13 @@ static void ma_intr_handler(struct adapter *adap)
1866static void smb_intr_handler(struct adapter *adap) 1952static void smb_intr_handler(struct adapter *adap)
1867{ 1953{
1868 static const struct intr_info smb_intr_info[] = { 1954 static const struct intr_info smb_intr_info[] = {
1869 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 }, 1955 { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
1870 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 }, 1956 { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
1871 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 }, 1957 { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
1872 { 0 } 1958 { 0 }
1873 }; 1959 };
1874 1960
1875 if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info)) 1961 if (t4_handle_intr_status(adap, SMB_INT_CAUSE_A, smb_intr_info))
1876 t4_fatal_err(adap); 1962 t4_fatal_err(adap);
1877} 1963}
1878 1964
@@ -1882,14 +1968,14 @@ static void smb_intr_handler(struct adapter *adap)
1882static void ncsi_intr_handler(struct adapter *adap) 1968static void ncsi_intr_handler(struct adapter *adap)
1883{ 1969{
1884 static const struct intr_info ncsi_intr_info[] = { 1970 static const struct intr_info ncsi_intr_info[] = {
1885 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 }, 1971 { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
1886 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 }, 1972 { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
1887 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 }, 1973 { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
1888 { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 }, 1974 { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
1889 { 0 } 1975 { 0 }
1890 }; 1976 };
1891 1977
1892 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info)) 1978 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE_A, ncsi_intr_info))
1893 t4_fatal_err(adap); 1979 t4_fatal_err(adap);
1894} 1980}
1895 1981
@@ -1901,23 +1987,23 @@ static void xgmac_intr_handler(struct adapter *adap, int port)
1901 u32 v, int_cause_reg; 1987 u32 v, int_cause_reg;
1902 1988
1903 if (is_t4(adap->params.chip)) 1989 if (is_t4(adap->params.chip))
1904 int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE); 1990 int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE_A);
1905 else 1991 else
1906 int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE); 1992 int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A);
1907 1993
1908 v = t4_read_reg(adap, int_cause_reg); 1994 v = t4_read_reg(adap, int_cause_reg);
1909 1995
1910 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR; 1996 v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
1911 if (!v) 1997 if (!v)
1912 return; 1998 return;
1913 1999
1914 if (v & TXFIFO_PRTY_ERR) 2000 if (v & TXFIFO_PRTY_ERR_F)
1915 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n", 2001 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
1916 port); 2002 port);
1917 if (v & RXFIFO_PRTY_ERR) 2003 if (v & RXFIFO_PRTY_ERR_F)
1918 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n", 2004 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
1919 port); 2005 port);
1920 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v); 2006 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE_A), v);
1921 t4_fatal_err(adap); 2007 t4_fatal_err(adap);
1922} 2008}
1923 2009
@@ -1927,19 +2013,19 @@ static void xgmac_intr_handler(struct adapter *adap, int port)
1927static void pl_intr_handler(struct adapter *adap) 2013static void pl_intr_handler(struct adapter *adap)
1928{ 2014{
1929 static const struct intr_info pl_intr_info[] = { 2015 static const struct intr_info pl_intr_info[] = {
1930 { FATALPERR, "T4 fatal parity error", -1, 1 }, 2016 { FATALPERR_F, "T4 fatal parity error", -1, 1 },
1931 { PERRVFID, "PL VFID_MAP parity error", -1, 1 }, 2017 { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
1932 { 0 } 2018 { 0 }
1933 }; 2019 };
1934 2020
1935 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info)) 2021 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE_A, pl_intr_info))
1936 t4_fatal_err(adap); 2022 t4_fatal_err(adap);
1937} 2023}
1938 2024
1939#define PF_INTR_MASK (PFSW) 2025#define PF_INTR_MASK (PFSW_F)
1940#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \ 2026#define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
1941 EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \ 2027 EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
1942 CPL_SWITCH | SGE | ULP_TX) 2028 CPL_SWITCH_F | SGE_F | ULP_TX_F)
1943 2029
1944/** 2030/**
1945 * t4_slow_intr_handler - control path interrupt handler 2031 * t4_slow_intr_handler - control path interrupt handler
@@ -1951,60 +2037,60 @@ static void pl_intr_handler(struct adapter *adap)
1951 */ 2037 */
1952int t4_slow_intr_handler(struct adapter *adapter) 2038int t4_slow_intr_handler(struct adapter *adapter)
1953{ 2039{
1954 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE); 2040 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE_A);
1955 2041
1956 if (!(cause & GLBL_INTR_MASK)) 2042 if (!(cause & GLBL_INTR_MASK))
1957 return 0; 2043 return 0;
1958 if (cause & CIM) 2044 if (cause & CIM_F)
1959 cim_intr_handler(adapter); 2045 cim_intr_handler(adapter);
1960 if (cause & MPS) 2046 if (cause & MPS_F)
1961 mps_intr_handler(adapter); 2047 mps_intr_handler(adapter);
1962 if (cause & NCSI) 2048 if (cause & NCSI_F)
1963 ncsi_intr_handler(adapter); 2049 ncsi_intr_handler(adapter);
1964 if (cause & PL) 2050 if (cause & PL_F)
1965 pl_intr_handler(adapter); 2051 pl_intr_handler(adapter);
1966 if (cause & SMB) 2052 if (cause & SMB_F)
1967 smb_intr_handler(adapter); 2053 smb_intr_handler(adapter);
1968 if (cause & XGMAC0) 2054 if (cause & XGMAC0_F)
1969 xgmac_intr_handler(adapter, 0); 2055 xgmac_intr_handler(adapter, 0);
1970 if (cause & XGMAC1) 2056 if (cause & XGMAC1_F)
1971 xgmac_intr_handler(adapter, 1); 2057 xgmac_intr_handler(adapter, 1);
1972 if (cause & XGMAC_KR0) 2058 if (cause & XGMAC_KR0_F)
1973 xgmac_intr_handler(adapter, 2); 2059 xgmac_intr_handler(adapter, 2);
1974 if (cause & XGMAC_KR1) 2060 if (cause & XGMAC_KR1_F)
1975 xgmac_intr_handler(adapter, 3); 2061 xgmac_intr_handler(adapter, 3);
1976 if (cause & PCIE) 2062 if (cause & PCIE_F)
1977 pcie_intr_handler(adapter); 2063 pcie_intr_handler(adapter);
1978 if (cause & MC) 2064 if (cause & MC_F)
1979 mem_intr_handler(adapter, MEM_MC); 2065 mem_intr_handler(adapter, MEM_MC);
1980 if (!is_t4(adapter->params.chip) && (cause & MC1)) 2066 if (!is_t4(adapter->params.chip) && (cause & MC1_S))
1981 mem_intr_handler(adapter, MEM_MC1); 2067 mem_intr_handler(adapter, MEM_MC1);
1982 if (cause & EDC0) 2068 if (cause & EDC0_F)
1983 mem_intr_handler(adapter, MEM_EDC0); 2069 mem_intr_handler(adapter, MEM_EDC0);
1984 if (cause & EDC1) 2070 if (cause & EDC1_F)
1985 mem_intr_handler(adapter, MEM_EDC1); 2071 mem_intr_handler(adapter, MEM_EDC1);
1986 if (cause & LE) 2072 if (cause & LE_F)
1987 le_intr_handler(adapter); 2073 le_intr_handler(adapter);
1988 if (cause & TP) 2074 if (cause & TP_F)
1989 tp_intr_handler(adapter); 2075 tp_intr_handler(adapter);
1990 if (cause & MA) 2076 if (cause & MA_F)
1991 ma_intr_handler(adapter); 2077 ma_intr_handler(adapter);
1992 if (cause & PM_TX) 2078 if (cause & PM_TX_F)
1993 pmtx_intr_handler(adapter); 2079 pmtx_intr_handler(adapter);
1994 if (cause & PM_RX) 2080 if (cause & PM_RX_F)
1995 pmrx_intr_handler(adapter); 2081 pmrx_intr_handler(adapter);
1996 if (cause & ULP_RX) 2082 if (cause & ULP_RX_F)
1997 ulprx_intr_handler(adapter); 2083 ulprx_intr_handler(adapter);
1998 if (cause & CPL_SWITCH) 2084 if (cause & CPL_SWITCH_F)
1999 cplsw_intr_handler(adapter); 2085 cplsw_intr_handler(adapter);
2000 if (cause & SGE) 2086 if (cause & SGE_F)
2001 sge_intr_handler(adapter); 2087 sge_intr_handler(adapter);
2002 if (cause & ULP_TX) 2088 if (cause & ULP_TX_F)
2003 ulptx_intr_handler(adapter); 2089 ulptx_intr_handler(adapter);
2004 2090
2005 /* Clear the interrupts just processed for which we are the master. */ 2091 /* Clear the interrupts just processed for which we are the master. */
2006 t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK); 2092 t4_write_reg(adapter, PL_INT_CAUSE_A, cause & GLBL_INTR_MASK);
2007 (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */ 2093 (void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */
2008 return 1; 2094 return 1;
2009} 2095}
2010 2096
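
t4_slow_intr_handler() dispatches on the cause bits it owns, then clears exactly those bits in PL_INT_CAUSE and re-reads the register to flush the posted write. A toy model of that dispatch-and-clear flow; the bit values are placeholders and the hardware's write-to-clear behaviour is simulated with a plain variable.

#include <stdio.h>

#define CIM_BIT		0x1	/* placeholder cause bits, not the real */
#define MPS_BIT		0x2	/* PL_INT_CAUSE layout */
#define GLBL_MASK	(CIM_BIT | MPS_BIT)

/* Stand-in for PL_INT_CAUSE; bit 0x80 belongs to some other master and
 * must survive the clear.
 */
static unsigned int pl_int_cause = CIM_BIT | 0x80;

static int slow_intr_handler(void)
{
	unsigned int cause = pl_int_cause;

	if (!(cause & GLBL_MASK))
		return 0;
	if (cause & CIM_BIT)
		printf("CIM interrupt\n");
	if (cause & MPS_BIT)
		printf("MPS interrupt\n");

	pl_int_cause &= ~(cause & GLBL_MASK);	/* clear only our bits */
	(void)pl_int_cause;			/* stands in for the flush read */
	return 1;
}

int main(void)
{
	int handled = slow_intr_handler();

	printf("handled: %d, cause now %#x\n", handled, pl_int_cause);
	return 0;
}
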
@@ -2023,19 +2109,19 @@ int t4_slow_intr_handler(struct adapter *adapter)
2023 */ 2109 */
2024void t4_intr_enable(struct adapter *adapter) 2110void t4_intr_enable(struct adapter *adapter)
2025{ 2111{
2026 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI)); 2112 u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
2027 2113
2028 t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE | 2114 t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
2029 ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 | 2115 ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
2030 ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 | 2116 ERR_DROPPED_DB_F | ERR_DATA_CPL_ON_HIGH_QID1_F |
2031 ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 | 2117 ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
2032 ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 | 2118 ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
2033 ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO | 2119 ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
2034 ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR | 2120 ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F |
2035 DBFIFO_HP_INT | DBFIFO_LP_INT | 2121 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F |
2036 EGRESS_SIZE_ERR); 2122 EGRESS_SIZE_ERR_F);
2037 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK); 2123 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
2038 t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf); 2124 t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
2039} 2125}
2040 2126
2041/** 2127/**
@@ -2048,10 +2134,10 @@ void t4_intr_enable(struct adapter *adapter)
2048 */ 2134 */
2049void t4_intr_disable(struct adapter *adapter) 2135void t4_intr_disable(struct adapter *adapter)
2050{ 2136{
2051 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI)); 2137 u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
2052 2138
2053 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0); 2139 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
2054 t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0); 2140 t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
2055} 2141}
2056 2142
2057/** 2143/**
@@ -2166,6 +2252,147 @@ int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2166 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL); 2252 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2167} 2253}
2168 2254
2255/* Read an RSS table row */
2256static int rd_rss_row(struct adapter *adap, int row, u32 *val)
2257{
2258 t4_write_reg(adap, TP_RSS_LKP_TABLE_A, 0xfff00000 | row);
2259 return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE_A, LKPTBLROWVLD_F, 1,
2260 5, 0, val);
2261}
2262
2263/**
2264 * t4_read_rss - read the contents of the RSS mapping table
2265 * @adapter: the adapter
2266 * @map: holds the contents of the RSS mapping table
2267 *
2268 * Reads the contents of the RSS hash->queue mapping table.
2269 */
2270int t4_read_rss(struct adapter *adapter, u16 *map)
2271{
2272 u32 val;
2273 int i, ret;
2274
2275 for (i = 0; i < RSS_NENTRIES / 2; ++i) {
2276 ret = rd_rss_row(adapter, i, &val);
2277 if (ret)
2278 return ret;
2279 *map++ = LKPTBLQUEUE0_G(val);
2280 *map++ = LKPTBLQUEUE1_G(val);
2281 }
2282 return 0;
2283}
2284
2285/**
2286 * t4_read_rss_key - read the global RSS key
2287 * @adap: the adapter
2288 * @key: 10-entry array holding the 320-bit RSS key
2289 *
2290 * Reads the global 320-bit RSS key.
2291 */
2292void t4_read_rss_key(struct adapter *adap, u32 *key)
2293{
2294 t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
2295 TP_RSS_SECRET_KEY0_A);
2296}
2297
2298/**
2299 * t4_write_rss_key - program one of the RSS keys
2300 * @adap: the adapter
2301 * @key: 10-entry array holding the 320-bit RSS key
2302 * @idx: which RSS key to write
2303 *
2304 * Writes one of the RSS keys with the given 320-bit value. If @idx is
2305 * 0..15 the corresponding entry in the RSS key table is written,
2306 * otherwise the global RSS key is written.
2307 */
2308void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
2309{
2310 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
2311 TP_RSS_SECRET_KEY0_A);
2312 if (idx >= 0 && idx < 16)
2313 t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
2314 KEYWRADDR_V(idx) | KEYWREN_F);
2315}
2316
2317/**
2318 * t4_read_rss_pf_config - read PF RSS Configuration Table
2319 * @adapter: the adapter
2320 * @index: the entry in the PF RSS table to read
2321 * @valp: where to store the returned value
2322 *
2323 * Reads the PF RSS Configuration Table at the specified index and returns
2324 * the value found there.
2325 */
2326void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
2327 u32 *valp)
2328{
2329 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
2330 valp, 1, TP_RSS_PF0_CONFIG_A + index);
2331}
2332
2333/**
2334 * t4_read_rss_vf_config - read VF RSS Configuration Table
2335 * @adapter: the adapter
2336 * @index: the entry in the VF RSS table to read
2337 * @vfl: where to store the returned VFL
2338 * @vfh: where to store the returned VFH
2339 *
2340 * Reads the VF RSS Configuration Table at the specified index and returns
2341 * the (VFL, VFH) values found there.
2342 */
2343void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
2344 u32 *vfl, u32 *vfh)
2345{
2346 u32 vrt, mask, data;
2347
2348 mask = VFWRADDR_V(VFWRADDR_M);
2349 data = VFWRADDR_V(index);
2350
2351 /* Request that the index'th VF Table values be read into VFL/VFH.
2352 */
2353 vrt = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A);
2354 vrt &= ~(VFRDRG_F | VFWREN_F | KEYWREN_F | mask);
2355 vrt |= data | VFRDEN_F;
2356 t4_write_reg(adapter, TP_RSS_CONFIG_VRT_A, vrt);
2357
2358 /* Grab the VFL/VFH values ...
2359 */
2360 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
2361 vfl, 1, TP_RSS_VFL_CONFIG_A);
2362 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
2363 vfh, 1, TP_RSS_VFH_CONFIG_A);
2364}
2365
2366/**
2367 * t4_read_rss_pf_map - read PF RSS Map
2368 * @adapter: the adapter
2369 *
2370 * Reads the PF RSS Map register and returns its value.
2371 */
2372u32 t4_read_rss_pf_map(struct adapter *adapter)
2373{
2374 u32 pfmap;
2375
2376 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
2377 &pfmap, 1, TP_RSS_PF_MAP_A);
2378 return pfmap;
2379}
2380
2381/**
2382 * t4_read_rss_pf_mask - read PF RSS Mask
2383 * @adapter: the adapter
2384 *
2385 * Reads the PF RSS Mask register and returns its value.
2386 */
2387u32 t4_read_rss_pf_mask(struct adapter *adapter)
2388{
2389 u32 pfmask;
2390
2391 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
2392 &pfmask, 1, TP_RSS_PF_MSK_A);
2393 return pfmask;
2394}
2395
2169/** 2396/**
2170 * t4_tp_get_tcp_stats - read TP's TCP MIB counters 2397 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
2171 * @adap: the adapter 2398 * @adap: the adapter
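
Each TP_RSS_LKP_TABLE word read by rd_rss_row() carries two queue entries, which is why t4_read_rss() emits two u16s per register read. A sketch of that unpacking, assuming 10-bit queue fields in bits 9:0 and 19:10 in place of the driver's LKPTBLQUEUE0_G()/LKPTBLQUEUE1_G() definitions:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout of one lookup-table word: two 10-bit queue indices,
 * entry 0 in bits 9:0 and entry 1 in bits 19:10 (illustrative only).
 */
#define LKPTBLQUEUE0_GET(x)	((x) & 0x3ffu)
#define LKPTBLQUEUE1_GET(x)	(((x) >> 10) & 0x3ffu)

/* Unpack lookup words into a flat queue map, two entries per word, the way
 * t4_read_rss() fills its caller's table from RSS_NENTRIES / 2 reads.
 */
static void unpack_rss_map(const uint32_t *rows, int nrows, uint16_t *map)
{
	for (int i = 0; i < nrows; i++) {
		*map++ = (uint16_t)LKPTBLQUEUE0_GET(rows[i]);
		*map++ = (uint16_t)LKPTBLQUEUE1_GET(rows[i]);
	}
}

int main(void)
{
	uint32_t rows[2] = { (5u << 10) | 4u, (7u << 10) | 6u };
	uint16_t map[4];

	unpack_rss_map(rows, 2, map);
	printf("%u %u %u %u\n", (unsigned)map[0], (unsigned)map[1],
	       (unsigned)map[2], (unsigned)map[3]);
	return 0;
}
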
@@ -2178,23 +2405,23 @@ int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2178void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, 2405void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
2179 struct tp_tcp_stats *v6) 2406 struct tp_tcp_stats *v6)
2180{ 2407{
2181 u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1]; 2408 u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1];
2182 2409
2183#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST) 2410#define STAT_IDX(x) ((TP_MIB_TCP_##x##_A) - TP_MIB_TCP_OUT_RST_A)
2184#define STAT(x) val[STAT_IDX(x)] 2411#define STAT(x) val[STAT_IDX(x)]
2185#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO)) 2412#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
2186 2413
2187 if (v4) { 2414 if (v4) {
2188 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val, 2415 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
2189 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST); 2416 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST_A);
2190 v4->tcpOutRsts = STAT(OUT_RST); 2417 v4->tcpOutRsts = STAT(OUT_RST);
2191 v4->tcpInSegs = STAT64(IN_SEG); 2418 v4->tcpInSegs = STAT64(IN_SEG);
2192 v4->tcpOutSegs = STAT64(OUT_SEG); 2419 v4->tcpOutSegs = STAT64(OUT_SEG);
2193 v4->tcpRetransSegs = STAT64(RXT_SEG); 2420 v4->tcpRetransSegs = STAT64(RXT_SEG);
2194 } 2421 }
2195 if (v6) { 2422 if (v6) {
2196 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val, 2423 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
2197 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST); 2424 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST_A);
2198 v6->tcpOutRsts = STAT(OUT_RST); 2425 v6->tcpOutRsts = STAT(OUT_RST);
2199 v6->tcpInSegs = STAT64(IN_SEG); 2426 v6->tcpInSegs = STAT64(IN_SEG);
2200 v6->tcpOutSegs = STAT64(OUT_SEG); 2427 v6->tcpOutSegs = STAT64(OUT_SEG);
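
The TP MIB exposes its 64-bit TCP counters as HI/LO register pairs; the STAT64() macro above stitches the two halves of the val[] window back together. The same combination in isolation:

#include <stdint.h>
#include <stdio.h>

/* What STAT64() does once STAT_IDX() has located the two words in val[]:
 * combine the HI and LO halves of a TP MIB counter into one 64-bit value.
 */
static uint64_t mib_counter64(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	printf("%llu\n",
	       (unsigned long long)mib_counter64(0x1, 0x80000000u));
	return 0;
}
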
@@ -2219,16 +2446,37 @@ void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
2219 int i; 2446 int i;
2220 2447
2221 for (i = 0; i < NMTUS; ++i) { 2448 for (i = 0; i < NMTUS; ++i) {
2222 t4_write_reg(adap, TP_MTU_TABLE, 2449 t4_write_reg(adap, TP_MTU_TABLE_A,
2223 MTUINDEX(0xff) | MTUVALUE(i)); 2450 MTUINDEX_V(0xff) | MTUVALUE_V(i));
2224 v = t4_read_reg(adap, TP_MTU_TABLE); 2451 v = t4_read_reg(adap, TP_MTU_TABLE_A);
2225 mtus[i] = MTUVALUE_GET(v); 2452 mtus[i] = MTUVALUE_G(v);
2226 if (mtu_log) 2453 if (mtu_log)
2227 mtu_log[i] = MTUWIDTH_GET(v); 2454 mtu_log[i] = MTUWIDTH_G(v);
2228 } 2455 }
2229} 2456}
2230 2457
2231/** 2458/**
2459 * t4_read_cong_tbl - reads the congestion control table
2460 * @adap: the adapter
2461 * @incr: where to store the alpha values
2462 *
2463 * Reads the additive increments programmed into the HW congestion
2464 * control table.
2465 */
2466void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
2467{
2468 unsigned int mtu, w;
2469
2470 for (mtu = 0; mtu < NMTUS; ++mtu)
2471 for (w = 0; w < NCCTRL_WIN; ++w) {
2472 t4_write_reg(adap, TP_CCTRL_TABLE_A,
2473 ROWINDEX_V(0xffff) | (mtu << 5) | w);
2474 incr[mtu][w] = (u16)t4_read_reg(adap,
2475 TP_CCTRL_TABLE_A) & 0x1fff;
2476 }
2477}
2478
2479/**
2232 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register 2480 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
2233 * @adap: the adapter 2481 * @adap: the adapter
2234 * @addr: the indirect TP register address 2482 * @addr: the indirect TP register address
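
t4_read_cong_tbl() selects a congestion-table entry with (mtu << 5) | w, which relies on there being 32 windows (NCCTRL_WIN) per MTU, and masks the additive increment out of the low 13 bits of the value read back. A standalone sketch of that indexing and masking:

#include <stdint.h>
#include <stdio.h>

/* Entry selected by the TP_CCTRL_TABLE write: (mtu << 5) | w, valid only
 * with 32 windows per MTU.
 */
static unsigned int cctrl_index(unsigned int mtu, unsigned int w)
{
	return (mtu << 5) | w;
}

/* The additive increment occupies the low 13 bits of the value read back. */
static uint16_t cctrl_incr(uint32_t regval)
{
	return (uint16_t)(regval & 0x1fff);
}

int main(void)
{
	printf("entry for mtu 3, window 7: %u\n", cctrl_index(3, 7));
	printf("increment from %#x: %u\n", 0xabcdu,
	       (unsigned)cctrl_incr(0xabcd));
	return 0;
}
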
@@ -2240,9 +2488,9 @@ void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
2240void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, 2488void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
2241 unsigned int mask, unsigned int val) 2489 unsigned int mask, unsigned int val)
2242{ 2490{
2243 t4_write_reg(adap, TP_PIO_ADDR, addr); 2491 t4_write_reg(adap, TP_PIO_ADDR_A, addr);
2244 val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask; 2492 val |= t4_read_reg(adap, TP_PIO_DATA_A) & ~mask;
2245 t4_write_reg(adap, TP_PIO_DATA, val); 2493 t4_write_reg(adap, TP_PIO_DATA_A, val);
2246} 2494}
2247 2495
2248/** 2496/**
@@ -2321,8 +2569,8 @@ void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
2321 2569
2322 if (!(mtu & ((1 << log2) >> 2))) /* round */ 2570 if (!(mtu & ((1 << log2) >> 2))) /* round */
2323 log2--; 2571 log2--;
2324 t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) | 2572 t4_write_reg(adap, TP_MTU_TABLE_A, MTUINDEX_V(i) |
2325 MTUWIDTH(log2) | MTUVALUE(mtu)); 2573 MTUWIDTH_V(log2) | MTUVALUE_V(mtu));
2326 2574
2327 for (w = 0; w < NCCTRL_WIN; ++w) { 2575 for (w = 0; w < NCCTRL_WIN; ++w) {
2328 unsigned int inc; 2576 unsigned int inc;
@@ -2330,13 +2578,67 @@ void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
2330 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w], 2578 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2331 CC_MIN_INCR); 2579 CC_MIN_INCR);
2332 2580
2333 t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) | 2581 t4_write_reg(adap, TP_CCTRL_TABLE_A, (i << 21) |
2334 (w << 16) | (beta[w] << 13) | inc); 2582 (w << 16) | (beta[w] << 13) | inc);
2335 } 2583 }
2336 } 2584 }
2337} 2585}
2338 2586
2339/** 2587/**
2588 * t4_pmtx_get_stats - returns the HW stats from PMTX
2589 * @adap: the adapter
2590 * @cnt: where to store the count statistics
2591 * @cycles: where to store the cycle statistics
2592 *
2593 * Returns performance statistics from PMTX.
2594 */
2595void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
2596{
2597 int i;
2598 u32 data[2];
2599
2600 for (i = 0; i < PM_NSTATS; i++) {
2601 t4_write_reg(adap, PM_TX_STAT_CONFIG_A, i + 1);
2602 cnt[i] = t4_read_reg(adap, PM_TX_STAT_COUNT_A);
2603 if (is_t4(adap->params.chip)) {
2604 cycles[i] = t4_read_reg64(adap, PM_TX_STAT_LSB_A);
2605 } else {
2606 t4_read_indirect(adap, PM_TX_DBG_CTRL_A,
2607 PM_TX_DBG_DATA_A, data, 2,
2608 PM_TX_DBG_STAT_MSB_A);
2609 cycles[i] = (((u64)data[0] << 32) | data[1]);
2610 }
2611 }
2612}
2613
2614/**
2615 * t4_pmrx_get_stats - returns the HW stats from PMRX
2616 * @adap: the adapter
2617 * @cnt: where to store the count statistics
2618 * @cycles: where to store the cycle statistics
2619 *
2620 * Returns performance statistics from PMRX.
2621 */
2622void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
2623{
2624 int i;
2625 u32 data[2];
2626
2627 for (i = 0; i < PM_NSTATS; i++) {
2628 t4_write_reg(adap, PM_RX_STAT_CONFIG_A, i + 1);
2629 cnt[i] = t4_read_reg(adap, PM_RX_STAT_COUNT_A);
2630 if (is_t4(adap->params.chip)) {
2631 cycles[i] = t4_read_reg64(adap, PM_RX_STAT_LSB_A);
2632 } else {
2633 t4_read_indirect(adap, PM_RX_DBG_CTRL_A,
2634 PM_RX_DBG_DATA_A, data, 2,
2635 PM_RX_DBG_STAT_MSB_A);
2636 cycles[i] = (((u64)data[0] << 32) | data[1]);
2637 }
2638 }
2639}
2640
2641/**
2340 * get_mps_bg_map - return the buffer groups associated with a port 2642 * get_mps_bg_map - return the buffer groups associated with a port
2341 * @adap: the adapter 2643 * @adap: the adapter
2342 * @idx: the port index 2644 * @idx: the port index
@@ -2347,7 +2649,7 @@ void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
2347 */ 2649 */
2348static unsigned int get_mps_bg_map(struct adapter *adap, int idx) 2650static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
2349{ 2651{
2350 u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL)); 2652 u32 n = NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
2351 2653
2352 if (n == 0) 2654 if (n == 0)
2353 return idx == 0 ? 0xf : 0; 2655 return idx == 0 ? 0xf : 0;
@@ -2485,11 +2787,11 @@ void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
2485 if (is_t4(adap->params.chip)) { 2787 if (is_t4(adap->params.chip)) {
2486 mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO); 2788 mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
2487 mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI); 2789 mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
2488 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2); 2790 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A);
2489 } else { 2791 } else {
2490 mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO); 2792 mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
2491 mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI); 2793 mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
2492 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2); 2794 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A);
2493 } 2795 }
2494 2796
2495 if (addr) { 2797 if (addr) {
@@ -2499,8 +2801,8 @@ void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
2499 t4_write_reg(adap, mag_id_reg_h, 2801 t4_write_reg(adap, mag_id_reg_h,
2500 (addr[0] << 8) | addr[1]); 2802 (addr[0] << 8) | addr[1]);
2501 } 2803 }
2502 t4_set_reg_field(adap, port_cfg_reg, MAGICEN, 2804 t4_set_reg_field(adap, port_cfg_reg, MAGICEN_F,
2503 addr ? MAGICEN : 0); 2805 addr ? MAGICEN_F : 0);
2504} 2806}
2505 2807
2506/** 2808/**
@@ -2525,20 +2827,21 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2525 u32 port_cfg_reg; 2827 u32 port_cfg_reg;
2526 2828
2527 if (is_t4(adap->params.chip)) 2829 if (is_t4(adap->params.chip))
2528 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2); 2830 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A);
2529 else 2831 else
2530 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2); 2832 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A);
2531 2833
2532 if (!enable) { 2834 if (!enable) {
2533 t4_set_reg_field(adap, port_cfg_reg, PATEN, 0); 2835 t4_set_reg_field(adap, port_cfg_reg, PATEN_F, 0);
2534 return 0; 2836 return 0;
2535 } 2837 }
2536 if (map > 0xff) 2838 if (map > 0xff)
2537 return -EINVAL; 2839 return -EINVAL;
2538 2840
2539#define EPIO_REG(name) \ 2841#define EPIO_REG(name) \
2540 (is_t4(adap->params.chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \ 2842 (is_t4(adap->params.chip) ? \
2541 T5_PORT_REG(port, MAC_PORT_EPIO_##name)) 2843 PORT_REG(port, XGMAC_PORT_EPIO_##name##_A) : \
2844 T5_PORT_REG(port, MAC_PORT_EPIO_##name##_A))
2542 2845
2543 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32); 2846 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
2544 t4_write_reg(adap, EPIO_REG(DATA2), mask1); 2847 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
@@ -2550,21 +2853,21 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2550 2853
2551 /* write byte masks */ 2854 /* write byte masks */
2552 t4_write_reg(adap, EPIO_REG(DATA0), mask0); 2855 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
2553 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR); 2856 t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i) | EPIOWR_F);
2554 t4_read_reg(adap, EPIO_REG(OP)); /* flush */ 2857 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2555 if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY) 2858 if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F)
2556 return -ETIMEDOUT; 2859 return -ETIMEDOUT;
2557 2860
2558 /* write CRC */ 2861 /* write CRC */
2559 t4_write_reg(adap, EPIO_REG(DATA0), crc); 2862 t4_write_reg(adap, EPIO_REG(DATA0), crc);
2560 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR); 2863 t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i + 32) | EPIOWR_F);
2561 t4_read_reg(adap, EPIO_REG(OP)); /* flush */ 2864 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2562 if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY) 2865 if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F)
2563 return -ETIMEDOUT; 2866 return -ETIMEDOUT;
2564 } 2867 }
2565#undef EPIO_REG 2868#undef EPIO_REG
2566 2869
2567 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN); 2870 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2_A), 0, PATEN_F);
2568 return 0; 2871 return 0;
2569} 2872}
2570 2873
@@ -2749,9 +3052,9 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state)
2749 "IDMA_FL_SEND_COMPLETION_TO_IMSG", 3052 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
2750 }; 3053 };
2751 static const u32 sge_regs[] = { 3054 static const u32 sge_regs[] = {
2752 SGE_DEBUG_DATA_LOW_INDEX_2, 3055 SGE_DEBUG_DATA_LOW_INDEX_2_A,
2753 SGE_DEBUG_DATA_LOW_INDEX_3, 3056 SGE_DEBUG_DATA_LOW_INDEX_3_A,
2754 SGE_DEBUG_DATA_HIGH_INDEX_10, 3057 SGE_DEBUG_DATA_HIGH_INDEX_10_A,
2755 }; 3058 };
2756 const char **sge_idma_decode; 3059 const char **sge_idma_decode;
2757 int sge_idma_decode_nstates; 3060 int sge_idma_decode_nstates;
@@ -2818,7 +3121,7 @@ retry:
2818 if (ret < 0) { 3121 if (ret < 0) {
2819 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0) 3122 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
2820 goto retry; 3123 goto retry;
2821 if (t4_read_reg(adap, MA_PCIE_FW) & PCIE_FW_ERR) 3124 if (t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_ERR_F)
2822 t4_report_fw_error(adap); 3125 t4_report_fw_error(adap);
2823 return ret; 3126 return ret;
2824 } 3127 }
@@ -2868,8 +3171,8 @@ retry:
2868 * timeout ... and then retry if we haven't exhausted 3171 * timeout ... and then retry if we haven't exhausted
2869 * our retries ... 3172 * our retries ...
2870 */ 3173 */
2871 pcie_fw = t4_read_reg(adap, MA_PCIE_FW); 3174 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
2872 if (!(pcie_fw & (PCIE_FW_ERR|PCIE_FW_INIT))) { 3175 if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
2873 if (waiting <= 0) { 3176 if (waiting <= 0) {
2874 if (retries-- > 0) 3177 if (retries-- > 0)
2875 goto retry; 3178 goto retry;
@@ -2884,9 +3187,9 @@ retry:
2884 * report errors preferentially. 3187 * report errors preferentially.
2885 */ 3188 */
2886 if (state) { 3189 if (state) {
2887 if (pcie_fw & PCIE_FW_ERR) 3190 if (pcie_fw & PCIE_FW_ERR_F)
2888 *state = DEV_STATE_ERR; 3191 *state = DEV_STATE_ERR;
2889 else if (pcie_fw & PCIE_FW_INIT) 3192 else if (pcie_fw & PCIE_FW_INIT_F)
2890 *state = DEV_STATE_INIT; 3193 *state = DEV_STATE_INIT;
2891 } 3194 }
2892 3195
@@ -2896,7 +3199,7 @@ retry:
2896 * for our caller. 3199 * for our caller.
2897 */ 3200 */
2898 if (master_mbox == PCIE_FW_MASTER_M && 3201 if (master_mbox == PCIE_FW_MASTER_M &&
2899 (pcie_fw & PCIE_FW_MASTER_VLD)) 3202 (pcie_fw & PCIE_FW_MASTER_VLD_F))
2900 master_mbox = PCIE_FW_MASTER_G(pcie_fw); 3203 master_mbox = PCIE_FW_MASTER_G(pcie_fw);
2901 break; 3204 break;
2902 } 3205 }
@@ -2985,7 +3288,7 @@ static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
2985 3288
2986 memset(&c, 0, sizeof(c)); 3289 memset(&c, 0, sizeof(c));
2987 INIT_CMD(c, RESET, WRITE); 3290 INIT_CMD(c, RESET, WRITE);
2988 c.val = htonl(PIORST | PIORSTMODE); 3291 c.val = htonl(PIORST_F | PIORSTMODE_F);
2989 c.halt_pkd = htonl(FW_RESET_CMD_HALT_F); 3292 c.halt_pkd = htonl(FW_RESET_CMD_HALT_F);
2990 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 3293 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2991 } 3294 }
@@ -3004,8 +3307,8 @@ static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
3004 * rather than a RESET ... if it's new enough to understand that ... 3307 * rather than a RESET ... if it's new enough to understand that ...
3005 */ 3308 */
3006 if (ret == 0 || force) { 3309 if (ret == 0 || force) {
3007 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST); 3310 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
3008 t4_set_reg_field(adap, PCIE_FW, PCIE_FW_HALT_F, 3311 t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F,
3009 PCIE_FW_HALT_F); 3312 PCIE_FW_HALT_F);
3010 } 3313 }
3011 3314
@@ -3045,7 +3348,7 @@ static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
3045 * doing it automatically, we need to clear the PCIE_FW.HALT 3348 * doing it automatically, we need to clear the PCIE_FW.HALT
3046 * bit. 3349 * bit.
3047 */ 3350 */
3048 t4_set_reg_field(adap, PCIE_FW, PCIE_FW_HALT_F, 0); 3351 t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, 0);
3049 3352
3050 /* 3353 /*
3051 * If we've been given a valid mailbox, first try to get the 3354 * If we've been given a valid mailbox, first try to get the
@@ -3055,21 +3358,21 @@ static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
3055 * hitting the chip with a hammer. 3358 * hitting the chip with a hammer.
3056 */ 3359 */
3057 if (mbox <= PCIE_FW_MASTER_M) { 3360 if (mbox <= PCIE_FW_MASTER_M) {
3058 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0); 3361 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
3059 msleep(100); 3362 msleep(100);
3060 if (t4_fw_reset(adap, mbox, 3363 if (t4_fw_reset(adap, mbox,
3061 PIORST | PIORSTMODE) == 0) 3364 PIORST_F | PIORSTMODE_F) == 0)
3062 return 0; 3365 return 0;
3063 } 3366 }
3064 3367
3065 t4_write_reg(adap, PL_RST, PIORST | PIORSTMODE); 3368 t4_write_reg(adap, PL_RST_A, PIORST_F | PIORSTMODE_F);
3066 msleep(2000); 3369 msleep(2000);
3067 } else { 3370 } else {
3068 int ms; 3371 int ms;
3069 3372
3070 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0); 3373 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
3071 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) { 3374 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
3072 if (!(t4_read_reg(adap, PCIE_FW) & PCIE_FW_HALT_F)) 3375 if (!(t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_HALT_F))
3073 return 0; 3376 return 0;
3074 msleep(100); 3377 msleep(100);
3075 ms += 100; 3378 ms += 100;
@@ -3148,22 +3451,23 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
3148 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size; 3451 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
3149 unsigned int fl_align_log = fls(fl_align) - 1; 3452 unsigned int fl_align_log = fls(fl_align) - 1;
3150 3453
3151 t4_write_reg(adap, SGE_HOST_PAGE_SIZE, 3454 t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
3152 HOSTPAGESIZEPF0(sge_hps) | 3455 HOSTPAGESIZEPF0_V(sge_hps) |
3153 HOSTPAGESIZEPF1(sge_hps) | 3456 HOSTPAGESIZEPF1_V(sge_hps) |
3154 HOSTPAGESIZEPF2(sge_hps) | 3457 HOSTPAGESIZEPF2_V(sge_hps) |
3155 HOSTPAGESIZEPF3(sge_hps) | 3458 HOSTPAGESIZEPF3_V(sge_hps) |
3156 HOSTPAGESIZEPF4(sge_hps) | 3459 HOSTPAGESIZEPF4_V(sge_hps) |
3157 HOSTPAGESIZEPF5(sge_hps) | 3460 HOSTPAGESIZEPF5_V(sge_hps) |
3158 HOSTPAGESIZEPF6(sge_hps) | 3461 HOSTPAGESIZEPF6_V(sge_hps) |
3159 HOSTPAGESIZEPF7(sge_hps)); 3462 HOSTPAGESIZEPF7_V(sge_hps));
3160 3463
3161 if (is_t4(adap->params.chip)) { 3464 if (is_t4(adap->params.chip)) {
3162 t4_set_reg_field(adap, SGE_CONTROL, 3465 t4_set_reg_field(adap, SGE_CONTROL_A,
3163 INGPADBOUNDARY_MASK | 3466 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
3164 EGRSTATUSPAGESIZE_MASK, 3467 EGRSTATUSPAGESIZE_F,
3165 INGPADBOUNDARY(fl_align_log - 5) | 3468 INGPADBOUNDARY_V(fl_align_log -
3166 EGRSTATUSPAGESIZE(stat_len != 64)); 3469 INGPADBOUNDARY_SHIFT_X) |
3470 EGRSTATUSPAGESIZE_V(stat_len != 64));
3167 } else { 3471 } else {
3168 /* T5 introduced the separation of the Free List Padding and 3472 /* T5 introduced the separation of the Free List Padding and
3169 * Packing Boundaries. Thus, we can select a smaller Padding 3473 * Packing Boundaries. Thus, we can select a smaller Padding
@@ -3193,15 +3497,15 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
3193 fl_align = 64; 3497 fl_align = 64;
3194 fl_align_log = 6; 3498 fl_align_log = 6;
3195 } 3499 }
3196 t4_set_reg_field(adap, SGE_CONTROL, 3500 t4_set_reg_field(adap, SGE_CONTROL_A,
3197 INGPADBOUNDARY_MASK | 3501 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
3198 EGRSTATUSPAGESIZE_MASK, 3502 EGRSTATUSPAGESIZE_F,
3199 INGPADBOUNDARY(INGPCIEBOUNDARY_32B_X) | 3503 INGPADBOUNDARY_V(INGPCIEBOUNDARY_32B_X) |
3200 EGRSTATUSPAGESIZE(stat_len != 64)); 3504 EGRSTATUSPAGESIZE_V(stat_len != 64));
3201 t4_set_reg_field(adap, SGE_CONTROL2_A, 3505 t4_set_reg_field(adap, SGE_CONTROL2_A,
3202 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M), 3506 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
3203 INGPACKBOUNDARY_V(fl_align_log - 3507 INGPACKBOUNDARY_V(fl_align_log -
3204 INGPACKBOUNDARY_SHIFT_X)); 3508 INGPACKBOUNDARY_SHIFT_X));
3205 } 3509 }
3206 /* 3510 /*
3207 * Adjust various SGE Free List Host Buffer Sizes. 3511 * Adjust various SGE Free List Host Buffer Sizes.
@@ -3224,15 +3528,15 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
3224 * Default Firmware Configuration File but we need to adjust it for 3528 * Default Firmware Configuration File but we need to adjust it for
3225 * this host's cache line size. 3529 * this host's cache line size.
3226 */ 3530 */
3227 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size); 3531 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size);
3228 t4_write_reg(adap, SGE_FL_BUFFER_SIZE2, 3532 t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A,
3229 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1) 3533 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align-1)
3230 & ~(fl_align-1)); 3534 & ~(fl_align-1));
3231 t4_write_reg(adap, SGE_FL_BUFFER_SIZE3, 3535 t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A,
3232 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1) 3536 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align-1)
3233 & ~(fl_align-1)); 3537 & ~(fl_align-1));
3234 3538
3235 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12)); 3539 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(page_shift - 12));
3236 3540
3237 return 0; 3541 return 0;
3238} 3542}
@@ -3917,12 +4221,12 @@ int t4_wait_dev_ready(void __iomem *regs)
3917{ 4221{
3918 u32 whoami; 4222 u32 whoami;
3919 4223
3920 whoami = readl(regs + PL_WHOAMI); 4224 whoami = readl(regs + PL_WHOAMI_A);
3921 if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS) 4225 if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
3922 return 0; 4226 return 0;
3923 4227
3924 msleep(500); 4228 msleep(500);
3925 whoami = readl(regs + PL_WHOAMI); 4229 whoami = readl(regs + PL_WHOAMI_A);
3926 return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO); 4230 return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
3927} 4231}
3928 4232
@@ -3946,7 +4250,7 @@ static int get_flash_params(struct adapter *adap)
3946 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID); 4250 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
3947 if (!ret) 4251 if (!ret)
3948 ret = sf1_read(adap, 3, 0, 1, &info); 4252 ret = sf1_read(adap, 3, 0, 1, &info);
3949 t4_write_reg(adap, SF_OP, 0); /* unlock SF */ 4253 t4_write_reg(adap, SF_OP_A, 0); /* unlock SF */
3950 if (ret) 4254 if (ret)
3951 return ret; 4255 return ret;
3952 4256
@@ -3969,7 +4273,7 @@ static int get_flash_params(struct adapter *adap)
3969 return -EINVAL; 4273 return -EINVAL;
3970 adap->params.sf_size = 1 << info; 4274 adap->params.sf_size = 1 << info;
3971 adap->params.sf_fw_start = 4275 adap->params.sf_fw_start =
3972 t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK; 4276 t4_read_reg(adap, CIM_BOOT_CFG_A) & BOOTADDR_M;
3973 4277
3974 if (adap->params.sf_size < FLASH_MIN_SIZE) 4278 if (adap->params.sf_size < FLASH_MIN_SIZE)
3975 dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n", 4279 dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n",
@@ -3993,7 +4297,7 @@ int t4_prep_adapter(struct adapter *adapter)
3993 u32 pl_rev; 4297 u32 pl_rev;
3994 4298
3995 get_pci_mode(adapter, &adapter->params.pci); 4299 get_pci_mode(adapter, &adapter->params.pci);
3996 pl_rev = G_REV(t4_read_reg(adapter, PL_REV)); 4300 pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A));
3997 4301
3998 ret = get_flash_params(adapter); 4302 ret = get_flash_params(adapter);
3999 if (ret < 0) { 4303 if (ret < 0) {
@@ -4019,6 +4323,7 @@ int t4_prep_adapter(struct adapter *adapter)
4019 return -EINVAL; 4323 return -EINVAL;
4020 } 4324 }
4021 4325
4326 adapter->params.cim_la_size = CIMLA_SIZE;
4022 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); 4327 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
4023 4328
4024 /* 4329 /*
@@ -4133,7 +4438,7 @@ int t4_init_sge_params(struct adapter *adapter)
4133 4438
4134 /* Extract the SGE Page Size for our PF. 4439 /* Extract the SGE Page Size for our PF.
4135 */ 4440 */
4136 hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE); 4441 hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
4137 s_hps = (HOSTPAGESIZEPF0_S + 4442 s_hps = (HOSTPAGESIZEPF0_S +
4138 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->fn); 4443 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->fn);
4139 sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M); 4444 sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
@@ -4142,10 +4447,10 @@ int t4_init_sge_params(struct adapter *adapter)
4142 */ 4447 */
4143 s_qpp = (QUEUESPERPAGEPF0_S + 4448 s_qpp = (QUEUESPERPAGEPF0_S +
4144 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->fn); 4449 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->fn);
4145 qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF); 4450 qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
4146 sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK); 4451 sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
4147 qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF); 4452 qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
4148 sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK); 4453 sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
4149 4454
4150 return 0; 4455 return 0;
4151} 4456}
@@ -4161,9 +4466,9 @@ int t4_init_tp_params(struct adapter *adap)
4161 int chan; 4466 int chan;
4162 u32 v; 4467 u32 v;
4163 4468
4164 v = t4_read_reg(adap, TP_TIMER_RESOLUTION); 4469 v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
4165 adap->params.tp.tre = TIMERRESOLUTION_GET(v); 4470 adap->params.tp.tre = TIMERRESOLUTION_G(v);
4166 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v); 4471 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_G(v);
4167 4472
4168 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */ 4473 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
4169 for (chan = 0; chan < NCHAN; chan++) 4474 for (chan = 0; chan < NCHAN; chan++)
@@ -4172,27 +4477,27 @@ int t4_init_tp_params(struct adapter *adap)
4172 /* Cache the adapter's Compressed Filter Mode and global Ingress 4477 /* Cache the adapter's Compressed Filter Mode and global Ingress
4173 * Configuration. 4478 * Configuration.
4174 */ 4479 */
4175 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, 4480 t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4176 &adap->params.tp.vlan_pri_map, 1, 4481 &adap->params.tp.vlan_pri_map, 1,
4177 TP_VLAN_PRI_MAP); 4482 TP_VLAN_PRI_MAP_A);
4178 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, 4483 t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4179 &adap->params.tp.ingress_config, 1, 4484 &adap->params.tp.ingress_config, 1,
4180 TP_INGRESS_CONFIG); 4485 TP_INGRESS_CONFIG_A);
4181 4486
4182 /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field 4487 /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
4183 * shift positions of several elements of the Compressed Filter Tuple 4488 * shift positions of several elements of the Compressed Filter Tuple
4184 * for this adapter which we need frequently ... 4489 * for this adapter which we need frequently ...
4185 */ 4490 */
4186 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN); 4491 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F);
4187 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID); 4492 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F);
4188 adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT); 4493 adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F);
4189 adap->params.tp.protocol_shift = t4_filter_field_shift(adap, 4494 adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
4190 F_PROTOCOL); 4495 PROTOCOL_F);
4191 4496
4192 /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID 4497 /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
4193 * represents the presence of an Outer VLAN instead of a VNIC ID. 4498 * represents the presence of an Outer VLAN instead of a VNIC ID.
4194 */ 4499 */
4195 if ((adap->params.tp.ingress_config & F_VNIC) == 0) 4500 if ((adap->params.tp.ingress_config & VNIC_F) == 0)
4196 adap->params.tp.vnic_shift = -1; 4501 adap->params.tp.vnic_shift = -1;
4197 4502
4198 return 0; 4503 return 0;
@@ -4218,35 +4523,35 @@ int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
4218 4523
4219 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) { 4524 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
4220 switch (filter_mode & sel) { 4525 switch (filter_mode & sel) {
4221 case F_FCOE: 4526 case FCOE_F:
4222 field_shift += W_FT_FCOE; 4527 field_shift += FT_FCOE_W;
4223 break; 4528 break;
4224 case F_PORT: 4529 case PORT_F:
4225 field_shift += W_FT_PORT; 4530 field_shift += FT_PORT_W;
4226 break; 4531 break;
4227 case F_VNIC_ID: 4532 case VNIC_ID_F:
4228 field_shift += W_FT_VNIC_ID; 4533 field_shift += FT_VNIC_ID_W;
4229 break; 4534 break;
4230 case F_VLAN: 4535 case VLAN_F:
4231 field_shift += W_FT_VLAN; 4536 field_shift += FT_VLAN_W;
4232 break; 4537 break;
4233 case F_TOS: 4538 case TOS_F:
4234 field_shift += W_FT_TOS; 4539 field_shift += FT_TOS_W;
4235 break; 4540 break;
4236 case F_PROTOCOL: 4541 case PROTOCOL_F:
4237 field_shift += W_FT_PROTOCOL; 4542 field_shift += FT_PROTOCOL_W;
4238 break; 4543 break;
4239 case F_ETHERTYPE: 4544 case ETHERTYPE_F:
4240 field_shift += W_FT_ETHERTYPE; 4545 field_shift += FT_ETHERTYPE_W;
4241 break; 4546 break;
4242 case F_MACMATCH: 4547 case MACMATCH_F:
4243 field_shift += W_FT_MACMATCH; 4548 field_shift += FT_MACMATCH_W;
4244 break; 4549 break;
4245 case F_MPSHITTYPE: 4550 case MPSHITTYPE_F:
4246 field_shift += W_FT_MPSHITTYPE; 4551 field_shift += FT_MPSHITTYPE_W;
4247 break; 4552 break;
4248 case F_FRAGMENTATION: 4553 case FRAGMENTATION_F:
4249 field_shift += W_FT_FRAGMENTATION; 4554 field_shift += FT_FRAGMENTATION_W;
4250 break; 4555 break;
4251 } 4556 }
4252 } 4557 }
@@ -4311,3 +4616,289 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
4311 } 4616 }
4312 return 0; 4617 return 0;
4313} 4618}
4619
4620/**
4621 * t4_read_cimq_cfg - read CIM queue configuration
4622 * @adap: the adapter
4623 * @base: holds the queue base addresses in bytes
4624 * @size: holds the queue sizes in bytes
4625 * @thres: holds the queue full thresholds in bytes
4626 *
4627 * Returns the current configuration of the CIM queues, starting with
4628 * the IBQs, then the OBQs.
4629 */
4630void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
4631{
4632 unsigned int i, v;
4633 int cim_num_obq = is_t4(adap->params.chip) ?
4634 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
4635
4636 for (i = 0; i < CIM_NUM_IBQ; i++) {
4637 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, IBQSELECT_F |
4638 QUENUMSELECT_V(i));
4639 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
4640 /* value is in 256-byte units */
4641 *base++ = CIMQBASE_G(v) * 256;
4642 *size++ = CIMQSIZE_G(v) * 256;
4643 *thres++ = QUEFULLTHRSH_G(v) * 8; /* 8-byte unit */
4644 }
4645 for (i = 0; i < cim_num_obq; i++) {
4646 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
4647 QUENUMSELECT_V(i));
4648 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
4649 /* value is in 256-byte units */
4650 *base++ = CIMQBASE_G(v) * 256;
4651 *size++ = CIMQSIZE_G(v) * 256;
4652 }
4653}
4654
4655/**
4656 * t4_read_cim_ibq - read the contents of a CIM inbound queue
4657 * @adap: the adapter
4658 * @qid: the queue index
4659 * @data: where to store the queue contents
4660 * @n: capacity of @data in 32-bit words
4661 *
4662 * Reads the contents of the selected CIM queue starting at address 0 up
4663 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
4664 * error and the number of 32-bit words actually read on success.
4665 */
4666int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
4667{
4668 int i, err, attempts;
4669 unsigned int addr;
4670 const unsigned int nwords = CIM_IBQ_SIZE * 4;
4671
4672 if (qid > 5 || (n & 3))
4673 return -EINVAL;
4674
4675 addr = qid * nwords;
4676 if (n > nwords)
4677 n = nwords;
4678
4679 /* It might take 3-10ms before the IBQ debug read access is allowed.
4680 * Wait for 1 Sec with a delay of 1 usec.
4681 */
4682 attempts = 1000000;
4683
4684 for (i = 0; i < n; i++, addr++) {
4685 t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, IBQDBGADDR_V(addr) |
4686 IBQDBGEN_F);
4687 err = t4_wait_op_done(adap, CIM_IBQ_DBG_CFG_A, IBQDBGBUSY_F, 0,
4688 attempts, 1);
4689 if (err)
4690 return err;
4691 *data++ = t4_read_reg(adap, CIM_IBQ_DBG_DATA_A);
4692 }
4693 t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, 0);
4694 return i;
4695}
4696
4697/**
4698 * t4_read_cim_obq - read the contents of a CIM outbound queue
4699 * @adap: the adapter
4700 * @qid: the queue index
4701 * @data: where to store the queue contents
4702 * @n: capacity of @data in 32-bit words
4703 *
4704 * Reads the contents of the selected CIM queue starting at address 0 up
4705 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
4706 * error and the number of 32-bit words actually read on success.
4707 */
4708int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
4709{
4710 int i, err;
4711 unsigned int addr, v, nwords;
4712 int cim_num_obq = is_t4(adap->params.chip) ?
4713 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
4714
4715 if ((qid > (cim_num_obq - 1)) || (n & 3))
4716 return -EINVAL;
4717
4718 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
4719 QUENUMSELECT_V(qid));
4720 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
4721
4722 addr = CIMQBASE_G(v) * 64; /* multiple of 256 -> multiple of 4 */
4723 nwords = CIMQSIZE_G(v) * 64; /* same */
4724 if (n > nwords)
4725 n = nwords;
4726
4727 for (i = 0; i < n; i++, addr++) {
4728 t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, OBQDBGADDR_V(addr) |
4729 OBQDBGEN_F);
4730 err = t4_wait_op_done(adap, CIM_OBQ_DBG_CFG_A, OBQDBGBUSY_F, 0,
4731 2, 1);
4732 if (err)
4733 return err;
4734 *data++ = t4_read_reg(adap, CIM_OBQ_DBG_DATA_A);
4735 }
4736 t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, 0);
4737 return i;
4738}
4739
4740/**
4741 * t4_cim_read - read a block from CIM internal address space
4742 * @adap: the adapter
4743 * @addr: the start address within the CIM address space
4744 * @n: number of words to read
4745 * @valp: where to store the result
4746 *
4747 * Reads a block of 4-byte words from the CIM internal address space.
4748 */
4749int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
4750 unsigned int *valp)
4751{
4752 int ret = 0;
4753
4754 if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
4755 return -EBUSY;
4756
4757 for ( ; !ret && n--; addr += 4) {
4758 t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr);
4759 ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
4760 0, 5, 2);
4761 if (!ret)
4762 *valp++ = t4_read_reg(adap, CIM_HOST_ACC_DATA_A);
4763 }
4764 return ret;
4765}
4766
4767/**
4768 * t4_cim_write - write a block into CIM internal address space
4769 * @adap: the adapter
4770 * @addr: the start address within the CIM address space
4771 * @n: number of words to write
4772 * @valp: set of values to write
4773 *
4774 * Writes a block of 4-byte words into the CIM internal address space.
4775 */
4776int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
4777 const unsigned int *valp)
4778{
4779 int ret = 0;
4780
4781 if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
4782 return -EBUSY;
4783
4784 for ( ; !ret && n--; addr += 4) {
4785 t4_write_reg(adap, CIM_HOST_ACC_DATA_A, *valp++);
4786 t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr | HOSTWRITE_F);
4787 ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
4788 0, 5, 2);
4789 }
4790 return ret;
4791}
4792
4793static int t4_cim_write1(struct adapter *adap, unsigned int addr,
4794 unsigned int val)
4795{
4796 return t4_cim_write(adap, addr, 1, &val);
4797}
4798
4799/**
4800 * t4_cim_read_la - read CIM LA capture buffer
4801 * @adap: the adapter
4802 * @la_buf: where to store the LA data
4803 * @wrptr: the HW write pointer within the capture buffer
4804 *
4805 * Reads the contents of the CIM LA buffer with the most recent entry at
4806 * the end of the returned data and with the entry at @wrptr first.
4807 * We try to leave the LA in the running state we find it in.
4808 */
4809int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
4810{
4811 int i, ret;
4812 unsigned int cfg, val, idx;
4813
4814 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
4815 if (ret)
4816 return ret;
4817
4818 if (cfg & UPDBGLAEN_F) { /* LA is running, freeze it */
4819 ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, 0);
4820 if (ret)
4821 return ret;
4822 }
4823
4824 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
4825 if (ret)
4826 goto restart;
4827
4828 idx = UPDBGLAWRPTR_G(val);
4829 if (wrptr)
4830 *wrptr = idx;
4831
4832 for (i = 0; i < adap->params.cim_la_size; i++) {
4833 ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
4834 UPDBGLARDPTR_V(idx) | UPDBGLARDEN_F);
4835 if (ret)
4836 break;
4837 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
4838 if (ret)
4839 break;
4840 if (val & UPDBGLARDEN_F) {
4841 ret = -ETIMEDOUT;
4842 break;
4843 }
4844 ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
4845 if (ret)
4846 break;
4847 idx = (idx + 1) & UPDBGLARDPTR_M;
4848 }
4849restart:
4850 if (cfg & UPDBGLAEN_F) {
4851 int r = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
4852 cfg & ~UPDBGLARDEN_F);
4853 if (!ret)
4854 ret = r;
4855 }
4856 return ret;
4857}
4858
4859/**
4860 * t4_tp_read_la - read TP LA capture buffer
4861 * @adap: the adapter
4862 * @la_buf: where to store the LA data
4863 * @wrptr: the HW write pointer within the capture buffer
4864 *
4865 * Reads the contents of the TP LA buffer with the most recent entry at
4866 * the end of the returned data and with the entry at @wrptr first.
4867 * We leave the LA in the running state we find it in.
4868 */
4869void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
4870{
4871 bool last_incomplete;
4872 unsigned int i, cfg, val, idx;
4873
4874 cfg = t4_read_reg(adap, TP_DBG_LA_CONFIG_A) & 0xffff;
4875 if (cfg & DBGLAENABLE_F) /* freeze LA */
4876 t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
4877 adap->params.tp.la_mask | (cfg ^ DBGLAENABLE_F));
4878
4879 val = t4_read_reg(adap, TP_DBG_LA_CONFIG_A);
4880 idx = DBGLAWPTR_G(val);
4881 last_incomplete = DBGLAMODE_G(val) >= 2 && (val & DBGLAWHLF_F) == 0;
4882 if (last_incomplete)
4883 idx = (idx + 1) & DBGLARPTR_M;
4884 if (wrptr)
4885 *wrptr = idx;
4886
4887 val &= 0xffff;
4888 val &= ~DBGLARPTR_V(DBGLARPTR_M);
4889 val |= adap->params.tp.la_mask;
4890
4891 for (i = 0; i < TPLA_SIZE; i++) {
4892 t4_write_reg(adap, TP_DBG_LA_CONFIG_A, DBGLARPTR_V(idx) | val);
4893 la_buf[i] = t4_read_reg64(adap, TP_DBG_LA_DATAL_A);
4894 idx = (idx + 1) & DBGLARPTR_M;
4895 }
4896
4897 /* Wipe out last entry if it isn't valid */
4898 if (last_incomplete)
4899 la_buf[TPLA_SIZE - 1] = ~0ULL;
4900
4901 if (cfg & DBGLAENABLE_F) /* restore running state */
4902 t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
4903 cfg | adap->params.tp.la_mask);
4904}
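
[Editor's note: a purely illustrative sketch, not part of the patch, of how a debugfs-style caller might use the new t4_read_cim_ibq() helper together with the CIM_IBQ_SIZE constant added by this series. The function name dump_cim_ibq() and the pr_info() output format are hypothetical.]

	static int dump_cim_ibq(struct adapter *adap, unsigned int qid)
	{
		/* each IBQ entry is 128 bits, i.e. four 32-bit words */
		size_t nwords = CIM_IBQ_SIZE * 4;
		u32 *buf;
		int i, n;

		buf = kcalloc(nwords, sizeof(u32), GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* returns the number of 32-bit words read, or < 0 on error */
		n = t4_read_cim_ibq(adap, qid, buf, nwords);
		for (i = 0; i < n; i += 4)
			pr_info("%3d: %08x %08x %08x %08x\n", i / 4,
				buf[i], buf[i + 1], buf[i + 2], buf[i + 3]);

		kfree(buf);
		return n < 0 ? n : 0;
	}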
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index c19a90e7f7d1..380b15c0417a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -48,6 +48,7 @@ enum {
48 NMTUS = 16, /* size of MTU table */ 48 NMTUS = 16, /* size of MTU table */
49 NCCTRL_WIN = 32, /* # of congestion control windows */ 49 NCCTRL_WIN = 32, /* # of congestion control windows */
50 L2T_SIZE = 4096, /* # of L2T entries */ 50 L2T_SIZE = 4096, /* # of L2T entries */
51 PM_NSTATS = 5, /* # of PM stats */
51 MBOX_LEN = 64, /* mailbox size in bytes */ 52 MBOX_LEN = 64, /* mailbox size in bytes */
52 TRACE_LEN = 112, /* length of trace data and mask */ 53 TRACE_LEN = 112, /* length of trace data and mask */
53 FILTER_OPT_LEN = 36, /* filter tuple width for optional components */ 54 FILTER_OPT_LEN = 36, /* filter tuple width for optional components */
@@ -56,6 +57,17 @@ enum {
56}; 57};
57 58
58enum { 59enum {
60 CIM_NUM_IBQ = 6, /* # of CIM IBQs */
61 CIM_NUM_OBQ = 6, /* # of CIM OBQs */
62 CIM_NUM_OBQ_T5 = 8, /* # of CIM OBQs for T5 adapter */
63 CIMLA_SIZE = 2048, /* # of 32-bit words in CIM LA */
64 CIM_IBQ_SIZE = 128, /* # of 128-bit words in a CIM IBQ */
65 CIM_OBQ_SIZE = 128, /* # of 128-bit words in a CIM OBQ */
66 TPLA_SIZE = 128, /* # of 64-bit words in TP LA */
67 ULPRX_LA_SIZE = 512, /* # of 256-bit words in ULP_RX LA */
68};
69
70enum {
59 SF_PAGE_SIZE = 256, /* serial flash page size */ 71 SF_PAGE_SIZE = 256, /* serial flash page size */
60 SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */ 72 SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
61}; 73};
@@ -110,6 +122,18 @@ enum {
110 SGE_INGPADBOUNDARY_SHIFT = 5,/* ingress queue pad boundary */ 122 SGE_INGPADBOUNDARY_SHIFT = 5,/* ingress queue pad boundary */
111}; 123};
112 124
125/* PCI-e memory window access */
126enum pcie_memwin {
127 MEMWIN_NIC = 0,
128 MEMWIN_RSVD1 = 1,
129 MEMWIN_RSVD2 = 2,
130 MEMWIN_RDMA = 3,
131 MEMWIN_RSVD4 = 4,
132 MEMWIN_FOISCSI = 5,
133 MEMWIN_CSIOSTOR = 6,
134 MEMWIN_RSVD7 = 7,
135};
136
113struct sge_qstat { /* data written to SGE queue status entries */ 137struct sge_qstat { /* data written to SGE queue status entries */
114 __be32 qid; 138 __be32 qid;
115 __be16 cidx; 139 __be16 cidx;
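
[Editor's note: a hedged illustration of how the PM_NSTATS constant added to t4_hw.h above sizes the buffers handed to the new t4_pmtx_get_stats()/t4_pmrx_get_stats() helpers from t4_hw.c. The wrapper name and the pr_info() format below are invented for the example.]

	static void show_pm_tx_stats(struct adapter *adap)
	{
		u32 cnt[PM_NSTATS];
		u64 cycles[PM_NSTATS];
		int i;

		/* fills cnt[] with the per-stat counts and cycles[] with cycle totals */
		t4_pmtx_get_stats(adap, cnt, cycles);
		for (i = 0; i < PM_NSTATS; i++)
			pr_info("PM TX stat %d: count %u, cycles %llu\n",
				i, cnt[i], (unsigned long long)cycles[i]);
	}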
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 0f89f68948ab..0fb975e258b3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -124,6 +124,13 @@ enum CPL_error {
124}; 124};
125 125
126enum { 126enum {
127 CPL_CONN_POLICY_AUTO = 0,
128 CPL_CONN_POLICY_ASK = 1,
129 CPL_CONN_POLICY_FILTER = 2,
130 CPL_CONN_POLICY_DENY = 3
131};
132
133enum {
127 ULP_MODE_NONE = 0, 134 ULP_MODE_NONE = 0,
128 ULP_MODE_ISCSI = 2, 135 ULP_MODE_ISCSI = 2,
129 ULP_MODE_RDMA = 4, 136 ULP_MODE_RDMA = 4,
@@ -160,16 +167,28 @@ union opcode_tid {
160 u8 opcode; 167 u8 opcode;
161}; 168};
162 169
163#define CPL_OPCODE(x) ((x) << 24) 170#define CPL_OPCODE_S 24
164#define G_CPL_OPCODE(x) (((x) >> 24) & 0xFF) 171#define CPL_OPCODE_V(x) ((x) << CPL_OPCODE_S)
165#define MK_OPCODE_TID(opcode, tid) (CPL_OPCODE(opcode) | (tid)) 172#define CPL_OPCODE_G(x) (((x) >> CPL_OPCODE_S) & 0xFF)
173#define TID_G(x) ((x) & 0xFFFFFF)
174
175/* tid is assumed to be 24-bits */
176#define MK_OPCODE_TID(opcode, tid) (CPL_OPCODE_V(opcode) | (tid))
177
166#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid) 178#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
167#define GET_TID(cmd) (ntohl(OPCODE_TID(cmd)) & 0xFFFFFF) 179
180/* extract the TID from a CPL command */
181#define GET_TID(cmd) (TID_G(be32_to_cpu(OPCODE_TID(cmd))))
168 182
169/* partitioning of TID fields that also carry a queue id */ 183/* partitioning of TID fields that also carry a queue id */
170#define GET_TID_TID(x) ((x) & 0x3fff) 184#define TID_TID_S 0
171#define GET_TID_QID(x) (((x) >> 14) & 0x3ff) 185#define TID_TID_M 0x3fff
172#define TID_QID(x) ((x) << 14) 186#define TID_TID_G(x) (((x) >> TID_TID_S) & TID_TID_M)
187
188#define TID_QID_S 14
189#define TID_QID_M 0x3ff
190#define TID_QID_V(x) ((x) << TID_QID_S)
191#define TID_QID_G(x) (((x) >> TID_QID_S) & TID_QID_M)
173 192
174struct rss_header { 193struct rss_header {
175 u8 opcode; 194 u8 opcode;
@@ -199,8 +218,8 @@ struct work_request_hdr {
199}; 218};
200 219
201/* wr_hi fields */ 220/* wr_hi fields */
202#define S_WR_OP 24 221#define WR_OP_S 24
203#define V_WR_OP(x) ((__u64)(x) << S_WR_OP) 222#define WR_OP_V(x) ((__u64)(x) << WR_OP_S)
204 223
205#define WR_HDR struct work_request_hdr wr 224#define WR_HDR struct work_request_hdr wr
206 225
@@ -270,17 +289,42 @@ struct cpl_pass_open_req {
270 __be32 local_ip; 289 __be32 local_ip;
271 __be32 peer_ip; 290 __be32 peer_ip;
272 __be64 opt0; 291 __be64 opt0;
273#define NO_CONG(x) ((x) << 4)
274#define DELACK(x) ((x) << 5)
275#define DSCP(x) ((x) << 22)
276#define TCAM_BYPASS(x) ((u64)(x) << 48)
277#define NAGLE(x) ((u64)(x) << 49)
278 __be64 opt1; 292 __be64 opt1;
279#define SYN_RSS_ENABLE (1 << 0)
280#define SYN_RSS_QUEUE(x) ((x) << 2)
281#define CONN_POLICY_ASK (1 << 22)
282}; 293};
283 294
295/* option 0 fields */
296#define NO_CONG_S 4
297#define NO_CONG_V(x) ((x) << NO_CONG_S)
298#define NO_CONG_F NO_CONG_V(1U)
299
300#define DELACK_S 5
301#define DELACK_V(x) ((x) << DELACK_S)
302#define DELACK_F DELACK_V(1U)
303
304#define DSCP_S 22
305#define DSCP_M 0x3F
306#define DSCP_V(x) ((x) << DSCP_S)
307#define DSCP_G(x) (((x) >> DSCP_S) & DSCP_M)
308
309#define TCAM_BYPASS_S 48
310#define TCAM_BYPASS_V(x) ((__u64)(x) << TCAM_BYPASS_S)
311#define TCAM_BYPASS_F TCAM_BYPASS_V(1ULL)
312
313#define NAGLE_S 49
314#define NAGLE_V(x) ((__u64)(x) << NAGLE_S)
315#define NAGLE_F NAGLE_V(1ULL)
316
317/* option 1 fields */
318#define SYN_RSS_ENABLE_S 0
319#define SYN_RSS_ENABLE_V(x) ((x) << SYN_RSS_ENABLE_S)
320#define SYN_RSS_ENABLE_F SYN_RSS_ENABLE_V(1U)
321
322#define SYN_RSS_QUEUE_S 2
323#define SYN_RSS_QUEUE_V(x) ((x) << SYN_RSS_QUEUE_S)
324
325#define CONN_POLICY_S 22
326#define CONN_POLICY_V(x) ((x) << CONN_POLICY_S)
327
284struct cpl_pass_open_req6 { 328struct cpl_pass_open_req6 {
285 WR_HDR; 329 WR_HDR;
286 union opcode_tid ot; 330 union opcode_tid ot;
@@ -304,16 +348,37 @@ struct cpl_pass_accept_rpl {
304 WR_HDR; 348 WR_HDR;
305 union opcode_tid ot; 349 union opcode_tid ot;
306 __be32 opt2; 350 __be32 opt2;
307#define RX_COALESCE_VALID(x) ((x) << 11)
308#define RX_COALESCE(x) ((x) << 12)
309#define PACE(x) ((x) << 16)
310#define TX_QUEUE(x) ((x) << 23)
311#define CCTRL_ECN(x) ((x) << 27)
312#define TSTAMPS_EN(x) ((x) << 29)
313#define SACK_EN(x) ((x) << 30)
314 __be64 opt0; 351 __be64 opt0;
315}; 352};
316 353
354/* option 2 fields */
355#define RX_COALESCE_VALID_S 11
356#define RX_COALESCE_VALID_V(x) ((x) << RX_COALESCE_VALID_S)
357#define RX_COALESCE_VALID_F RX_COALESCE_VALID_V(1U)
358
359#define RX_COALESCE_S 12
360#define RX_COALESCE_V(x) ((x) << RX_COALESCE_S)
361
362#define PACE_S 16
363#define PACE_V(x) ((x) << PACE_S)
364
365#define TX_QUEUE_S 23
366#define TX_QUEUE_M 0x7
367#define TX_QUEUE_V(x) ((x) << TX_QUEUE_S)
368#define TX_QUEUE_G(x) (((x) >> TX_QUEUE_S) & TX_QUEUE_M)
369
370#define CCTRL_ECN_S 27
371#define CCTRL_ECN_V(x) ((x) << CCTRL_ECN_S)
372#define CCTRL_ECN_F CCTRL_ECN_V(1U)
373
374#define TSTAMPS_EN_S 29
375#define TSTAMPS_EN_V(x) ((x) << TSTAMPS_EN_S)
376#define TSTAMPS_EN_F TSTAMPS_EN_V(1U)
377
378#define SACK_EN_S 30
379#define SACK_EN_V(x) ((x) << SACK_EN_S)
380#define SACK_EN_F SACK_EN_V(1U)
381
317struct cpl_t5_pass_accept_rpl { 382struct cpl_t5_pass_accept_rpl {
318 WR_HDR; 383 WR_HDR;
319 union opcode_tid ot; 384 union opcode_tid ot;
@@ -384,30 +449,61 @@ struct cpl_t5_act_open_req6 {
384struct cpl_act_open_rpl { 449struct cpl_act_open_rpl {
385 union opcode_tid ot; 450 union opcode_tid ot;
386 __be32 atid_status; 451 __be32 atid_status;
387#define GET_AOPEN_STATUS(x) ((x) & 0xff)
388#define GET_AOPEN_ATID(x) (((x) >> 8) & 0xffffff)
389}; 452};
390 453
454/* cpl_act_open_rpl.atid_status fields */
455#define AOPEN_STATUS_S 0
456#define AOPEN_STATUS_M 0xFF
457#define AOPEN_STATUS_G(x) (((x) >> AOPEN_STATUS_S) & AOPEN_STATUS_M)
458
459#define AOPEN_ATID_S 8
460#define AOPEN_ATID_M 0xFFFFFF
461#define AOPEN_ATID_G(x) (((x) >> AOPEN_ATID_S) & AOPEN_ATID_M)
462
391struct cpl_pass_establish { 463struct cpl_pass_establish {
392 union opcode_tid ot; 464 union opcode_tid ot;
393 __be32 rsvd; 465 __be32 rsvd;
394 __be32 tos_stid; 466 __be32 tos_stid;
395#define PASS_OPEN_TID(x) ((x) << 0)
396#define PASS_OPEN_TOS(x) ((x) << 24)
397#define GET_PASS_OPEN_TID(x) (((x) >> 0) & 0xFFFFFF)
398#define GET_POPEN_TID(x) ((x) & 0xffffff)
399#define GET_POPEN_TOS(x) (((x) >> 24) & 0xff)
400 __be16 mac_idx; 467 __be16 mac_idx;
401 __be16 tcp_opt; 468 __be16 tcp_opt;
402#define GET_TCPOPT_WSCALE_OK(x) (((x) >> 5) & 1)
403#define GET_TCPOPT_SACK(x) (((x) >> 6) & 1)
404#define GET_TCPOPT_TSTAMP(x) (((x) >> 7) & 1)
405#define GET_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf)
406#define GET_TCPOPT_MSS(x) (((x) >> 12) & 0xf)
407 __be32 snd_isn; 469 __be32 snd_isn;
408 __be32 rcv_isn; 470 __be32 rcv_isn;
409}; 471};
410 472
473/* cpl_pass_establish.tos_stid fields */
474#define PASS_OPEN_TID_S 0
475#define PASS_OPEN_TID_M 0xFFFFFF
476#define PASS_OPEN_TID_V(x) ((x) << PASS_OPEN_TID_S)
477#define PASS_OPEN_TID_G(x) (((x) >> PASS_OPEN_TID_S) & PASS_OPEN_TID_M)
478
479#define PASS_OPEN_TOS_S 24
480#define PASS_OPEN_TOS_M 0xFF
481#define PASS_OPEN_TOS_V(x) ((x) << PASS_OPEN_TOS_S)
482#define PASS_OPEN_TOS_G(x) (((x) >> PASS_OPEN_TOS_S) & PASS_OPEN_TOS_M)
483
484/* cpl_pass_establish.tcp_opt fields (also applies to act_open_establish) */
485#define TCPOPT_WSCALE_OK_S 5
486#define TCPOPT_WSCALE_OK_M 0x1
487#define TCPOPT_WSCALE_OK_G(x) \
488 (((x) >> TCPOPT_WSCALE_OK_S) & TCPOPT_WSCALE_OK_M)
489
490#define TCPOPT_SACK_S 6
491#define TCPOPT_SACK_M 0x1
492#define TCPOPT_SACK_G(x) (((x) >> TCPOPT_SACK_S) & TCPOPT_SACK_M)
493
494#define TCPOPT_TSTAMP_S 7
495#define TCPOPT_TSTAMP_M 0x1
496#define TCPOPT_TSTAMP_G(x) (((x) >> TCPOPT_TSTAMP_S) & TCPOPT_TSTAMP_M)
497
498#define TCPOPT_SND_WSCALE_S 8
499#define TCPOPT_SND_WSCALE_M 0xF
500#define TCPOPT_SND_WSCALE_G(x) \
501 (((x) >> TCPOPT_SND_WSCALE_S) & TCPOPT_SND_WSCALE_M)
502
503#define TCPOPT_MSS_S 12
504#define TCPOPT_MSS_M 0xF
505#define TCPOPT_MSS_G(x) (((x) >> TCPOPT_MSS_S) & TCPOPT_MSS_M)
506
411struct cpl_act_establish { 507struct cpl_act_establish {
412 union opcode_tid ot; 508 union opcode_tid ot;
413 __be32 rsvd; 509 __be32 rsvd;
@@ -422,24 +518,39 @@ struct cpl_get_tcb {
422 WR_HDR; 518 WR_HDR;
423 union opcode_tid ot; 519 union opcode_tid ot;
424 __be16 reply_ctrl; 520 __be16 reply_ctrl;
425#define QUEUENO(x) ((x) << 0)
426#define REPLY_CHAN(x) ((x) << 14)
427#define NO_REPLY(x) ((x) << 15)
428 __be16 cookie; 521 __be16 cookie;
429}; 522};
430 523
524/* cpl_get_tcb.reply_ctrl fields */
525#define QUEUENO_S 0
526#define QUEUENO_V(x) ((x) << QUEUENO_S)
527
528#define REPLY_CHAN_S 14
529#define REPLY_CHAN_V(x) ((x) << REPLY_CHAN_S)
530#define REPLY_CHAN_F REPLY_CHAN_V(1U)
531
532#define NO_REPLY_S 15
533#define NO_REPLY_V(x) ((x) << NO_REPLY_S)
534#define NO_REPLY_F NO_REPLY_V(1U)
535
431struct cpl_set_tcb_field { 536struct cpl_set_tcb_field {
432 WR_HDR; 537 WR_HDR;
433 union opcode_tid ot; 538 union opcode_tid ot;
434 __be16 reply_ctrl; 539 __be16 reply_ctrl;
435 __be16 word_cookie; 540 __be16 word_cookie;
436#define TCB_WORD(x) ((x) << 0)
437#define TCB_COOKIE(x) ((x) << 5)
438#define GET_TCB_COOKIE(x) (((x) >> 5) & 7)
439 __be64 mask; 541 __be64 mask;
440 __be64 val; 542 __be64 val;
441}; 543};
442 544
545/* cpl_set_tcb_field.word_cookie fields */
546#define TCB_WORD_S 0
547#define TCB_WORD(x) ((x) << TCB_WORD_S)
548
549#define TCB_COOKIE_S 5
550#define TCB_COOKIE_M 0x7
551#define TCB_COOKIE_V(x) ((x) << TCB_COOKIE_S)
552#define TCB_COOKIE_G(x) (((x) >> TCB_COOKIE_S) & TCB_COOKIE_M)
553
443struct cpl_set_tcb_rpl { 554struct cpl_set_tcb_rpl {
444 union opcode_tid ot; 555 union opcode_tid ot;
445 __be16 rsvd; 556 __be16 rsvd;
@@ -466,10 +577,14 @@ struct cpl_close_listsvr_req {
466 WR_HDR; 577 WR_HDR;
467 union opcode_tid ot; 578 union opcode_tid ot;
468 __be16 reply_ctrl; 579 __be16 reply_ctrl;
469#define LISTSVR_IPV6(x) ((x) << 14)
470 __be16 rsvd; 580 __be16 rsvd;
471}; 581};
472 582
583/* additional cpl_close_listsvr_req.reply_ctrl field */
584#define LISTSVR_IPV6_S 14
585#define LISTSVR_IPV6_V(x) ((x) << LISTSVR_IPV6_S)
586#define LISTSVR_IPV6_F LISTSVR_IPV6_V(1U)
587
473struct cpl_close_listsvr_rpl { 588struct cpl_close_listsvr_rpl {
474 union opcode_tid ot; 589 union opcode_tid ot;
475 u8 rsvd[3]; 590 u8 rsvd[3];
@@ -565,6 +680,34 @@ struct cpl_tx_pkt_lso_core {
565 /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */ 680 /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */
566}; 681};
567 682
683/* cpl_tx_pkt_lso_core.lso_ctrl fields */
684#define LSO_TCPHDR_LEN_S 0
685#define LSO_TCPHDR_LEN_V(x) ((x) << LSO_TCPHDR_LEN_S)
686
687#define LSO_IPHDR_LEN_S 4
688#define LSO_IPHDR_LEN_V(x) ((x) << LSO_IPHDR_LEN_S)
689
690#define LSO_ETHHDR_LEN_S 16
691#define LSO_ETHHDR_LEN_V(x) ((x) << LSO_ETHHDR_LEN_S)
692
693#define LSO_IPV6_S 20
694#define LSO_IPV6_V(x) ((x) << LSO_IPV6_S)
695#define LSO_IPV6_F LSO_IPV6_V(1U)
696
697#define LSO_LAST_SLICE_S 22
698#define LSO_LAST_SLICE_V(x) ((x) << LSO_LAST_SLICE_S)
699#define LSO_LAST_SLICE_F LSO_LAST_SLICE_V(1U)
700
701#define LSO_FIRST_SLICE_S 23
702#define LSO_FIRST_SLICE_V(x) ((x) << LSO_FIRST_SLICE_S)
703#define LSO_FIRST_SLICE_F LSO_FIRST_SLICE_V(1U)
704
705#define LSO_OPCODE_S 24
706#define LSO_OPCODE_V(x) ((x) << LSO_OPCODE_S)
707
708#define LSO_T5_XFER_SIZE_S 0
709#define LSO_T5_XFER_SIZE_V(x) ((x) << LSO_T5_XFER_SIZE_S)
710
568struct cpl_tx_pkt_lso { 711struct cpl_tx_pkt_lso {
569 WR_HDR; 712 WR_HDR;
570 struct cpl_tx_pkt_lso_core c; 713 struct cpl_tx_pkt_lso_core c;
@@ -574,8 +717,6 @@ struct cpl_tx_pkt_lso {
574struct cpl_iscsi_hdr { 717struct cpl_iscsi_hdr {
575 union opcode_tid ot; 718 union opcode_tid ot;
576 __be16 pdu_len_ddp; 719 __be16 pdu_len_ddp;
577#define ISCSI_PDU_LEN(x) ((x) & 0x7FFF)
578#define ISCSI_DDP (1 << 15)
579 __be16 len; 720 __be16 len;
580 __be32 seq; 721 __be32 seq;
581 __be16 urg; 722 __be16 urg;
@@ -583,6 +724,16 @@ struct cpl_iscsi_hdr {
583 u8 status; 724 u8 status;
584}; 725};
585 726
727/* cpl_iscsi_hdr.pdu_len_ddp fields */
728#define ISCSI_PDU_LEN_S 0
729#define ISCSI_PDU_LEN_M 0x7FFF
730#define ISCSI_PDU_LEN_V(x) ((x) << ISCSI_PDU_LEN_S)
731#define ISCSI_PDU_LEN_G(x) (((x) >> ISCSI_PDU_LEN_S) & ISCSI_PDU_LEN_M)
732
733#define ISCSI_DDP_S 15
734#define ISCSI_DDP_V(x) ((x) << ISCSI_DDP_S)
735#define ISCSI_DDP_F ISCSI_DDP_V(1U)
736
586struct cpl_rx_data { 737struct cpl_rx_data {
587 union opcode_tid ot; 738 union opcode_tid ot;
588 __be16 rsvd; 739 __be16 rsvd;
@@ -639,49 +790,61 @@ struct cpl_rx_pkt {
639 __be16 vlan; 790 __be16 vlan;
640 __be16 len; 791 __be16 len;
641 __be32 l2info; 792 __be32 l2info;
642#define RXF_UDP (1 << 22)
643#define RXF_TCP (1 << 23)
644#define RXF_IP (1 << 24)
645#define RXF_IP6 (1 << 25)
646 __be16 hdr_len; 793 __be16 hdr_len;
647 __be16 err_vec; 794 __be16 err_vec;
648}; 795};
649 796
797#define RXF_UDP_S 22
798#define RXF_UDP_V(x) ((x) << RXF_UDP_S)
799#define RXF_UDP_F RXF_UDP_V(1U)
800
801#define RXF_TCP_S 23
802#define RXF_TCP_V(x) ((x) << RXF_TCP_S)
803#define RXF_TCP_F RXF_TCP_V(1U)
804
805#define RXF_IP_S 24
806#define RXF_IP_V(x) ((x) << RXF_IP_S)
807#define RXF_IP_F RXF_IP_V(1U)
808
809#define RXF_IP6_S 25
810#define RXF_IP6_V(x) ((x) << RXF_IP6_S)
811#define RXF_IP6_F RXF_IP6_V(1U)
812
650/* rx_pkt.l2info fields */ 813/* rx_pkt.l2info fields */
651#define S_RX_ETHHDR_LEN 0 814#define RX_ETHHDR_LEN_S 0
652#define M_RX_ETHHDR_LEN 0x1F 815#define RX_ETHHDR_LEN_M 0x1F
653#define V_RX_ETHHDR_LEN(x) ((x) << S_RX_ETHHDR_LEN) 816#define RX_ETHHDR_LEN_V(x) ((x) << RX_ETHHDR_LEN_S)
654#define G_RX_ETHHDR_LEN(x) (((x) >> S_RX_ETHHDR_LEN) & M_RX_ETHHDR_LEN) 817#define RX_ETHHDR_LEN_G(x) (((x) >> RX_ETHHDR_LEN_S) & RX_ETHHDR_LEN_M)
655 818
656#define S_RX_T5_ETHHDR_LEN 0 819#define RX_T5_ETHHDR_LEN_S 0
657#define M_RX_T5_ETHHDR_LEN 0x3F 820#define RX_T5_ETHHDR_LEN_M 0x3F
658#define V_RX_T5_ETHHDR_LEN(x) ((x) << S_RX_T5_ETHHDR_LEN) 821#define RX_T5_ETHHDR_LEN_V(x) ((x) << RX_T5_ETHHDR_LEN_S)
659#define G_RX_T5_ETHHDR_LEN(x) (((x) >> S_RX_T5_ETHHDR_LEN) & M_RX_T5_ETHHDR_LEN) 822#define RX_T5_ETHHDR_LEN_G(x) (((x) >> RX_T5_ETHHDR_LEN_S) & RX_T5_ETHHDR_LEN_M)
660 823
661#define S_RX_MACIDX 8 824#define RX_MACIDX_S 8
662#define M_RX_MACIDX 0x1FF 825#define RX_MACIDX_M 0x1FF
663#define V_RX_MACIDX(x) ((x) << S_RX_MACIDX) 826#define RX_MACIDX_V(x) ((x) << RX_MACIDX_S)
664#define G_RX_MACIDX(x) (((x) >> S_RX_MACIDX) & M_RX_MACIDX) 827#define RX_MACIDX_G(x) (((x) >> RX_MACIDX_S) & RX_MACIDX_M)
665 828
666#define S_RXF_SYN 21 829#define RXF_SYN_S 21
667#define V_RXF_SYN(x) ((x) << S_RXF_SYN) 830#define RXF_SYN_V(x) ((x) << RXF_SYN_S)
668#define F_RXF_SYN V_RXF_SYN(1U) 831#define RXF_SYN_F RXF_SYN_V(1U)
669 832
670#define S_RX_CHAN 28 833#define RX_CHAN_S 28
671#define M_RX_CHAN 0xF 834#define RX_CHAN_M 0xF
672#define V_RX_CHAN(x) ((x) << S_RX_CHAN) 835#define RX_CHAN_V(x) ((x) << RX_CHAN_S)
673#define G_RX_CHAN(x) (((x) >> S_RX_CHAN) & M_RX_CHAN) 836#define RX_CHAN_G(x) (((x) >> RX_CHAN_S) & RX_CHAN_M)
674 837
675/* rx_pkt.hdr_len fields */ 838/* rx_pkt.hdr_len fields */
676#define S_RX_TCPHDR_LEN 0 839#define RX_TCPHDR_LEN_S 0
677#define M_RX_TCPHDR_LEN 0x3F 840#define RX_TCPHDR_LEN_M 0x3F
678#define V_RX_TCPHDR_LEN(x) ((x) << S_RX_TCPHDR_LEN) 841#define RX_TCPHDR_LEN_V(x) ((x) << RX_TCPHDR_LEN_S)
679#define G_RX_TCPHDR_LEN(x) (((x) >> S_RX_TCPHDR_LEN) & M_RX_TCPHDR_LEN) 842#define RX_TCPHDR_LEN_G(x) (((x) >> RX_TCPHDR_LEN_S) & RX_TCPHDR_LEN_M)
680 843
681#define S_RX_IPHDR_LEN 6 844#define RX_IPHDR_LEN_S 6
682#define M_RX_IPHDR_LEN 0x3FF 845#define RX_IPHDR_LEN_M 0x3FF
683#define V_RX_IPHDR_LEN(x) ((x) << S_RX_IPHDR_LEN) 846#define RX_IPHDR_LEN_V(x) ((x) << RX_IPHDR_LEN_S)
684#define G_RX_IPHDR_LEN(x) (((x) >> S_RX_IPHDR_LEN) & M_RX_IPHDR_LEN) 847#define RX_IPHDR_LEN_G(x) (((x) >> RX_IPHDR_LEN_S) & RX_IPHDR_LEN_M)
685 848
686struct cpl_trace_pkt { 849struct cpl_trace_pkt {
687 u8 opcode; 850 u8 opcode;
@@ -730,14 +893,22 @@ struct cpl_l2t_write_req {
730 WR_HDR; 893 WR_HDR;
731 union opcode_tid ot; 894 union opcode_tid ot;
732 __be16 params; 895 __be16 params;
733#define L2T_W_INFO(x) ((x) << 2)
734#define L2T_W_PORT(x) ((x) << 8)
735#define L2T_W_NOREPLY(x) ((x) << 15)
736 __be16 l2t_idx; 896 __be16 l2t_idx;
737 __be16 vlan; 897 __be16 vlan;
738 u8 dst_mac[6]; 898 u8 dst_mac[6];
739}; 899};
740 900
901/* cpl_l2t_write_req.params fields */
902#define L2T_W_INFO_S 2
903#define L2T_W_INFO_V(x) ((x) << L2T_W_INFO_S)
904
905#define L2T_W_PORT_S 8
906#define L2T_W_PORT_V(x) ((x) << L2T_W_PORT_S)
907
908#define L2T_W_NOREPLY_S 15
909#define L2T_W_NOREPLY_V(x) ((x) << L2T_W_NOREPLY_S)
910#define L2T_W_NOREPLY_F L2T_W_NOREPLY_V(1U)
911
741struct cpl_l2t_write_rpl { 912struct cpl_l2t_write_rpl {
742 union opcode_tid ot; 913 union opcode_tid ot;
743 u8 status; 914 u8 status;
@@ -752,11 +923,15 @@ struct cpl_rdma_terminate {
752 923
753struct cpl_sge_egr_update { 924struct cpl_sge_egr_update {
754 __be32 opcode_qid; 925 __be32 opcode_qid;
755#define EGR_QID(x) ((x) & 0x1FFFF)
756 __be16 cidx; 926 __be16 cidx;
757 __be16 pidx; 927 __be16 pidx;
758}; 928};
759 929
930/* cpl_sge_egr_update.ot fields */
931#define EGR_QID_S 0
932#define EGR_QID_M 0x1FFFF
933#define EGR_QID_G(x) (((x) >> EGR_QID_S) & EGR_QID_M)
934
760/* cpl_fw*.type values */ 935/* cpl_fw*.type values */
761enum { 936enum {
762 FW_TYPE_CMD_RPL = 0, 937 FW_TYPE_CMD_RPL = 0,
@@ -849,22 +1024,30 @@ struct ulptx_sge_pair {
849 1024
850struct ulptx_sgl { 1025struct ulptx_sgl {
851 __be32 cmd_nsge; 1026 __be32 cmd_nsge;
852#define ULPTX_NSGE(x) ((x) << 0)
853#define ULPTX_MORE (1U << 23)
854 __be32 len0; 1027 __be32 len0;
855 __be64 addr0; 1028 __be64 addr0;
856 struct ulptx_sge_pair sge[0]; 1029 struct ulptx_sge_pair sge[0];
857}; 1030};
858 1031
1032#define ULPTX_NSGE_S 0
1033#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S)
1034
1035#define ULPTX_MORE_S 23
1036#define ULPTX_MORE_V(x) ((x) << ULPTX_MORE_S)
1037#define ULPTX_MORE_F ULPTX_MORE_V(1U)
1038
859struct ulp_mem_io { 1039struct ulp_mem_io {
860 WR_HDR; 1040 WR_HDR;
861 __be32 cmd; 1041 __be32 cmd;
862 __be32 len16; /* command length */ 1042 __be32 len16; /* command length */
863 __be32 dlen; /* data length in 32-byte units */ 1043 __be32 dlen; /* data length in 32-byte units */
864 __be32 lock_addr; 1044 __be32 lock_addr;
865#define ULP_MEMIO_LOCK(x) ((x) << 31)
866}; 1045};
867 1046
1047#define ULP_MEMIO_LOCK_S 31
1048#define ULP_MEMIO_LOCK_V(x) ((x) << ULP_MEMIO_LOCK_S)
1049#define ULP_MEMIO_LOCK_F ULP_MEMIO_LOCK_V(1U)
1050
868/* additional ulp_mem_io.cmd fields */ 1051/* additional ulp_mem_io.cmd fields */
869#define ULP_MEMIO_ORDER_S 23 1052#define ULP_MEMIO_ORDER_S 23
870#define ULP_MEMIO_ORDER_V(x) ((x) << ULP_MEMIO_ORDER_S) 1053#define ULP_MEMIO_ORDER_V(x) ((x) << ULP_MEMIO_ORDER_S)
@@ -874,13 +1057,9 @@ struct ulp_mem_io {
874#define T5_ULP_MEMIO_IMM_V(x) ((x) << T5_ULP_MEMIO_IMM_S) 1057#define T5_ULP_MEMIO_IMM_V(x) ((x) << T5_ULP_MEMIO_IMM_S)
875#define T5_ULP_MEMIO_IMM_F T5_ULP_MEMIO_IMM_V(1U) 1058#define T5_ULP_MEMIO_IMM_F T5_ULP_MEMIO_IMM_V(1U)
876 1059
877#define S_T5_ULP_MEMIO_IMM 23 1060#define T5_ULP_MEMIO_ORDER_S 22
878#define V_T5_ULP_MEMIO_IMM(x) ((x) << S_T5_ULP_MEMIO_IMM) 1061#define T5_ULP_MEMIO_ORDER_V(x) ((x) << T5_ULP_MEMIO_ORDER_S)
879#define F_T5_ULP_MEMIO_IMM V_T5_ULP_MEMIO_IMM(1U) 1062#define T5_ULP_MEMIO_ORDER_F T5_ULP_MEMIO_ORDER_V(1U)
880
881#define S_T5_ULP_MEMIO_ORDER 22
882#define V_T5_ULP_MEMIO_ORDER(x) ((x) << S_T5_ULP_MEMIO_ORDER)
883#define F_T5_ULP_MEMIO_ORDER V_T5_ULP_MEMIO_ORDER(1U)
884 1063
885/* ulp_mem_io.lock_addr fields */ 1064/* ulp_mem_io.lock_addr fields */
886#define ULP_MEMIO_ADDR_S 0 1065#define ULP_MEMIO_ADDR_S 0
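
[Editor's note: the t4_msg.h hunks above move every field macro to the common *_S/*_M/*_V/*_G/*_F suffix convention: _S is the bit offset, _M the unshifted mask, _V(x) places a value, _G(x) extracts it, and _F is shorthand for _V(1U) on single-bit flags. A minimal, purely illustrative round-trip using the opt2 macros; the function name and field values are made up.]

	static inline unsigned int opt2_field_roundtrip(void)
	{
		__be32 opt2 = htonl(RX_COALESCE_VALID_F |	/* flag form: _F == _V(1U) */
				    RX_COALESCE_V(3) |		/* multi-bit field via _V  */
				    TX_QUEUE_V(1));

		return TX_QUEUE_G(ntohl(opt2));			/* read the field back via _G */
	}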
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 9e4f95a91fb4..ddfb5b846045 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -153,6 +153,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
153 CH_PCI_ID_TABLE_FENTRY(0x5086), /* Custom 2x T580-CR */ 153 CH_PCI_ID_TABLE_FENTRY(0x5086), /* Custom 2x T580-CR */
154 CH_PCI_ID_TABLE_FENTRY(0x5087), /* Custom T580-CR */ 154 CH_PCI_ID_TABLE_FENTRY(0x5087), /* Custom T580-CR */
155 CH_PCI_ID_TABLE_FENTRY(0x5088), /* Custom T570-CR */ 155 CH_PCI_ID_TABLE_FENTRY(0x5088), /* Custom T570-CR */
156 CH_PCI_ID_TABLE_FENTRY(0x5089), /* Custom T520-CR */
156CH_PCI_DEVICE_ID_TABLE_DEFINE_END; 157CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
157 158
158#endif /* CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN */ 159#endif /* CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index d7bd34ee65bd..231a725f6d5d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -63,460 +63,779 @@
63#define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) 63#define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
64#define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) 64#define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
65 65
66#define SGE_PF_KDOORBELL 0x0 66#define SGE_PF_KDOORBELL_A 0x0
67#define QID_MASK 0xffff8000U 67
68#define QID_SHIFT 15 68#define QID_S 15
69#define QID(x) ((x) << QID_SHIFT) 69#define QID_V(x) ((x) << QID_S)
70#define DBPRIO(x) ((x) << 14) 70
71#define DBTYPE(x) ((x) << 13) 71#define DBPRIO_S 14
72#define PIDX_MASK 0x00003fffU 72#define DBPRIO_V(x) ((x) << DBPRIO_S)
73#define PIDX_SHIFT 0 73#define DBPRIO_F DBPRIO_V(1U)
74#define PIDX(x) ((x) << PIDX_SHIFT) 74
75#define PIDX_SHIFT_T5 0 75#define PIDX_S 0
76#define PIDX_T5(x) ((x) << PIDX_SHIFT_T5) 76#define PIDX_V(x) ((x) << PIDX_S)
77 77
78 78#define SGE_VF_KDOORBELL_A 0x0
79#define SGE_TIMERREGS 6 79
80#define SGE_PF_GTS 0x4 80#define DBTYPE_S 13
81#define INGRESSQID_MASK 0xffff0000U 81#define DBTYPE_V(x) ((x) << DBTYPE_S)
82#define INGRESSQID_SHIFT 16 82#define DBTYPE_F DBTYPE_V(1U)
83#define INGRESSQID(x) ((x) << INGRESSQID_SHIFT) 83
84#define TIMERREG_MASK 0x0000e000U 84#define PIDX_T5_S 0
85#define TIMERREG_SHIFT 13 85#define PIDX_T5_M 0x1fffU
86#define TIMERREG(x) ((x) << TIMERREG_SHIFT) 86#define PIDX_T5_V(x) ((x) << PIDX_T5_S)
87#define SEINTARM_MASK 0x00001000U 87#define PIDX_T5_G(x) (((x) >> PIDX_T5_S) & PIDX_T5_M)
88#define SEINTARM_SHIFT 12 88
89#define SEINTARM(x) ((x) << SEINTARM_SHIFT) 89#define SGE_PF_GTS_A 0x4
90#define CIDXINC_MASK 0x00000fffU 90
91#define CIDXINC_SHIFT 0 91#define INGRESSQID_S 16
92#define CIDXINC(x) ((x) << CIDXINC_SHIFT) 92#define INGRESSQID_V(x) ((x) << INGRESSQID_S)
93 93
94#define X_RXPKTCPLMODE_SPLIT 1 94#define TIMERREG_S 13
95#define X_INGPADBOUNDARY_SHIFT 5 95#define TIMERREG_V(x) ((x) << TIMERREG_S)
96 96
97#define SGE_CONTROL 0x1008 97#define SEINTARM_S 12
98#define SGE_CONTROL2_A 0x1124 98#define SEINTARM_V(x) ((x) << SEINTARM_S)
99#define DCASYSTYPE 0x00080000U 99
100#define RXPKTCPLMODE_MASK 0x00040000U 100#define CIDXINC_S 0
101#define RXPKTCPLMODE_SHIFT 18 101#define CIDXINC_M 0xfffU
102#define RXPKTCPLMODE(x) ((x) << RXPKTCPLMODE_SHIFT) 102#define CIDXINC_V(x) ((x) << CIDXINC_S)
103#define EGRSTATUSPAGESIZE_MASK 0x00020000U 103
104#define EGRSTATUSPAGESIZE_SHIFT 17 104#define SGE_CONTROL_A 0x1008
105#define EGRSTATUSPAGESIZE(x) ((x) << EGRSTATUSPAGESIZE_SHIFT) 105#define SGE_CONTROL2_A 0x1124
106#define PKTSHIFT_MASK 0x00001c00U 106
107#define PKTSHIFT_SHIFT 10 107#define RXPKTCPLMODE_S 18
108#define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT) 108#define RXPKTCPLMODE_V(x) ((x) << RXPKTCPLMODE_S)
109#define PKTSHIFT_GET(x) (((x) & PKTSHIFT_MASK) >> PKTSHIFT_SHIFT) 109#define RXPKTCPLMODE_F RXPKTCPLMODE_V(1U)
110#define INGPCIEBOUNDARY_32B_X 0 110
111#define INGPCIEBOUNDARY_MASK 0x00000380U 111#define EGRSTATUSPAGESIZE_S 17
112#define INGPCIEBOUNDARY_SHIFT 7 112#define EGRSTATUSPAGESIZE_V(x) ((x) << EGRSTATUSPAGESIZE_S)
113#define INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT) 113#define EGRSTATUSPAGESIZE_F EGRSTATUSPAGESIZE_V(1U)
114#define INGPADBOUNDARY_MASK 0x00000070U 114
115#define INGPADBOUNDARY_SHIFT 4 115#define PKTSHIFT_S 10
116#define INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT) 116#define PKTSHIFT_M 0x7U
117#define INGPADBOUNDARY_GET(x) (((x) & INGPADBOUNDARY_MASK) \ 117#define PKTSHIFT_V(x) ((x) << PKTSHIFT_S)
118 >> INGPADBOUNDARY_SHIFT) 118#define PKTSHIFT_G(x) (((x) >> PKTSHIFT_S) & PKTSHIFT_M)
119#define INGPACKBOUNDARY_16B_X 0 119
120#define INGPACKBOUNDARY_SHIFT_X 5 120#define INGPCIEBOUNDARY_S 7
121#define INGPCIEBOUNDARY_V(x) ((x) << INGPCIEBOUNDARY_S)
122
123#define INGPADBOUNDARY_S 4
124#define INGPADBOUNDARY_M 0x7U
125#define INGPADBOUNDARY_V(x) ((x) << INGPADBOUNDARY_S)
126#define INGPADBOUNDARY_G(x) (((x) >> INGPADBOUNDARY_S) & INGPADBOUNDARY_M)
127
128#define EGRPCIEBOUNDARY_S 1
129#define EGRPCIEBOUNDARY_V(x) ((x) << EGRPCIEBOUNDARY_S)
121 130
122#define INGPACKBOUNDARY_S 16 131#define INGPACKBOUNDARY_S 16
123#define INGPACKBOUNDARY_M 0x7U 132#define INGPACKBOUNDARY_M 0x7U
124#define INGPACKBOUNDARY_V(x) ((x) << INGPACKBOUNDARY_S) 133#define INGPACKBOUNDARY_V(x) ((x) << INGPACKBOUNDARY_S)
125#define INGPACKBOUNDARY_G(x) (((x) >> INGPACKBOUNDARY_S) \ 134#define INGPACKBOUNDARY_G(x) (((x) >> INGPACKBOUNDARY_S) \
126 & INGPACKBOUNDARY_M) 135 & INGPACKBOUNDARY_M)
127#define EGRPCIEBOUNDARY_MASK 0x0000000eU
128#define EGRPCIEBOUNDARY_SHIFT 1
129#define EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT)
130#define GLOBALENABLE 0x00000001U
131 136
132#define SGE_HOST_PAGE_SIZE 0x100c 137#define GLOBALENABLE_S 0
138#define GLOBALENABLE_V(x) ((x) << GLOBALENABLE_S)
139#define GLOBALENABLE_F GLOBALENABLE_V(1U)
140
141#define SGE_HOST_PAGE_SIZE_A 0x100c
142
143#define HOSTPAGESIZEPF7_S 28
144#define HOSTPAGESIZEPF7_M 0xfU
145#define HOSTPAGESIZEPF7_V(x) ((x) << HOSTPAGESIZEPF7_S)
146#define HOSTPAGESIZEPF7_G(x) (((x) >> HOSTPAGESIZEPF7_S) & HOSTPAGESIZEPF7_M)
147
148#define HOSTPAGESIZEPF6_S 24
149#define HOSTPAGESIZEPF6_M 0xfU
150#define HOSTPAGESIZEPF6_V(x) ((x) << HOSTPAGESIZEPF6_S)
151#define HOSTPAGESIZEPF6_G(x) (((x) >> HOSTPAGESIZEPF6_S) & HOSTPAGESIZEPF6_M)
152
153#define HOSTPAGESIZEPF5_S 20
154#define HOSTPAGESIZEPF5_M 0xfU
155#define HOSTPAGESIZEPF5_V(x) ((x) << HOSTPAGESIZEPF5_S)
156#define HOSTPAGESIZEPF5_G(x) (((x) >> HOSTPAGESIZEPF5_S) & HOSTPAGESIZEPF5_M)
157
158#define HOSTPAGESIZEPF4_S 16
159#define HOSTPAGESIZEPF4_M 0xfU
160#define HOSTPAGESIZEPF4_V(x) ((x) << HOSTPAGESIZEPF4_S)
161#define HOSTPAGESIZEPF4_G(x) (((x) >> HOSTPAGESIZEPF4_S) & HOSTPAGESIZEPF4_M)
162
163#define HOSTPAGESIZEPF3_S 12
164#define HOSTPAGESIZEPF3_M 0xfU
165#define HOSTPAGESIZEPF3_V(x) ((x) << HOSTPAGESIZEPF3_S)
166#define HOSTPAGESIZEPF3_G(x) (((x) >> HOSTPAGESIZEPF3_S) & HOSTPAGESIZEPF3_M)
167
168#define HOSTPAGESIZEPF2_S 8
169#define HOSTPAGESIZEPF2_M 0xfU
170#define HOSTPAGESIZEPF2_V(x) ((x) << HOSTPAGESIZEPF2_S)
171#define HOSTPAGESIZEPF2_G(x) (((x) >> HOSTPAGESIZEPF2_S) & HOSTPAGESIZEPF2_M)
172
173#define HOSTPAGESIZEPF1_S 4
174#define HOSTPAGESIZEPF1_M 0xfU
175#define HOSTPAGESIZEPF1_V(x) ((x) << HOSTPAGESIZEPF1_S)
176#define HOSTPAGESIZEPF1_G(x) (((x) >> HOSTPAGESIZEPF1_S) & HOSTPAGESIZEPF1_M)
177
178#define HOSTPAGESIZEPF0_S 0
179#define HOSTPAGESIZEPF0_M 0xfU
180#define HOSTPAGESIZEPF0_V(x) ((x) << HOSTPAGESIZEPF0_S)
181#define HOSTPAGESIZEPF0_G(x) (((x) >> HOSTPAGESIZEPF0_S) & HOSTPAGESIZEPF0_M)
182
183#define SGE_EGRESS_QUEUES_PER_PAGE_PF_A 0x1010
184#define SGE_EGRESS_QUEUES_PER_PAGE_VF_A 0x1014
133 185
134#define HOSTPAGESIZEPF7_MASK 0x0000000fU 186#define QUEUESPERPAGEPF1_S 4
135#define HOSTPAGESIZEPF7_SHIFT 28
136#define HOSTPAGESIZEPF7(x) ((x) << HOSTPAGESIZEPF7_SHIFT)
137 187
138#define HOSTPAGESIZEPF6_MASK 0x0000000fU 188#define QUEUESPERPAGEPF0_S 0
139#define HOSTPAGESIZEPF6_SHIFT 24 189#define QUEUESPERPAGEPF0_M 0xfU
140#define HOSTPAGESIZEPF6(x) ((x) << HOSTPAGESIZEPF6_SHIFT) 190#define QUEUESPERPAGEPF0_V(x) ((x) << QUEUESPERPAGEPF0_S)
191#define QUEUESPERPAGEPF0_G(x) (((x) >> QUEUESPERPAGEPF0_S) & QUEUESPERPAGEPF0_M)
141 192
142#define HOSTPAGESIZEPF5_MASK 0x0000000fU 193#define SGE_INT_CAUSE1_A 0x1024
143#define HOSTPAGESIZEPF5_SHIFT 20 194#define SGE_INT_CAUSE2_A 0x1030
144#define HOSTPAGESIZEPF5(x) ((x) << HOSTPAGESIZEPF5_SHIFT) 195#define SGE_INT_CAUSE3_A 0x103c
196
197#define ERR_FLM_DBP_S 31
198#define ERR_FLM_DBP_V(x) ((x) << ERR_FLM_DBP_S)
199#define ERR_FLM_DBP_F ERR_FLM_DBP_V(1U)
200
201#define ERR_FLM_IDMA1_S 30
202#define ERR_FLM_IDMA1_V(x) ((x) << ERR_FLM_IDMA1_S)
203#define ERR_FLM_IDMA1_F ERR_FLM_IDMA1_V(1U)
204
205#define ERR_FLM_IDMA0_S 29
206#define ERR_FLM_IDMA0_V(x) ((x) << ERR_FLM_IDMA0_S)
207#define ERR_FLM_IDMA0_F ERR_FLM_IDMA0_V(1U)
208
209#define ERR_FLM_HINT_S 28
210#define ERR_FLM_HINT_V(x) ((x) << ERR_FLM_HINT_S)
211#define ERR_FLM_HINT_F ERR_FLM_HINT_V(1U)
212
213#define ERR_PCIE_ERROR3_S 27
214#define ERR_PCIE_ERROR3_V(x) ((x) << ERR_PCIE_ERROR3_S)
215#define ERR_PCIE_ERROR3_F ERR_PCIE_ERROR3_V(1U)
216
217#define ERR_PCIE_ERROR2_S 26
218#define ERR_PCIE_ERROR2_V(x) ((x) << ERR_PCIE_ERROR2_S)
219#define ERR_PCIE_ERROR2_F ERR_PCIE_ERROR2_V(1U)
220
221#define ERR_PCIE_ERROR1_S 25
222#define ERR_PCIE_ERROR1_V(x) ((x) << ERR_PCIE_ERROR1_S)
223#define ERR_PCIE_ERROR1_F ERR_PCIE_ERROR1_V(1U)
224
225#define ERR_PCIE_ERROR0_S 24
226#define ERR_PCIE_ERROR0_V(x) ((x) << ERR_PCIE_ERROR0_S)
227#define ERR_PCIE_ERROR0_F ERR_PCIE_ERROR0_V(1U)
228
229#define ERR_CPL_EXCEED_IQE_SIZE_S 22
230#define ERR_CPL_EXCEED_IQE_SIZE_V(x) ((x) << ERR_CPL_EXCEED_IQE_SIZE_S)
231#define ERR_CPL_EXCEED_IQE_SIZE_F ERR_CPL_EXCEED_IQE_SIZE_V(1U)
232
233#define ERR_INVALID_CIDX_INC_S 21
234#define ERR_INVALID_CIDX_INC_V(x) ((x) << ERR_INVALID_CIDX_INC_S)
235#define ERR_INVALID_CIDX_INC_F ERR_INVALID_CIDX_INC_V(1U)
236
237#define ERR_CPL_OPCODE_0_S 19
238#define ERR_CPL_OPCODE_0_V(x) ((x) << ERR_CPL_OPCODE_0_S)
239#define ERR_CPL_OPCODE_0_F ERR_CPL_OPCODE_0_V(1U)
240
241#define ERR_DROPPED_DB_S 18
242#define ERR_DROPPED_DB_V(x) ((x) << ERR_DROPPED_DB_S)
243#define ERR_DROPPED_DB_F ERR_DROPPED_DB_V(1U)
244
245#define ERR_DATA_CPL_ON_HIGH_QID1_S 17
246#define ERR_DATA_CPL_ON_HIGH_QID1_V(x) ((x) << ERR_DATA_CPL_ON_HIGH_QID1_S)
247#define ERR_DATA_CPL_ON_HIGH_QID1_F ERR_DATA_CPL_ON_HIGH_QID1_V(1U)
248
249#define ERR_DATA_CPL_ON_HIGH_QID0_S 16
250#define ERR_DATA_CPL_ON_HIGH_QID0_V(x) ((x) << ERR_DATA_CPL_ON_HIGH_QID0_S)
251#define ERR_DATA_CPL_ON_HIGH_QID0_F ERR_DATA_CPL_ON_HIGH_QID0_V(1U)
252
253#define ERR_BAD_DB_PIDX3_S 15
254#define ERR_BAD_DB_PIDX3_V(x) ((x) << ERR_BAD_DB_PIDX3_S)
255#define ERR_BAD_DB_PIDX3_F ERR_BAD_DB_PIDX3_V(1U)
256
257#define ERR_BAD_DB_PIDX2_S 14
258#define ERR_BAD_DB_PIDX2_V(x) ((x) << ERR_BAD_DB_PIDX2_S)
259#define ERR_BAD_DB_PIDX2_F ERR_BAD_DB_PIDX2_V(1U)
260
261#define ERR_BAD_DB_PIDX1_S 13
262#define ERR_BAD_DB_PIDX1_V(x) ((x) << ERR_BAD_DB_PIDX1_S)
263#define ERR_BAD_DB_PIDX1_F ERR_BAD_DB_PIDX1_V(1U)
264
265#define ERR_BAD_DB_PIDX0_S 12
266#define ERR_BAD_DB_PIDX0_V(x) ((x) << ERR_BAD_DB_PIDX0_S)
267#define ERR_BAD_DB_PIDX0_F ERR_BAD_DB_PIDX0_V(1U)
268
269#define ERR_ING_CTXT_PRIO_S 10
270#define ERR_ING_CTXT_PRIO_V(x) ((x) << ERR_ING_CTXT_PRIO_S)
271#define ERR_ING_CTXT_PRIO_F ERR_ING_CTXT_PRIO_V(1U)
272
273#define ERR_EGR_CTXT_PRIO_S 9
274#define ERR_EGR_CTXT_PRIO_V(x) ((x) << ERR_EGR_CTXT_PRIO_S)
275#define ERR_EGR_CTXT_PRIO_F ERR_EGR_CTXT_PRIO_V(1U)
276
277#define DBFIFO_HP_INT_S 8
278#define DBFIFO_HP_INT_V(x) ((x) << DBFIFO_HP_INT_S)
279#define DBFIFO_HP_INT_F DBFIFO_HP_INT_V(1U)
280
281#define DBFIFO_LP_INT_S 7
282#define DBFIFO_LP_INT_V(x) ((x) << DBFIFO_LP_INT_S)
283#define DBFIFO_LP_INT_F DBFIFO_LP_INT_V(1U)
284
285#define INGRESS_SIZE_ERR_S 5
286#define INGRESS_SIZE_ERR_V(x) ((x) << INGRESS_SIZE_ERR_S)
287#define INGRESS_SIZE_ERR_F INGRESS_SIZE_ERR_V(1U)
288
289#define EGRESS_SIZE_ERR_S 4
290#define EGRESS_SIZE_ERR_V(x) ((x) << EGRESS_SIZE_ERR_S)
291#define EGRESS_SIZE_ERR_F EGRESS_SIZE_ERR_V(1U)
292
293#define SGE_INT_ENABLE3_A 0x1040
294#define SGE_FL_BUFFER_SIZE0_A 0x1044
295#define SGE_FL_BUFFER_SIZE1_A 0x1048
296#define SGE_FL_BUFFER_SIZE2_A 0x104c
297#define SGE_FL_BUFFER_SIZE3_A 0x1050
298#define SGE_FL_BUFFER_SIZE4_A 0x1054
299#define SGE_FL_BUFFER_SIZE5_A 0x1058
300#define SGE_FL_BUFFER_SIZE6_A 0x105c
301#define SGE_FL_BUFFER_SIZE7_A 0x1060
302#define SGE_FL_BUFFER_SIZE8_A 0x1064
303
304#define SGE_INGRESS_RX_THRESHOLD_A 0x10a0
305
306#define THRESHOLD_0_S 24
307#define THRESHOLD_0_M 0x3fU
308#define THRESHOLD_0_V(x) ((x) << THRESHOLD_0_S)
309#define THRESHOLD_0_G(x) (((x) >> THRESHOLD_0_S) & THRESHOLD_0_M)
310
311#define THRESHOLD_1_S 16
312#define THRESHOLD_1_M 0x3fU
313#define THRESHOLD_1_V(x) ((x) << THRESHOLD_1_S)
314#define THRESHOLD_1_G(x) (((x) >> THRESHOLD_1_S) & THRESHOLD_1_M)
315
316#define THRESHOLD_2_S 8
317#define THRESHOLD_2_M 0x3fU
318#define THRESHOLD_2_V(x) ((x) << THRESHOLD_2_S)
319#define THRESHOLD_2_G(x) (((x) >> THRESHOLD_2_S) & THRESHOLD_2_M)
320
321#define THRESHOLD_3_S 0
322#define THRESHOLD_3_M 0x3fU
323#define THRESHOLD_3_V(x) ((x) << THRESHOLD_3_S)
324#define THRESHOLD_3_G(x) (((x) >> THRESHOLD_3_S) & THRESHOLD_3_M)
325
326#define SGE_CONM_CTRL_A 0x1094
327
328#define EGRTHRESHOLD_S 8
329#define EGRTHRESHOLD_M 0x3fU
330#define EGRTHRESHOLD_V(x) ((x) << EGRTHRESHOLD_S)
331#define EGRTHRESHOLD_G(x) (((x) >> EGRTHRESHOLD_S) & EGRTHRESHOLD_M)
332
333#define EGRTHRESHOLDPACKING_S 14
334#define EGRTHRESHOLDPACKING_M 0x3fU
335#define EGRTHRESHOLDPACKING_V(x) ((x) << EGRTHRESHOLDPACKING_S)
336#define EGRTHRESHOLDPACKING_G(x) \
337 (((x) >> EGRTHRESHOLDPACKING_S) & EGRTHRESHOLDPACKING_M)
338
339#define SGE_TIMESTAMP_LO_A 0x1098
340#define SGE_TIMESTAMP_HI_A 0x109c
341
342#define TSOP_S 28
343#define TSOP_M 0x3U
344#define TSOP_V(x) ((x) << TSOP_S)
345#define TSOP_G(x) (((x) >> TSOP_S) & TSOP_M)
346
347#define TSVAL_S 0
348#define TSVAL_M 0xfffffffU
349#define TSVAL_V(x) ((x) << TSVAL_S)
350#define TSVAL_G(x) (((x) >> TSVAL_S) & TSVAL_M)
351
352#define SGE_DBFIFO_STATUS_A 0x10a4
353
354#define HP_INT_THRESH_S 28
355#define HP_INT_THRESH_M 0xfU
356#define HP_INT_THRESH_V(x) ((x) << HP_INT_THRESH_S)
357
358#define LP_INT_THRESH_S 12
359#define LP_INT_THRESH_M 0xfU
360#define LP_INT_THRESH_V(x) ((x) << LP_INT_THRESH_S)
361
362#define SGE_DOORBELL_CONTROL_A 0x10a8
363
364#define NOCOALESCE_S 26
365#define NOCOALESCE_V(x) ((x) << NOCOALESCE_S)
366#define NOCOALESCE_F NOCOALESCE_V(1U)
367
368#define ENABLE_DROP_S 13
369#define ENABLE_DROP_V(x) ((x) << ENABLE_DROP_S)
370#define ENABLE_DROP_F ENABLE_DROP_V(1U)
371
372#define SGE_TIMER_VALUE_0_AND_1_A 0x10b8
373
374#define TIMERVALUE0_S 16
375#define TIMERVALUE0_M 0xffffU
376#define TIMERVALUE0_V(x) ((x) << TIMERVALUE0_S)
377#define TIMERVALUE0_G(x) (((x) >> TIMERVALUE0_S) & TIMERVALUE0_M)
378
379#define TIMERVALUE1_S 0
380#define TIMERVALUE1_M 0xffffU
381#define TIMERVALUE1_V(x) ((x) << TIMERVALUE1_S)
382#define TIMERVALUE1_G(x) (((x) >> TIMERVALUE1_S) & TIMERVALUE1_M)
383
384#define SGE_TIMER_VALUE_2_AND_3_A 0x10bc
385
386#define TIMERVALUE2_S 16
387#define TIMERVALUE2_M 0xffffU
388#define TIMERVALUE2_V(x) ((x) << TIMERVALUE2_S)
389#define TIMERVALUE2_G(x) (((x) >> TIMERVALUE2_S) & TIMERVALUE2_M)
390
391#define TIMERVALUE3_S 0
392#define TIMERVALUE3_M 0xffffU
393#define TIMERVALUE3_V(x) ((x) << TIMERVALUE3_S)
394#define TIMERVALUE3_G(x) (((x) >> TIMERVALUE3_S) & TIMERVALUE3_M)
395
396#define SGE_TIMER_VALUE_4_AND_5_A 0x10c0
397
398#define TIMERVALUE4_S 16
399#define TIMERVALUE4_M 0xffffU
400#define TIMERVALUE4_V(x) ((x) << TIMERVALUE4_S)
401#define TIMERVALUE4_G(x) (((x) >> TIMERVALUE4_S) & TIMERVALUE4_M)
145 402
146#define HOSTPAGESIZEPF4_MASK 0x0000000fU 403#define TIMERVALUE5_S 0
147#define HOSTPAGESIZEPF4_SHIFT 16 404#define TIMERVALUE5_M 0xffffU
148#define HOSTPAGESIZEPF4(x) ((x) << HOSTPAGESIZEPF4_SHIFT) 405#define TIMERVALUE5_V(x) ((x) << TIMERVALUE5_S)
406#define TIMERVALUE5_G(x) (((x) >> TIMERVALUE5_S) & TIMERVALUE5_M)
149 407
150#define HOSTPAGESIZEPF3_MASK 0x0000000fU 408#define SGE_DEBUG_INDEX_A 0x10cc
151#define HOSTPAGESIZEPF3_SHIFT 12 409#define SGE_DEBUG_DATA_HIGH_A 0x10d0
152#define HOSTPAGESIZEPF3(x) ((x) << HOSTPAGESIZEPF3_SHIFT) 410#define SGE_DEBUG_DATA_LOW_A 0x10d4
153 411
154#define HOSTPAGESIZEPF2_MASK 0x0000000fU 412#define SGE_DEBUG_DATA_LOW_INDEX_2_A 0x12c8
155#define HOSTPAGESIZEPF2_SHIFT 8 413#define SGE_DEBUG_DATA_LOW_INDEX_3_A 0x12cc
156#define HOSTPAGESIZEPF2(x) ((x) << HOSTPAGESIZEPF2_SHIFT) 414#define SGE_DEBUG_DATA_HIGH_INDEX_10_A 0x12a8
157 415
158#define HOSTPAGESIZEPF1_M 0x0000000fU 416#define SGE_INGRESS_QUEUES_PER_PAGE_PF_A 0x10f4
159#define HOSTPAGESIZEPF1_S 4 417#define SGE_INGRESS_QUEUES_PER_PAGE_VF_A 0x10f8
160#define HOSTPAGESIZEPF1(x) ((x) << HOSTPAGESIZEPF1_S)
161 418
162#define HOSTPAGESIZEPF0_M 0x0000000fU 419#define HP_INT_THRESH_S 28
163#define HOSTPAGESIZEPF0_S 0 420#define HP_INT_THRESH_M 0xfU
164#define HOSTPAGESIZEPF0(x) ((x) << HOSTPAGESIZEPF0_S) 421#define HP_INT_THRESH_V(x) ((x) << HP_INT_THRESH_S)
165 422
166#define SGE_EGRESS_QUEUES_PER_PAGE_PF 0x1010 423#define HP_COUNT_S 16
167#define SGE_EGRESS_QUEUES_PER_PAGE_VF_A 0x1014 424#define HP_COUNT_M 0x7ffU
425#define HP_COUNT_G(x) (((x) >> HP_COUNT_S) & HP_COUNT_M)
168 426
169#define QUEUESPERPAGEPF1_S 4 427#define LP_INT_THRESH_S 12
428#define LP_INT_THRESH_M 0xfU
429#define LP_INT_THRESH_V(x) ((x) << LP_INT_THRESH_S)
170 430
171#define QUEUESPERPAGEPF0_S 0 431#define LP_COUNT_S 0
172#define QUEUESPERPAGEPF0_MASK 0x0000000fU 432#define LP_COUNT_M 0x7ffU
173#define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK) 433#define LP_COUNT_G(x) (((x) >> LP_COUNT_S) & LP_COUNT_M)
174 434
175#define QUEUESPERPAGEPF0 0 435#define LP_INT_THRESH_T5_S 18
176#define QUEUESPERPAGEPF1 4 436#define LP_INT_THRESH_T5_M 0xfffU
437#define LP_INT_THRESH_T5_V(x) ((x) << LP_INT_THRESH_T5_S)
177 438
178/* T5 and later support a new BAR2-based doorbell mechanism for Egress Queues. 439#define LP_COUNT_T5_S 0
179 * The User Doorbells are each 128 bytes in length with a Simple Doorbell at 440#define LP_COUNT_T5_M 0x3ffffU
180 * offsets 8x and a Write Combining single 64-byte Egress Queue Unit 441#define LP_COUNT_T5_G(x) (((x) >> LP_COUNT_T5_S) & LP_COUNT_T5_M)
181 * (X_IDXSIZE_UNIT) Gather Buffer interface at offset 64. For Ingress Queues, 442
182 * we have a Going To Sleep register at offsets 8x+4. 443#define SGE_DOORBELL_CONTROL_A 0x10a8
183 * 444
184 * As noted above, we have many instances of the Simple Doorbell and Going To 445#define SGE_STAT_TOTAL_A 0x10e4
185 * Sleep registers at offsets 8x and 8x+4, respectively. We want to use a 446#define SGE_STAT_MATCH_A 0x10e8
186 * non-64-byte aligned offset for the Simple Doorbell in order to attempt to 447#define SGE_STAT_CFG_A 0x10ec
187 * avoid buffering of the writes to the Simple Doorbell and we want to use a 448
188 * non-contiguous offset for the Going To Sleep writes in order to avoid 449#define STATSOURCE_T5_S 9
189 * possible combining between them. 450#define STATSOURCE_T5_V(x) ((x) << STATSOURCE_T5_S)
190 */ 451
191#define SGE_UDB_SIZE 128 452#define SGE_DBFIFO_STATUS2_A 0x1118
192#define SGE_UDB_KDOORBELL 8 453
193#define SGE_UDB_GTS 20 454#define HP_INT_THRESH_T5_S 10
194#define SGE_UDB_WCDOORBELL 64 455#define HP_INT_THRESH_T5_M 0xfU
195 456#define HP_INT_THRESH_T5_V(x) ((x) << HP_INT_THRESH_T5_S)
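The comment block on the old side above describes the T5 BAR2 User Doorbell layout: each queue owns a 128-byte region (SGE_UDB_SIZE) containing Simple Doorbell slots at offsets 8x, Going To Sleep slots at 8x+4, and a 64-byte Write Combining gather buffer at offset 64, with the driver deliberately picking a non-64-byte-aligned doorbell slot and a non-contiguous GTS slot. A minimal sketch of the offsets implied by the SGE_UDB_* constants being moved out of t4_regs.h here; the function is hypothetical, and the real driver additionally derives the per-queue base "udb" from the queues-per-page and host-page-size registers, which this sketch leaves out.

/* Minimal sketch, not driver code: offsets inside one 128-byte UDB region. */
static inline void t4_udb_layout_example(unsigned int udb)
{
	unsigned int db  = udb + SGE_UDB_KDOORBELL;  /* an "8x" Simple Doorbell slot   */
	unsigned int gts = udb + SGE_UDB_GTS;        /* an "8x+4" Going To Sleep slot  */
	unsigned int wc  = udb + SGE_UDB_WCDOORBELL; /* 64-byte Write Combining buffer */

	(void)db; (void)gts; (void)wc;
}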
196#define SGE_INT_CAUSE1 0x1024 457
197#define SGE_INT_CAUSE2 0x1030 458#define HP_COUNT_T5_S 0
198#define SGE_INT_CAUSE3 0x103c 459#define HP_COUNT_T5_M 0x3ffU
199#define ERR_FLM_DBP 0x80000000U 460#define HP_COUNT_T5_G(x) (((x) >> HP_COUNT_T5_S) & HP_COUNT_T5_M)
200#define ERR_FLM_IDMA1 0x40000000U 461
201#define ERR_FLM_IDMA0 0x20000000U 462#define ENABLE_DROP_S 13
202#define ERR_FLM_HINT 0x10000000U 463#define ENABLE_DROP_V(x) ((x) << ENABLE_DROP_S)
203#define ERR_PCIE_ERROR3 0x08000000U 464#define ENABLE_DROP_F ENABLE_DROP_V(1U)
204#define ERR_PCIE_ERROR2 0x04000000U 465
205#define ERR_PCIE_ERROR1 0x02000000U 466#define DROPPED_DB_S 0
206#define ERR_PCIE_ERROR0 0x01000000U 467#define DROPPED_DB_V(x) ((x) << DROPPED_DB_S)
207#define ERR_TIMER_ABOVE_MAX_QID 0x00800000U 468#define DROPPED_DB_F DROPPED_DB_V(1U)
208#define ERR_CPL_EXCEED_IQE_SIZE 0x00400000U 469
209#define ERR_INVALID_CIDX_INC 0x00200000U 470#define SGE_CTXT_CMD_A 0x11fc
210#define ERR_ITP_TIME_PAUSED 0x00100000U 471#define SGE_DBQ_CTXT_BADDR_A 0x1084
211#define ERR_CPL_OPCODE_0 0x00080000U 472
212#define ERR_DROPPED_DB 0x00040000U 473/* registers for module PCIE */
213#define ERR_DATA_CPL_ON_HIGH_QID1 0x00020000U 474#define PCIE_PF_CFG_A 0x40
214#define ERR_DATA_CPL_ON_HIGH_QID0 0x00010000U 475
215#define ERR_BAD_DB_PIDX3 0x00008000U 476#define AIVEC_S 4
216#define ERR_BAD_DB_PIDX2 0x00004000U 477#define AIVEC_M 0x3ffU
217#define ERR_BAD_DB_PIDX1 0x00002000U 478#define AIVEC_V(x) ((x) << AIVEC_S)
218#define ERR_BAD_DB_PIDX0 0x00001000U 479
219#define ERR_ING_PCIE_CHAN 0x00000800U 480#define PCIE_PF_CLI_A 0x44
220#define ERR_ING_CTXT_PRIO 0x00000400U 481#define PCIE_INT_CAUSE_A 0x3004
221#define ERR_EGR_CTXT_PRIO 0x00000200U 482
222#define DBFIFO_HP_INT 0x00000100U 483#define UNXSPLCPLERR_S 29
223#define DBFIFO_LP_INT 0x00000080U 484#define UNXSPLCPLERR_V(x) ((x) << UNXSPLCPLERR_S)
224#define REG_ADDRESS_ERR 0x00000040U 485#define UNXSPLCPLERR_F UNXSPLCPLERR_V(1U)
225#define INGRESS_SIZE_ERR 0x00000020U 486
226#define EGRESS_SIZE_ERR 0x00000010U 487#define PCIEPINT_S 28
227#define ERR_INV_CTXT3 0x00000008U 488#define PCIEPINT_V(x) ((x) << PCIEPINT_S)
228#define ERR_INV_CTXT2 0x00000004U 489#define PCIEPINT_F PCIEPINT_V(1U)
229#define ERR_INV_CTXT1 0x00000002U 490
230#define ERR_INV_CTXT0 0x00000001U 491#define PCIESINT_S 27
231 492#define PCIESINT_V(x) ((x) << PCIESINT_S)
232#define SGE_INT_ENABLE3 0x1040 493#define PCIESINT_F PCIESINT_V(1U)
233#define SGE_FL_BUFFER_SIZE0 0x1044 494
234#define SGE_FL_BUFFER_SIZE1 0x1048 495#define RPLPERR_S 26
235#define SGE_FL_BUFFER_SIZE2 0x104c 496#define RPLPERR_V(x) ((x) << RPLPERR_S)
236#define SGE_FL_BUFFER_SIZE3 0x1050 497#define RPLPERR_F RPLPERR_V(1U)
237#define SGE_FL_BUFFER_SIZE4 0x1054 498
238#define SGE_FL_BUFFER_SIZE5 0x1058 499#define RXWRPERR_S 25
239#define SGE_FL_BUFFER_SIZE6 0x105c 500#define RXWRPERR_V(x) ((x) << RXWRPERR_S)
240#define SGE_FL_BUFFER_SIZE7 0x1060 501#define RXWRPERR_F RXWRPERR_V(1U)
241#define SGE_FL_BUFFER_SIZE8 0x1064 502
242 503#define RXCPLPERR_S 24
243#define SGE_INGRESS_RX_THRESHOLD 0x10a0 504#define RXCPLPERR_V(x) ((x) << RXCPLPERR_S)
244#define THRESHOLD_0_MASK 0x3f000000U 505#define RXCPLPERR_F RXCPLPERR_V(1U)
245#define THRESHOLD_0_SHIFT 24 506
246#define THRESHOLD_0(x) ((x) << THRESHOLD_0_SHIFT) 507#define PIOTAGPERR_S 23
247#define THRESHOLD_0_GET(x) (((x) & THRESHOLD_0_MASK) >> THRESHOLD_0_SHIFT) 508#define PIOTAGPERR_V(x) ((x) << PIOTAGPERR_S)
248#define THRESHOLD_1_MASK 0x003f0000U 509#define PIOTAGPERR_F PIOTAGPERR_V(1U)
249#define THRESHOLD_1_SHIFT 16 510
250#define THRESHOLD_1(x) ((x) << THRESHOLD_1_SHIFT) 511#define MATAGPERR_S 22
251#define THRESHOLD_1_GET(x) (((x) & THRESHOLD_1_MASK) >> THRESHOLD_1_SHIFT) 512#define MATAGPERR_V(x) ((x) << MATAGPERR_S)
252#define THRESHOLD_2_MASK 0x00003f00U 513#define MATAGPERR_F MATAGPERR_V(1U)
253#define THRESHOLD_2_SHIFT 8 514
254#define THRESHOLD_2(x) ((x) << THRESHOLD_2_SHIFT) 515#define INTXCLRPERR_S 21
255#define THRESHOLD_2_GET(x) (((x) & THRESHOLD_2_MASK) >> THRESHOLD_2_SHIFT) 516#define INTXCLRPERR_V(x) ((x) << INTXCLRPERR_S)
256#define THRESHOLD_3_MASK 0x0000003fU 517#define INTXCLRPERR_F INTXCLRPERR_V(1U)
257#define THRESHOLD_3_SHIFT 0 518
258#define THRESHOLD_3(x) ((x) << THRESHOLD_3_SHIFT) 519#define FIDPERR_S 20
259#define THRESHOLD_3_GET(x) (((x) & THRESHOLD_3_MASK) >> THRESHOLD_3_SHIFT) 520#define FIDPERR_V(x) ((x) << FIDPERR_S)
260 521#define FIDPERR_F FIDPERR_V(1U)
261#define SGE_CONM_CTRL 0x1094 522
262#define EGRTHRESHOLD_MASK 0x00003f00U 523#define CFGSNPPERR_S 19
263#define EGRTHRESHOLDshift 8 524#define CFGSNPPERR_V(x) ((x) << CFGSNPPERR_S)
264#define EGRTHRESHOLD(x) ((x) << EGRTHRESHOLDshift) 525#define CFGSNPPERR_F CFGSNPPERR_V(1U)
265#define EGRTHRESHOLD_GET(x) (((x) & EGRTHRESHOLD_MASK) >> EGRTHRESHOLDshift) 526
266 527#define HRSPPERR_S 18
267#define EGRTHRESHOLDPACKING_MASK 0x3fU 528#define HRSPPERR_V(x) ((x) << HRSPPERR_S)
268#define EGRTHRESHOLDPACKING_SHIFT 14 529#define HRSPPERR_F HRSPPERR_V(1U)
269#define EGRTHRESHOLDPACKING(x) ((x) << EGRTHRESHOLDPACKING_SHIFT) 530
270#define EGRTHRESHOLDPACKING_GET(x) (((x) >> EGRTHRESHOLDPACKING_SHIFT) & \ 531#define HREQPERR_S 17
271 EGRTHRESHOLDPACKING_MASK) 532#define HREQPERR_V(x) ((x) << HREQPERR_S)
272 533#define HREQPERR_F HREQPERR_V(1U)
273#define SGE_DBFIFO_STATUS 0x10a4 534
274#define HP_INT_THRESH_SHIFT 28 535#define HCNTPERR_S 16
275#define HP_INT_THRESH_MASK 0xfU 536#define HCNTPERR_V(x) ((x) << HCNTPERR_S)
276#define HP_INT_THRESH(x) ((x) << HP_INT_THRESH_SHIFT) 537#define HCNTPERR_F HCNTPERR_V(1U)
277#define LP_INT_THRESH_SHIFT 12 538
278#define LP_INT_THRESH_MASK 0xfU 539#define DRSPPERR_S 15
279#define LP_INT_THRESH(x) ((x) << LP_INT_THRESH_SHIFT) 540#define DRSPPERR_V(x) ((x) << DRSPPERR_S)
280 541#define DRSPPERR_F DRSPPERR_V(1U)
281#define SGE_DOORBELL_CONTROL 0x10a8 542
282#define ENABLE_DROP (1 << 13) 543#define DREQPERR_S 14
283 544#define DREQPERR_V(x) ((x) << DREQPERR_S)
284#define S_NOCOALESCE 26 545#define DREQPERR_F DREQPERR_V(1U)
285#define V_NOCOALESCE(x) ((x) << S_NOCOALESCE) 546
286#define F_NOCOALESCE V_NOCOALESCE(1U) 547#define DCNTPERR_S 13
287 548#define DCNTPERR_V(x) ((x) << DCNTPERR_S)
288#define SGE_TIMESTAMP_LO 0x1098 549#define DCNTPERR_F DCNTPERR_V(1U)
289#define SGE_TIMESTAMP_HI 0x109c 550
290#define S_TSVAL 0 551#define CRSPPERR_S 12
291#define M_TSVAL 0xfffffffU 552#define CRSPPERR_V(x) ((x) << CRSPPERR_S)
292#define GET_TSVAL(x) (((x) >> S_TSVAL) & M_TSVAL) 553#define CRSPPERR_F CRSPPERR_V(1U)
293 554
294#define SGE_TIMER_VALUE_0_AND_1 0x10b8 555#define CREQPERR_S 11
295#define TIMERVALUE0_MASK 0xffff0000U 556#define CREQPERR_V(x) ((x) << CREQPERR_S)
296#define TIMERVALUE0_SHIFT 16 557#define CREQPERR_F CREQPERR_V(1U)
297#define TIMERVALUE0(x) ((x) << TIMERVALUE0_SHIFT) 558
298#define TIMERVALUE0_GET(x) (((x) & TIMERVALUE0_MASK) >> TIMERVALUE0_SHIFT) 559#define CCNTPERR_S 10
299#define TIMERVALUE1_MASK 0x0000ffffU 560#define CCNTPERR_V(x) ((x) << CCNTPERR_S)
300#define TIMERVALUE1_SHIFT 0 561#define CCNTPERR_F CCNTPERR_V(1U)
301#define TIMERVALUE1(x) ((x) << TIMERVALUE1_SHIFT) 562
302#define TIMERVALUE1_GET(x) (((x) & TIMERVALUE1_MASK) >> TIMERVALUE1_SHIFT) 563#define TARTAGPERR_S 9
303 564#define TARTAGPERR_V(x) ((x) << TARTAGPERR_S)
304#define SGE_TIMER_VALUE_2_AND_3 0x10bc 565#define TARTAGPERR_F TARTAGPERR_V(1U)
305#define TIMERVALUE2_MASK 0xffff0000U 566
306#define TIMERVALUE2_SHIFT 16 567#define PIOREQPERR_S 8
307#define TIMERVALUE2(x) ((x) << TIMERVALUE2_SHIFT) 568#define PIOREQPERR_V(x) ((x) << PIOREQPERR_S)
308#define TIMERVALUE2_GET(x) (((x) & TIMERVALUE2_MASK) >> TIMERVALUE2_SHIFT) 569#define PIOREQPERR_F PIOREQPERR_V(1U)
309#define TIMERVALUE3_MASK 0x0000ffffU 570
310#define TIMERVALUE3_SHIFT 0 571#define PIOCPLPERR_S 7
311#define TIMERVALUE3(x) ((x) << TIMERVALUE3_SHIFT) 572#define PIOCPLPERR_V(x) ((x) << PIOCPLPERR_S)
312#define TIMERVALUE3_GET(x) (((x) & TIMERVALUE3_MASK) >> TIMERVALUE3_SHIFT) 573#define PIOCPLPERR_F PIOCPLPERR_V(1U)
313 574
314#define SGE_TIMER_VALUE_4_AND_5 0x10c0 575#define MSIXDIPERR_S 6
315#define TIMERVALUE4_MASK 0xffff0000U 576#define MSIXDIPERR_V(x) ((x) << MSIXDIPERR_S)
316#define TIMERVALUE4_SHIFT 16 577#define MSIXDIPERR_F MSIXDIPERR_V(1U)
317#define TIMERVALUE4(x) ((x) << TIMERVALUE4_SHIFT) 578
318#define TIMERVALUE4_GET(x) (((x) & TIMERVALUE4_MASK) >> TIMERVALUE4_SHIFT) 579#define MSIXDATAPERR_S 5
319#define TIMERVALUE5_MASK 0x0000ffffU 580#define MSIXDATAPERR_V(x) ((x) << MSIXDATAPERR_S)
320#define TIMERVALUE5_SHIFT 0 581#define MSIXDATAPERR_F MSIXDATAPERR_V(1U)
321#define TIMERVALUE5(x) ((x) << TIMERVALUE5_SHIFT) 582
322#define TIMERVALUE5_GET(x) (((x) & TIMERVALUE5_MASK) >> TIMERVALUE5_SHIFT) 583#define MSIXADDRHPERR_S 4
323 584#define MSIXADDRHPERR_V(x) ((x) << MSIXADDRHPERR_S)
324#define SGE_DEBUG_INDEX 0x10cc 585#define MSIXADDRHPERR_F MSIXADDRHPERR_V(1U)
325#define SGE_DEBUG_DATA_HIGH 0x10d0 586
326#define SGE_DEBUG_DATA_LOW 0x10d4 587#define MSIXADDRLPERR_S 3
327#define SGE_DEBUG_DATA_LOW_INDEX_2 0x12c8 588#define MSIXADDRLPERR_V(x) ((x) << MSIXADDRLPERR_S)
328#define SGE_DEBUG_DATA_LOW_INDEX_3 0x12cc 589#define MSIXADDRLPERR_F MSIXADDRLPERR_V(1U)
329#define SGE_DEBUG_DATA_HIGH_INDEX_10 0x12a8 590
330#define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4 591#define MSIDATAPERR_S 2
331#define SGE_INGRESS_QUEUES_PER_PAGE_VF_A 0x10f8 592#define MSIDATAPERR_V(x) ((x) << MSIDATAPERR_S)
593#define MSIDATAPERR_F MSIDATAPERR_V(1U)
594
595#define MSIADDRHPERR_S 1
596#define MSIADDRHPERR_V(x) ((x) << MSIADDRHPERR_S)
597#define MSIADDRHPERR_F MSIADDRHPERR_V(1U)
598
599#define MSIADDRLPERR_S 0
600#define MSIADDRLPERR_V(x) ((x) << MSIADDRLPERR_S)
601#define MSIADDRLPERR_F MSIADDRLPERR_V(1U)
602
603#define READRSPERR_S 29
604#define READRSPERR_V(x) ((x) << READRSPERR_S)
605#define READRSPERR_F READRSPERR_V(1U)
606
607#define TRGT1GRPPERR_S 28
608#define TRGT1GRPPERR_V(x) ((x) << TRGT1GRPPERR_S)
609#define TRGT1GRPPERR_F TRGT1GRPPERR_V(1U)
610
611#define IPSOTPERR_S 27
612#define IPSOTPERR_V(x) ((x) << IPSOTPERR_S)
613#define IPSOTPERR_F IPSOTPERR_V(1U)
614
615#define IPRETRYPERR_S 26
616#define IPRETRYPERR_V(x) ((x) << IPRETRYPERR_S)
617#define IPRETRYPERR_F IPRETRYPERR_V(1U)
618
619#define IPRXDATAGRPPERR_S 25
620#define IPRXDATAGRPPERR_V(x) ((x) << IPRXDATAGRPPERR_S)
621#define IPRXDATAGRPPERR_F IPRXDATAGRPPERR_V(1U)
622
623#define IPRXHDRGRPPERR_S 24
624#define IPRXHDRGRPPERR_V(x) ((x) << IPRXHDRGRPPERR_S)
625#define IPRXHDRGRPPERR_F IPRXHDRGRPPERR_V(1U)
626
627#define MAGRPPERR_S 22
628#define MAGRPPERR_V(x) ((x) << MAGRPPERR_S)
629#define MAGRPPERR_F MAGRPPERR_V(1U)
630
631#define VFIDPERR_S 21
632#define VFIDPERR_V(x) ((x) << VFIDPERR_S)
633#define VFIDPERR_F VFIDPERR_V(1U)
634
635#define HREQWRPERR_S 16
636#define HREQWRPERR_V(x) ((x) << HREQWRPERR_S)
637#define HREQWRPERR_F HREQWRPERR_V(1U)
638
639#define DREQWRPERR_S 13
640#define DREQWRPERR_V(x) ((x) << DREQWRPERR_S)
641#define DREQWRPERR_F DREQWRPERR_V(1U)
642
643#define CREQRDPERR_S 11
644#define CREQRDPERR_V(x) ((x) << CREQRDPERR_S)
645#define CREQRDPERR_F CREQRDPERR_V(1U)
646
647#define MSTTAGQPERR_S 10
648#define MSTTAGQPERR_V(x) ((x) << MSTTAGQPERR_S)
649#define MSTTAGQPERR_F MSTTAGQPERR_V(1U)
650
651#define PIOREQGRPPERR_S 8
652#define PIOREQGRPPERR_V(x) ((x) << PIOREQGRPPERR_S)
653#define PIOREQGRPPERR_F PIOREQGRPPERR_V(1U)
654
655#define PIOCPLGRPPERR_S 7
656#define PIOCPLGRPPERR_V(x) ((x) << PIOCPLGRPPERR_S)
657#define PIOCPLGRPPERR_F PIOCPLGRPPERR_V(1U)
658
659#define MSIXSTIPERR_S 2
660#define MSIXSTIPERR_V(x) ((x) << MSIXSTIPERR_S)
661#define MSIXSTIPERR_F MSIXSTIPERR_V(1U)
662
663#define MSTTIMEOUTPERR_S 1
664#define MSTTIMEOUTPERR_V(x) ((x) << MSTTIMEOUTPERR_S)
665#define MSTTIMEOUTPERR_F MSTTIMEOUTPERR_V(1U)
666
667#define MSTGRPPERR_S 0
668#define MSTGRPPERR_V(x) ((x) << MSTGRPPERR_S)
669#define MSTGRPPERR_F MSTGRPPERR_V(1U)
670
671#define PCIE_NONFAT_ERR_A 0x3010
672#define PCIE_CFG_SPACE_REQ_A 0x3060
673#define PCIE_CFG_SPACE_DATA_A 0x3064
674#define PCIE_MEM_ACCESS_BASE_WIN_A 0x3068
675
676#define PCIEOFST_S 10
677#define PCIEOFST_M 0x3fffffU
678#define PCIEOFST_G(x) (((x) >> PCIEOFST_S) & PCIEOFST_M)
679
680#define BIR_S 8
681#define BIR_M 0x3U
682#define BIR_V(x) ((x) << BIR_S)
683#define BIR_G(x) (((x) >> BIR_S) & BIR_M)
684
685#define WINDOW_S 0
686#define WINDOW_M 0xffU
687#define WINDOW_V(x) ((x) << WINDOW_S)
688#define WINDOW_G(x) (((x) >> WINDOW_S) & WINDOW_M)
689
690#define PCIE_MEM_ACCESS_OFFSET_A 0x306c
691
692#define ENABLE_S 30
693#define ENABLE_V(x) ((x) << ENABLE_S)
694#define ENABLE_F ENABLE_V(1U)
695
696#define LOCALCFG_S 28
697#define LOCALCFG_V(x) ((x) << LOCALCFG_S)
698#define LOCALCFG_F LOCALCFG_V(1U)
699
700#define FUNCTION_S 12
701#define FUNCTION_V(x) ((x) << FUNCTION_S)
702
703#define REGISTER_S 0
704#define REGISTER_V(x) ((x) << REGISTER_S)
705
706#define PFNUM_S 0
707#define PFNUM_V(x) ((x) << PFNUM_S)
708
709#define PCIE_FW_A 0x30b8
710
711#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A 0x5908
712
713#define RNPP_S 31
714#define RNPP_V(x) ((x) << RNPP_S)
715#define RNPP_F RNPP_V(1U)
716
717#define RPCP_S 29
718#define RPCP_V(x) ((x) << RPCP_S)
719#define RPCP_F RPCP_V(1U)
720
721#define RCIP_S 27
722#define RCIP_V(x) ((x) << RCIP_S)
723#define RCIP_F RCIP_V(1U)
724
725#define RCCP_S 26
726#define RCCP_V(x) ((x) << RCCP_S)
727#define RCCP_F RCCP_V(1U)
728
729#define RFTP_S 23
730#define RFTP_V(x) ((x) << RFTP_S)
731#define RFTP_F RFTP_V(1U)
732
733#define PTRP_S 20
734#define PTRP_V(x) ((x) << PTRP_S)
735#define PTRP_F PTRP_V(1U)
332 736
333#define S_HP_INT_THRESH 28 737#define PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A 0x59a4
334#define M_HP_INT_THRESH 0xfU
335#define V_HP_INT_THRESH(x) ((x) << S_HP_INT_THRESH)
336#define S_LP_INT_THRESH_T5 18
337#define V_LP_INT_THRESH_T5(x) ((x) << S_LP_INT_THRESH_T5)
338#define M_LP_COUNT_T5 0x3ffffU
339#define G_LP_COUNT_T5(x) (((x) >> S_LP_COUNT) & M_LP_COUNT_T5)
340#define M_HP_COUNT 0x7ffU
341#define S_HP_COUNT 16
342#define G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT)
343#define S_LP_INT_THRESH 12
344#define M_LP_INT_THRESH 0xfU
345#define M_LP_INT_THRESH_T5 0xfffU
346#define V_LP_INT_THRESH(x) ((x) << S_LP_INT_THRESH)
347#define M_LP_COUNT 0x7ffU
348#define S_LP_COUNT 0
349#define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT)
350#define A_SGE_DBFIFO_STATUS 0x10a4
351
352#define SGE_STAT_TOTAL 0x10e4
353#define SGE_STAT_MATCH 0x10e8
354
355#define SGE_STAT_CFG 0x10ec
356#define S_STATSOURCE_T5 9
357#define STATSOURCE_T5(x) ((x) << S_STATSOURCE_T5)
358
359#define SGE_DBFIFO_STATUS2 0x1118
360#define M_HP_COUNT_T5 0x3ffU
361#define G_HP_COUNT_T5(x) ((x) & M_HP_COUNT_T5)
362#define S_HP_INT_THRESH_T5 10
363#define M_HP_INT_THRESH_T5 0xfU
364#define V_HP_INT_THRESH_T5(x) ((x) << S_HP_INT_THRESH_T5)
365
366#define S_ENABLE_DROP 13
367#define V_ENABLE_DROP(x) ((x) << S_ENABLE_DROP)
368#define F_ENABLE_DROP V_ENABLE_DROP(1U)
369#define S_DROPPED_DB 0
370#define V_DROPPED_DB(x) ((x) << S_DROPPED_DB)
371#define F_DROPPED_DB V_DROPPED_DB(1U)
372#define A_SGE_DOORBELL_CONTROL 0x10a8
373
374#define A_SGE_CTXT_CMD 0x11fc
375#define A_SGE_DBQ_CTXT_BADDR 0x1084
376
377#define PCIE_PF_CFG 0x40
378#define AIVEC(x) ((x) << 4)
379#define AIVEC_MASK 0x3ffU
380
381#define PCIE_PF_CLI 0x44
382#define PCIE_INT_CAUSE 0x3004
383#define UNXSPLCPLERR 0x20000000U
384#define PCIEPINT 0x10000000U
385#define PCIESINT 0x08000000U
386#define RPLPERR 0x04000000U
387#define RXWRPERR 0x02000000U
388#define RXCPLPERR 0x01000000U
389#define PIOTAGPERR 0x00800000U
390#define MATAGPERR 0x00400000U
391#define INTXCLRPERR 0x00200000U
392#define FIDPERR 0x00100000U
393#define CFGSNPPERR 0x00080000U
394#define HRSPPERR 0x00040000U
395#define HREQPERR 0x00020000U
396#define HCNTPERR 0x00010000U
397#define DRSPPERR 0x00008000U
398#define DREQPERR 0x00004000U
399#define DCNTPERR 0x00002000U
400#define CRSPPERR 0x00001000U
401#define CREQPERR 0x00000800U
402#define CCNTPERR 0x00000400U
403#define TARTAGPERR 0x00000200U
404#define PIOREQPERR 0x00000100U
405#define PIOCPLPERR 0x00000080U
406#define MSIXDIPERR 0x00000040U
407#define MSIXDATAPERR 0x00000020U
408#define MSIXADDRHPERR 0x00000010U
409#define MSIXADDRLPERR 0x00000008U
410#define MSIDATAPERR 0x00000004U
411#define MSIADDRHPERR 0x00000002U
412#define MSIADDRLPERR 0x00000001U
413
414#define READRSPERR 0x20000000U
415#define TRGT1GRPPERR 0x10000000U
416#define IPSOTPERR 0x08000000U
417#define IPRXDATAGRPPERR 0x02000000U
418#define IPRXHDRGRPPERR 0x01000000U
419#define MAGRPPERR 0x00400000U
420#define VFIDPERR 0x00200000U
421#define HREQWRPERR 0x00010000U
422#define DREQWRPERR 0x00002000U
423#define MSTTAGQPERR 0x00000400U
424#define PIOREQGRPPERR 0x00000100U
425#define PIOCPLGRPPERR 0x00000080U
426#define MSIXSTIPERR 0x00000004U
427#define MSTTIMEOUTPERR 0x00000002U
428#define MSTGRPPERR 0x00000001U
429
430#define PCIE_NONFAT_ERR 0x3010
431#define PCIE_CFG_SPACE_REQ 0x3060
432#define PCIE_CFG_SPACE_DATA 0x3064
433#define PCIE_MEM_ACCESS_BASE_WIN 0x3068
434#define S_PCIEOFST 10
435#define M_PCIEOFST 0x3fffffU
436#define GET_PCIEOFST(x) (((x) >> S_PCIEOFST) & M_PCIEOFST)
437#define PCIEOFST_MASK 0xfffffc00U
438#define BIR_MASK 0x00000300U
439#define BIR_SHIFT 8
440#define BIR(x) ((x) << BIR_SHIFT)
441#define WINDOW_MASK 0x000000ffU
442#define WINDOW_SHIFT 0
443#define WINDOW(x) ((x) << WINDOW_SHIFT)
444#define GET_WINDOW(x) (((x) >> WINDOW_SHIFT) & WINDOW_MASK)
445#define PCIE_MEM_ACCESS_OFFSET 0x306c
446#define ENABLE (1U << 30)
447#define FUNCTION(x) ((x) << 12)
448#define F_LOCALCFG (1U << 28)
449
450#define S_PFNUM 0
451#define V_PFNUM(x) ((x) << S_PFNUM)
452
453#define PCIE_FW 0x30b8
454#define PCIE_FW_ERR 0x80000000U
455#define PCIE_FW_INIT 0x40000000U
456#define PCIE_FW_HALT 0x20000000U
457#define PCIE_FW_MASTER_VLD 0x00008000U
458#define PCIE_FW_MASTER(x) ((x) << 12)
459#define PCIE_FW_MASTER_MASK 0x7
460#define PCIE_FW_MASTER_GET(x) (((x) >> 12) & PCIE_FW_MASTER_MASK)
461
462#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908
463#define RNPP 0x80000000U
464#define RPCP 0x20000000U
465#define RCIP 0x08000000U
466#define RCCP 0x04000000U
467#define RFTP 0x00800000U
468#define PTRP 0x00100000U
469
470#define PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS 0x59a4
471#define TPCP 0x40000000U
472#define TNPP 0x20000000U
473#define TFTP 0x10000000U
474#define TCAP 0x08000000U
475#define TCIP 0x04000000U
476#define RCAP 0x02000000U
477#define PLUP 0x00800000U
478#define PLDN 0x00400000U
479#define OTDD 0x00200000U
480#define GTRP 0x00100000U
481#define RDPE 0x00040000U
482#define TDCE 0x00020000U
483#define TDUE 0x00010000U
484
485#define MC_INT_CAUSE 0x7518
486#define MC_P_INT_CAUSE 0x41318
487#define ECC_UE_INT_CAUSE 0x00000004U
488#define ECC_CE_INT_CAUSE 0x00000002U
489#define PERR_INT_CAUSE 0x00000001U
490
491#define MC_ECC_STATUS 0x751c
492#define MC_P_ECC_STATUS 0x4131c
493#define ECC_CECNT_MASK 0xffff0000U
494#define ECC_CECNT_SHIFT 16
495#define ECC_CECNT(x) ((x) << ECC_CECNT_SHIFT)
496#define ECC_CECNT_GET(x) (((x) & ECC_CECNT_MASK) >> ECC_CECNT_SHIFT)
497#define ECC_UECNT_MASK 0x0000ffffU
498#define ECC_UECNT_SHIFT 0
499#define ECC_UECNT(x) ((x) << ECC_UECNT_SHIFT)
500#define ECC_UECNT_GET(x) (((x) & ECC_UECNT_MASK) >> ECC_UECNT_SHIFT)
501
502#define MC_BIST_CMD 0x7600
503#define START_BIST 0x80000000U
504#define BIST_CMD_GAP_MASK 0x0000ff00U
505#define BIST_CMD_GAP_SHIFT 8
506#define BIST_CMD_GAP(x) ((x) << BIST_CMD_GAP_SHIFT)
507#define BIST_OPCODE_MASK 0x00000003U
508#define BIST_OPCODE_SHIFT 0
509#define BIST_OPCODE(x) ((x) << BIST_OPCODE_SHIFT)
510
511#define MC_BIST_CMD_ADDR 0x7604
512#define MC_BIST_CMD_LEN 0x7608
513#define MC_BIST_DATA_PATTERN 0x760c
514#define BIST_DATA_TYPE_MASK 0x0000000fU
515#define BIST_DATA_TYPE_SHIFT 0
516#define BIST_DATA_TYPE(x) ((x) << BIST_DATA_TYPE_SHIFT)
517
518#define MC_BIST_STATUS_RDATA 0x7688
519 738
739#define TPCP_S 30
740#define TPCP_V(x) ((x) << TPCP_S)
741#define TPCP_F TPCP_V(1U)
742
743#define TNPP_S 29
744#define TNPP_V(x) ((x) << TNPP_S)
745#define TNPP_F TNPP_V(1U)
746
747#define TFTP_S 28
748#define TFTP_V(x) ((x) << TFTP_S)
749#define TFTP_F TFTP_V(1U)
750
751#define TCAP_S 27
752#define TCAP_V(x) ((x) << TCAP_S)
753#define TCAP_F TCAP_V(1U)
754
755#define TCIP_S 26
756#define TCIP_V(x) ((x) << TCIP_S)
757#define TCIP_F TCIP_V(1U)
758
759#define RCAP_S 25
760#define RCAP_V(x) ((x) << RCAP_S)
761#define RCAP_F RCAP_V(1U)
762
763#define PLUP_S 23
764#define PLUP_V(x) ((x) << PLUP_S)
765#define PLUP_F PLUP_V(1U)
766
767#define PLDN_S 22
768#define PLDN_V(x) ((x) << PLDN_S)
769#define PLDN_F PLDN_V(1U)
770
771#define OTDD_S 21
772#define OTDD_V(x) ((x) << OTDD_S)
773#define OTDD_F OTDD_V(1U)
774
775#define GTRP_S 20
776#define GTRP_V(x) ((x) << GTRP_S)
777#define GTRP_F GTRP_V(1U)
778
779#define RDPE_S 18
780#define RDPE_V(x) ((x) << RDPE_S)
781#define RDPE_F RDPE_V(1U)
782
783#define TDCE_S 17
784#define TDCE_V(x) ((x) << TDCE_S)
785#define TDCE_F TDCE_V(1U)
786
787#define TDUE_S 16
788#define TDUE_V(x) ((x) << TDUE_S)
789#define TDUE_F TDUE_V(1U)
790
791/* registers for module MC */
792#define MC_INT_CAUSE_A 0x7518
793#define MC_P_INT_CAUSE_A 0x41318
794
795#define ECC_UE_INT_CAUSE_S 2
796#define ECC_UE_INT_CAUSE_V(x) ((x) << ECC_UE_INT_CAUSE_S)
797#define ECC_UE_INT_CAUSE_F ECC_UE_INT_CAUSE_V(1U)
798
799#define ECC_CE_INT_CAUSE_S 1
800#define ECC_CE_INT_CAUSE_V(x) ((x) << ECC_CE_INT_CAUSE_S)
801#define ECC_CE_INT_CAUSE_F ECC_CE_INT_CAUSE_V(1U)
802
803#define PERR_INT_CAUSE_S 0
804#define PERR_INT_CAUSE_V(x) ((x) << PERR_INT_CAUSE_S)
805#define PERR_INT_CAUSE_F PERR_INT_CAUSE_V(1U)
806
807#define MC_ECC_STATUS_A 0x751c
808#define MC_P_ECC_STATUS_A 0x4131c
809
810#define ECC_CECNT_S 16
811#define ECC_CECNT_M 0xffffU
812#define ECC_CECNT_V(x) ((x) << ECC_CECNT_S)
813#define ECC_CECNT_G(x) (((x) >> ECC_CECNT_S) & ECC_CECNT_M)
814
815#define ECC_UECNT_S 0
816#define ECC_UECNT_M 0xffffU
817#define ECC_UECNT_V(x) ((x) << ECC_UECNT_S)
818#define ECC_UECNT_G(x) (((x) >> ECC_UECNT_S) & ECC_UECNT_M)
819
820#define MC_BIST_CMD_A 0x7600
821
822#define START_BIST_S 31
823#define START_BIST_V(x) ((x) << START_BIST_S)
824#define START_BIST_F START_BIST_V(1U)
825
826#define BIST_CMD_GAP_S 8
827#define BIST_CMD_GAP_V(x) ((x) << BIST_CMD_GAP_S)
828
829#define BIST_OPCODE_S 0
830#define BIST_OPCODE_V(x) ((x) << BIST_OPCODE_S)
831
832#define MC_BIST_CMD_ADDR_A 0x7604
833#define MC_BIST_CMD_LEN_A 0x7608
834#define MC_BIST_DATA_PATTERN_A 0x760c
835
836#define MC_BIST_STATUS_RDATA_A 0x7688
837
838/* registers for module MA */
520#define MA_EDRAM0_BAR_A 0x77c0 839#define MA_EDRAM0_BAR_A 0x77c0
521 840
522#define EDRAM0_SIZE_S 0 841#define EDRAM0_SIZE_S 0
@@ -574,263 +893,608 @@
574#define EXT_MEM0_ENABLE_V(x) ((x) << EXT_MEM0_ENABLE_S) 893#define EXT_MEM0_ENABLE_V(x) ((x) << EXT_MEM0_ENABLE_S)
575#define EXT_MEM0_ENABLE_F EXT_MEM0_ENABLE_V(1U) 894#define EXT_MEM0_ENABLE_F EXT_MEM0_ENABLE_V(1U)
576 895
577#define MA_INT_CAUSE 0x77e0 896#define MA_INT_CAUSE_A 0x77e0
578#define MEM_PERR_INT_CAUSE 0x00000002U 897
579#define MEM_WRAP_INT_CAUSE 0x00000001U 898#define MEM_PERR_INT_CAUSE_S 1
580 899#define MEM_PERR_INT_CAUSE_V(x) ((x) << MEM_PERR_INT_CAUSE_S)
581#define MA_INT_WRAP_STATUS 0x77e4 900#define MEM_PERR_INT_CAUSE_F MEM_PERR_INT_CAUSE_V(1U)
582#define MEM_WRAP_ADDRESS_MASK 0xfffffff0U 901
583#define MEM_WRAP_ADDRESS_SHIFT 4 902#define MEM_WRAP_INT_CAUSE_S 0
584#define MEM_WRAP_ADDRESS_GET(x) (((x) & MEM_WRAP_ADDRESS_MASK) >> MEM_WRAP_ADDRESS_SHIFT) 903#define MEM_WRAP_INT_CAUSE_V(x) ((x) << MEM_WRAP_INT_CAUSE_S)
585#define MEM_WRAP_CLIENT_NUM_MASK 0x0000000fU 904#define MEM_WRAP_INT_CAUSE_F MEM_WRAP_INT_CAUSE_V(1U)
586#define MEM_WRAP_CLIENT_NUM_SHIFT 0 905
587#define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT) 906#define MA_INT_WRAP_STATUS_A 0x77e4
588#define MA_PCIE_FW 0x30b8 907
589#define MA_PARITY_ERROR_STATUS 0x77f4 908#define MEM_WRAP_ADDRESS_S 4
590#define MA_PARITY_ERROR_STATUS2 0x7804 909#define MEM_WRAP_ADDRESS_M 0xfffffffU
591 910#define MEM_WRAP_ADDRESS_G(x) (((x) >> MEM_WRAP_ADDRESS_S) & MEM_WRAP_ADDRESS_M)
592#define EDC_0_BASE_ADDR 0x7900 911
593 912#define MEM_WRAP_CLIENT_NUM_S 0
594#define EDC_BIST_CMD 0x7904 913#define MEM_WRAP_CLIENT_NUM_M 0xfU
595#define EDC_BIST_CMD_ADDR 0x7908 914#define MEM_WRAP_CLIENT_NUM_G(x) \
596#define EDC_BIST_CMD_LEN 0x790c 915 (((x) >> MEM_WRAP_CLIENT_NUM_S) & MEM_WRAP_CLIENT_NUM_M)
597#define EDC_BIST_DATA_PATTERN 0x7910 916
598#define EDC_BIST_STATUS_RDATA 0x7928 917#define MA_PARITY_ERROR_STATUS_A 0x77f4
599#define EDC_INT_CAUSE 0x7978 918#define MA_PARITY_ERROR_STATUS1_A 0x77f4
600#define ECC_UE_PAR 0x00000020U 919#define MA_PARITY_ERROR_STATUS2_A 0x7804
601#define ECC_CE_PAR 0x00000010U 920
602#define PERR_PAR_CAUSE 0x00000008U 921/* registers for module EDC_0 */
603 922#define EDC_0_BASE_ADDR 0x7900
604#define EDC_ECC_STATUS 0x797c 923
605 924#define EDC_BIST_CMD_A 0x7904
606#define EDC_1_BASE_ADDR 0x7980 925#define EDC_BIST_CMD_ADDR_A 0x7908
607 926#define EDC_BIST_CMD_LEN_A 0x790c
608#define CIM_BOOT_CFG 0x7b00 927#define EDC_BIST_DATA_PATTERN_A 0x7910
609#define BOOTADDR_MASK 0xffffff00U 928#define EDC_BIST_STATUS_RDATA_A 0x7928
610#define UPCRST 0x1U 929#define EDC_INT_CAUSE_A 0x7978
611 930
612#define CIM_PF_MAILBOX_DATA 0x240 931#define ECC_UE_PAR_S 5
613#define CIM_PF_MAILBOX_CTRL 0x280 932#define ECC_UE_PAR_V(x) ((x) << ECC_UE_PAR_S)
614#define MBMSGVALID 0x00000008U 933#define ECC_UE_PAR_F ECC_UE_PAR_V(1U)
615#define MBINTREQ 0x00000004U 934
616#define MBOWNER_MASK 0x00000003U 935#define ECC_CE_PAR_S 4
617#define MBOWNER_SHIFT 0 936#define ECC_CE_PAR_V(x) ((x) << ECC_CE_PAR_S)
618#define MBOWNER(x) ((x) << MBOWNER_SHIFT) 937#define ECC_CE_PAR_F ECC_CE_PAR_V(1U)
619#define MBOWNER_GET(x) (((x) & MBOWNER_MASK) >> MBOWNER_SHIFT) 938
620 939#define PERR_PAR_CAUSE_S 3
621#define CIM_PF_HOST_INT_ENABLE 0x288 940#define PERR_PAR_CAUSE_V(x) ((x) << PERR_PAR_CAUSE_S)
622#define MBMSGRDYINTEN(x) ((x) << 19) 941#define PERR_PAR_CAUSE_F PERR_PAR_CAUSE_V(1U)
623 942
624#define CIM_PF_HOST_INT_CAUSE 0x28c 943#define EDC_ECC_STATUS_A 0x797c
625#define MBMSGRDYINT 0x00080000U 944
626 945/* registers for module EDC_1 */
627#define CIM_HOST_INT_CAUSE 0x7b2c 946#define EDC_1_BASE_ADDR 0x7980
628#define TIEQOUTPARERRINT 0x00100000U 947
629#define TIEQINPARERRINT 0x00080000U 948/* registers for module CIM */
630#define MBHOSTPARERR 0x00040000U 949#define CIM_BOOT_CFG_A 0x7b00
631#define MBUPPARERR 0x00020000U 950#define CIM_PF_MAILBOX_CTRL_SHADOW_COPY_A 0x290
632#define IBQPARERR 0x0001f800U 951
633#define IBQTP0PARERR 0x00010000U 952#define BOOTADDR_M 0xffffff00U
634#define IBQTP1PARERR 0x00008000U 953
635#define IBQULPPARERR 0x00004000U 954#define UPCRST_S 0
636#define IBQSGELOPARERR 0x00002000U 955#define UPCRST_V(x) ((x) << UPCRST_S)
637#define IBQSGEHIPARERR 0x00001000U 956#define UPCRST_F UPCRST_V(1U)
638#define IBQNCSIPARERR 0x00000800U 957
639#define OBQPARERR 0x000007e0U 958#define CIM_PF_MAILBOX_DATA_A 0x240
640#define OBQULP0PARERR 0x00000400U 959#define CIM_PF_MAILBOX_CTRL_A 0x280
641#define OBQULP1PARERR 0x00000200U 960
642#define OBQULP2PARERR 0x00000100U 961#define MBMSGVALID_S 3
643#define OBQULP3PARERR 0x00000080U 962#define MBMSGVALID_V(x) ((x) << MBMSGVALID_S)
644#define OBQSGEPARERR 0x00000040U 963#define MBMSGVALID_F MBMSGVALID_V(1U)
645#define OBQNCSIPARERR 0x00000020U 964
646#define PREFDROPINT 0x00000002U 965#define MBINTREQ_S 2
647#define UPACCNONZERO 0x00000001U 966#define MBINTREQ_V(x) ((x) << MBINTREQ_S)
648 967#define MBINTREQ_F MBINTREQ_V(1U)
649#define CIM_HOST_UPACC_INT_CAUSE 0x7b34 968
650#define EEPROMWRINT 0x40000000U 969#define MBOWNER_S 0
651#define TIMEOUTMAINT 0x20000000U 970#define MBOWNER_M 0x3U
652#define TIMEOUTINT 0x10000000U 971#define MBOWNER_V(x) ((x) << MBOWNER_S)
653#define RSPOVRLOOKUPINT 0x08000000U 972#define MBOWNER_G(x) (((x) >> MBOWNER_S) & MBOWNER_M)
654#define REQOVRLOOKUPINT 0x04000000U 973
655#define BLKWRPLINT 0x02000000U 974#define CIM_PF_HOST_INT_ENABLE_A 0x288
656#define BLKRDPLINT 0x01000000U 975
657#define SGLWRPLINT 0x00800000U 976#define MBMSGRDYINTEN_S 19
658#define SGLRDPLINT 0x00400000U 977#define MBMSGRDYINTEN_V(x) ((x) << MBMSGRDYINTEN_S)
659#define BLKWRCTLINT 0x00200000U 978#define MBMSGRDYINTEN_F MBMSGRDYINTEN_V(1U)
660#define BLKRDCTLINT 0x00100000U 979
661#define SGLWRCTLINT 0x00080000U 980#define CIM_PF_HOST_INT_CAUSE_A 0x28c
662#define SGLRDCTLINT 0x00040000U 981
663#define BLKWREEPROMINT 0x00020000U 982#define MBMSGRDYINT_S 19
664#define BLKRDEEPROMINT 0x00010000U 983#define MBMSGRDYINT_V(x) ((x) << MBMSGRDYINT_S)
665#define SGLWREEPROMINT 0x00008000U 984#define MBMSGRDYINT_F MBMSGRDYINT_V(1U)
666#define SGLRDEEPROMINT 0x00004000U 985
667#define BLKWRFLASHINT 0x00002000U 986#define CIM_HOST_INT_CAUSE_A 0x7b2c
668#define BLKRDFLASHINT 0x00001000U 987
669#define SGLWRFLASHINT 0x00000800U 988#define TIEQOUTPARERRINT_S 20
670#define SGLRDFLASHINT 0x00000400U 989#define TIEQOUTPARERRINT_V(x) ((x) << TIEQOUTPARERRINT_S)
671#define BLKWRBOOTINT 0x00000200U 990#define TIEQOUTPARERRINT_F TIEQOUTPARERRINT_V(1U)
672#define BLKRDBOOTINT 0x00000100U 991
673#define SGLWRBOOTINT 0x00000080U 992#define TIEQINPARERRINT_S 19
674#define SGLRDBOOTINT 0x00000040U 993#define TIEQINPARERRINT_V(x) ((x) << TIEQINPARERRINT_S)
675#define ILLWRBEINT 0x00000020U 994#define TIEQINPARERRINT_F TIEQINPARERRINT_V(1U)
676#define ILLRDBEINT 0x00000010U 995
677#define ILLRDINT 0x00000008U 996#define PREFDROPINT_S 1
678#define ILLWRINT 0x00000004U 997#define PREFDROPINT_V(x) ((x) << PREFDROPINT_S)
679#define ILLTRANSINT 0x00000002U 998#define PREFDROPINT_F PREFDROPINT_V(1U)
680#define RSVDSPACEINT 0x00000001U 999
681 1000#define UPACCNONZERO_S 0
682#define TP_OUT_CONFIG 0x7d04 1001#define UPACCNONZERO_V(x) ((x) << UPACCNONZERO_S)
683#define VLANEXTENABLE_MASK 0x0000f000U 1002#define UPACCNONZERO_F UPACCNONZERO_V(1U)
684#define VLANEXTENABLE_SHIFT 12 1003
685 1004#define MBHOSTPARERR_S 18
686#define TP_GLOBAL_CONFIG 0x7d08 1005#define MBHOSTPARERR_V(x) ((x) << MBHOSTPARERR_S)
687#define FIVETUPLELOOKUP_SHIFT 17 1006#define MBHOSTPARERR_F MBHOSTPARERR_V(1U)
688#define FIVETUPLELOOKUP_MASK 0x00060000U 1007
689#define FIVETUPLELOOKUP(x) ((x) << FIVETUPLELOOKUP_SHIFT) 1008#define MBUPPARERR_S 17
690#define FIVETUPLELOOKUP_GET(x) (((x) & FIVETUPLELOOKUP_MASK) >> \ 1009#define MBUPPARERR_V(x) ((x) << MBUPPARERR_S)
691 FIVETUPLELOOKUP_SHIFT) 1010#define MBUPPARERR_F MBUPPARERR_V(1U)
692 1011
693#define TP_PARA_REG2 0x7d68 1012#define IBQTP0PARERR_S 16
694#define MAXRXDATA_MASK 0xffff0000U 1013#define IBQTP0PARERR_V(x) ((x) << IBQTP0PARERR_S)
695#define MAXRXDATA_SHIFT 16 1014#define IBQTP0PARERR_F IBQTP0PARERR_V(1U)
696#define MAXRXDATA_GET(x) (((x) & MAXRXDATA_MASK) >> MAXRXDATA_SHIFT) 1015
697 1016#define IBQTP1PARERR_S 15
698#define TP_TIMER_RESOLUTION 0x7d90 1017#define IBQTP1PARERR_V(x) ((x) << IBQTP1PARERR_S)
699#define TIMERRESOLUTION_MASK 0x00ff0000U 1018#define IBQTP1PARERR_F IBQTP1PARERR_V(1U)
700#define TIMERRESOLUTION_SHIFT 16 1019
701#define TIMERRESOLUTION_GET(x) (((x) & TIMERRESOLUTION_MASK) >> TIMERRESOLUTION_SHIFT) 1020#define IBQULPPARERR_S 14
702#define DELAYEDACKRESOLUTION_MASK 0x000000ffU 1021#define IBQULPPARERR_V(x) ((x) << IBQULPPARERR_S)
703#define DELAYEDACKRESOLUTION_SHIFT 0 1022#define IBQULPPARERR_F IBQULPPARERR_V(1U)
704#define DELAYEDACKRESOLUTION_GET(x) \ 1023
705 (((x) & DELAYEDACKRESOLUTION_MASK) >> DELAYEDACKRESOLUTION_SHIFT) 1024#define IBQSGELOPARERR_S 13
706 1025#define IBQSGELOPARERR_V(x) ((x) << IBQSGELOPARERR_S)
707#define TP_SHIFT_CNT 0x7dc0 1026#define IBQSGELOPARERR_F IBQSGELOPARERR_V(1U)
708#define SYNSHIFTMAX_SHIFT 24 1027
709#define SYNSHIFTMAX_MASK 0xff000000U 1028#define IBQSGEHIPARERR_S 12
710#define SYNSHIFTMAX(x) ((x) << SYNSHIFTMAX_SHIFT) 1029#define IBQSGEHIPARERR_V(x) ((x) << IBQSGEHIPARERR_S)
711#define SYNSHIFTMAX_GET(x) (((x) & SYNSHIFTMAX_MASK) >> \ 1030#define IBQSGEHIPARERR_F IBQSGEHIPARERR_V(1U)
712 SYNSHIFTMAX_SHIFT) 1031
713#define RXTSHIFTMAXR1_SHIFT 20 1032#define IBQNCSIPARERR_S 11
714#define RXTSHIFTMAXR1_MASK 0x00f00000U 1033#define IBQNCSIPARERR_V(x) ((x) << IBQNCSIPARERR_S)
715#define RXTSHIFTMAXR1(x) ((x) << RXTSHIFTMAXR1_SHIFT) 1034#define IBQNCSIPARERR_F IBQNCSIPARERR_V(1U)
716#define RXTSHIFTMAXR1_GET(x) (((x) & RXTSHIFTMAXR1_MASK) >> \ 1035
717 RXTSHIFTMAXR1_SHIFT) 1036#define OBQULP0PARERR_S 10
718#define RXTSHIFTMAXR2_SHIFT 16 1037#define OBQULP0PARERR_V(x) ((x) << OBQULP0PARERR_S)
719#define RXTSHIFTMAXR2_MASK 0x000f0000U 1038#define OBQULP0PARERR_F OBQULP0PARERR_V(1U)
720#define RXTSHIFTMAXR2(x) ((x) << RXTSHIFTMAXR2_SHIFT) 1039
721#define RXTSHIFTMAXR2_GET(x) (((x) & RXTSHIFTMAXR2_MASK) >> \ 1040#define OBQULP1PARERR_S 9
722 RXTSHIFTMAXR2_SHIFT) 1041#define OBQULP1PARERR_V(x) ((x) << OBQULP1PARERR_S)
723#define PERSHIFTBACKOFFMAX_SHIFT 12 1042#define OBQULP1PARERR_F OBQULP1PARERR_V(1U)
724#define PERSHIFTBACKOFFMAX_MASK 0x0000f000U 1043
725#define PERSHIFTBACKOFFMAX(x) ((x) << PERSHIFTBACKOFFMAX_SHIFT) 1044#define OBQULP2PARERR_S 8
726#define PERSHIFTBACKOFFMAX_GET(x) (((x) & PERSHIFTBACKOFFMAX_MASK) >> \ 1045#define OBQULP2PARERR_V(x) ((x) << OBQULP2PARERR_S)
727 PERSHIFTBACKOFFMAX_SHIFT) 1046#define OBQULP2PARERR_F OBQULP2PARERR_V(1U)
728#define PERSHIFTMAX_SHIFT 8 1047
729#define PERSHIFTMAX_MASK 0x00000f00U 1048#define OBQULP3PARERR_S 7
730#define PERSHIFTMAX(x) ((x) << PERSHIFTMAX_SHIFT) 1049#define OBQULP3PARERR_V(x) ((x) << OBQULP3PARERR_S)
731#define PERSHIFTMAX_GET(x) (((x) & PERSHIFTMAX_MASK) >> \ 1050#define OBQULP3PARERR_F OBQULP3PARERR_V(1U)
732 PERSHIFTMAX_SHIFT) 1051
733#define KEEPALIVEMAXR1_SHIFT 4 1052#define OBQSGEPARERR_S 6
734#define KEEPALIVEMAXR1_MASK 0x000000f0U 1053#define OBQSGEPARERR_V(x) ((x) << OBQSGEPARERR_S)
735#define KEEPALIVEMAXR1(x) ((x) << KEEPALIVEMAXR1_SHIFT) 1054#define OBQSGEPARERR_F OBQSGEPARERR_V(1U)
736#define KEEPALIVEMAXR1_GET(x) (((x) & KEEPALIVEMAXR1_MASK) >> \ 1055
737 KEEPALIVEMAXR1_SHIFT) 1056#define OBQNCSIPARERR_S 5
738#define KEEPALIVEMAXR2_SHIFT 0 1057#define OBQNCSIPARERR_V(x) ((x) << OBQNCSIPARERR_S)
739#define KEEPALIVEMAXR2_MASK 0x0000000fU 1058#define OBQNCSIPARERR_F OBQNCSIPARERR_V(1U)
740#define KEEPALIVEMAXR2(x) ((x) << KEEPALIVEMAXR2_SHIFT) 1059
741#define KEEPALIVEMAXR2_GET(x) (((x) & KEEPALIVEMAXR2_MASK) >> \ 1060#define CIM_HOST_UPACC_INT_CAUSE_A 0x7b34
742 KEEPALIVEMAXR2_SHIFT) 1061
743 1062#define EEPROMWRINT_S 30
744#define TP_CCTRL_TABLE 0x7ddc 1063#define EEPROMWRINT_V(x) ((x) << EEPROMWRINT_S)
745#define TP_MTU_TABLE 0x7de4 1064#define EEPROMWRINT_F EEPROMWRINT_V(1U)
746#define MTUINDEX_MASK 0xff000000U 1065
747#define MTUINDEX_SHIFT 24 1066#define TIMEOUTMAINT_S 29
748#define MTUINDEX(x) ((x) << MTUINDEX_SHIFT) 1067#define TIMEOUTMAINT_V(x) ((x) << TIMEOUTMAINT_S)
749#define MTUWIDTH_MASK 0x000f0000U 1068#define TIMEOUTMAINT_F TIMEOUTMAINT_V(1U)
750#define MTUWIDTH_SHIFT 16 1069
751#define MTUWIDTH(x) ((x) << MTUWIDTH_SHIFT) 1070#define TIMEOUTINT_S 28
752#define MTUWIDTH_GET(x) (((x) & MTUWIDTH_MASK) >> MTUWIDTH_SHIFT) 1071#define TIMEOUTINT_V(x) ((x) << TIMEOUTINT_S)
753#define MTUVALUE_MASK 0x00003fffU 1072#define TIMEOUTINT_F TIMEOUTINT_V(1U)
754#define MTUVALUE_SHIFT 0 1073
755#define MTUVALUE(x) ((x) << MTUVALUE_SHIFT) 1074#define RSPOVRLOOKUPINT_S 27
756#define MTUVALUE_GET(x) (((x) & MTUVALUE_MASK) >> MTUVALUE_SHIFT) 1075#define RSPOVRLOOKUPINT_V(x) ((x) << RSPOVRLOOKUPINT_S)
757 1076#define RSPOVRLOOKUPINT_F RSPOVRLOOKUPINT_V(1U)
758#define TP_RSS_LKP_TABLE 0x7dec 1077
759#define LKPTBLROWVLD 0x80000000U 1078#define REQOVRLOOKUPINT_S 26
760#define LKPTBLQUEUE1_MASK 0x000ffc00U 1079#define REQOVRLOOKUPINT_V(x) ((x) << REQOVRLOOKUPINT_S)
761#define LKPTBLQUEUE1_SHIFT 10 1080#define REQOVRLOOKUPINT_F REQOVRLOOKUPINT_V(1U)
762#define LKPTBLQUEUE1(x) ((x) << LKPTBLQUEUE1_SHIFT) 1081
763#define LKPTBLQUEUE1_GET(x) (((x) & LKPTBLQUEUE1_MASK) >> LKPTBLQUEUE1_SHIFT) 1082#define BLKWRPLINT_S 25
764#define LKPTBLQUEUE0_MASK 0x000003ffU 1083#define BLKWRPLINT_V(x) ((x) << BLKWRPLINT_S)
765#define LKPTBLQUEUE0_SHIFT 0 1084#define BLKWRPLINT_F BLKWRPLINT_V(1U)
766#define LKPTBLQUEUE0(x) ((x) << LKPTBLQUEUE0_SHIFT) 1085
767#define LKPTBLQUEUE0_GET(x) (((x) & LKPTBLQUEUE0_MASK) >> LKPTBLQUEUE0_SHIFT) 1086#define BLKRDPLINT_S 24
768 1087#define BLKRDPLINT_V(x) ((x) << BLKRDPLINT_S)
769#define TP_PIO_ADDR 0x7e40 1088#define BLKRDPLINT_F BLKRDPLINT_V(1U)
770#define TP_PIO_DATA 0x7e44 1089
771#define TP_MIB_INDEX 0x7e50 1090#define SGLWRPLINT_S 23
772#define TP_MIB_DATA 0x7e54 1091#define SGLWRPLINT_V(x) ((x) << SGLWRPLINT_S)
773#define TP_INT_CAUSE 0x7e74 1092#define SGLWRPLINT_F SGLWRPLINT_V(1U)
774#define FLMTXFLSTEMPTY 0x40000000U 1093
775 1094#define SGLRDPLINT_S 22
776#define TP_VLAN_PRI_MAP 0x140 1095#define SGLRDPLINT_V(x) ((x) << SGLRDPLINT_S)
777#define FRAGMENTATION_SHIFT 9 1096#define SGLRDPLINT_F SGLRDPLINT_V(1U)
778#define FRAGMENTATION_MASK 0x00000200U 1097
779#define MPSHITTYPE_MASK 0x00000100U 1098#define BLKWRCTLINT_S 21
780#define MACMATCH_MASK 0x00000080U 1099#define BLKWRCTLINT_V(x) ((x) << BLKWRCTLINT_S)
781#define ETHERTYPE_MASK 0x00000040U 1100#define BLKWRCTLINT_F BLKWRCTLINT_V(1U)
782#define PROTOCOL_MASK 0x00000020U 1101
783#define TOS_MASK 0x00000010U 1102#define BLKRDCTLINT_S 20
784#define VLAN_MASK 0x00000008U 1103#define BLKRDCTLINT_V(x) ((x) << BLKRDCTLINT_S)
785#define VNIC_ID_MASK 0x00000004U 1104#define BLKRDCTLINT_F BLKRDCTLINT_V(1U)
786#define PORT_MASK 0x00000002U 1105
787#define FCOE_SHIFT 0 1106#define SGLWRCTLINT_S 19
788#define FCOE_MASK 0x00000001U 1107#define SGLWRCTLINT_V(x) ((x) << SGLWRCTLINT_S)
789 1108#define SGLWRCTLINT_F SGLWRCTLINT_V(1U)
790#define TP_INGRESS_CONFIG 0x141 1109
791#define VNIC 0x00000800U 1110#define SGLRDCTLINT_S 18
792#define CSUM_HAS_PSEUDO_HDR 0x00000400U 1111#define SGLRDCTLINT_V(x) ((x) << SGLRDCTLINT_S)
793#define RM_OVLAN 0x00000200U 1112#define SGLRDCTLINT_F SGLRDCTLINT_V(1U)
794#define LOOKUPEVERYPKT 0x00000100U 1113
795 1114#define BLKWREEPROMINT_S 17
796#define TP_MIB_MAC_IN_ERR_0 0x0 1115#define BLKWREEPROMINT_V(x) ((x) << BLKWREEPROMINT_S)
797#define TP_MIB_TCP_OUT_RST 0xc 1116#define BLKWREEPROMINT_F BLKWREEPROMINT_V(1U)
798#define TP_MIB_TCP_IN_SEG_HI 0x10 1117
799#define TP_MIB_TCP_IN_SEG_LO 0x11 1118#define BLKRDEEPROMINT_S 16
800#define TP_MIB_TCP_OUT_SEG_HI 0x12 1119#define BLKRDEEPROMINT_V(x) ((x) << BLKRDEEPROMINT_S)
801#define TP_MIB_TCP_OUT_SEG_LO 0x13 1120#define BLKRDEEPROMINT_F BLKRDEEPROMINT_V(1U)
802#define TP_MIB_TCP_RXT_SEG_HI 0x14 1121
803#define TP_MIB_TCP_RXT_SEG_LO 0x15 1122#define SGLWREEPROMINT_S 15
804#define TP_MIB_TNL_CNG_DROP_0 0x18 1123#define SGLWREEPROMINT_V(x) ((x) << SGLWREEPROMINT_S)
805#define TP_MIB_TCP_V6IN_ERR_0 0x28 1124#define SGLWREEPROMINT_F SGLWREEPROMINT_V(1U)
806#define TP_MIB_TCP_V6OUT_RST 0x2c 1125
807#define TP_MIB_OFD_ARP_DROP 0x36 1126#define SGLRDEEPROMINT_S 14
808#define TP_MIB_TNL_DROP_0 0x44 1127#define SGLRDEEPROMINT_V(x) ((x) << SGLRDEEPROMINT_S)
809#define TP_MIB_OFD_VLN_DROP_0 0x58 1128#define SGLRDEEPROMINT_F SGLRDEEPROMINT_V(1U)
810 1129
811#define ULP_TX_INT_CAUSE 0x8dcc 1130#define BLKWRFLASHINT_S 13
812#define PBL_BOUND_ERR_CH3 0x80000000U 1131#define BLKWRFLASHINT_V(x) ((x) << BLKWRFLASHINT_S)
813#define PBL_BOUND_ERR_CH2 0x40000000U 1132#define BLKWRFLASHINT_F BLKWRFLASHINT_V(1U)
814#define PBL_BOUND_ERR_CH1 0x20000000U 1133
815#define PBL_BOUND_ERR_CH0 0x10000000U 1134#define BLKRDFLASHINT_S 12
816 1135#define BLKRDFLASHINT_V(x) ((x) << BLKRDFLASHINT_S)
817#define PM_RX_INT_CAUSE 0x8fdc 1136#define BLKRDFLASHINT_F BLKRDFLASHINT_V(1U)
818#define ZERO_E_CMD_ERROR 0x00400000U 1137
819#define PMRX_FRAMING_ERROR 0x003ffff0U 1138#define SGLWRFLASHINT_S 11
820#define OCSPI_PAR_ERROR 0x00000008U 1139#define SGLWRFLASHINT_V(x) ((x) << SGLWRFLASHINT_S)
821#define DB_OPTIONS_PAR_ERROR 0x00000004U 1140#define SGLWRFLASHINT_F SGLWRFLASHINT_V(1U)
822#define IESPI_PAR_ERROR 0x00000002U 1141
823#define E_PCMD_PAR_ERROR 0x00000001U 1142#define SGLRDFLASHINT_S 10
824 1143#define SGLRDFLASHINT_V(x) ((x) << SGLRDFLASHINT_S)
825#define PM_TX_INT_CAUSE 0x8ffc 1144#define SGLRDFLASHINT_F SGLRDFLASHINT_V(1U)
826#define PCMD_LEN_OVFL0 0x80000000U 1145
827#define PCMD_LEN_OVFL1 0x40000000U 1146#define BLKWRBOOTINT_S 9
828#define PCMD_LEN_OVFL2 0x20000000U 1147#define BLKWRBOOTINT_V(x) ((x) << BLKWRBOOTINT_S)
829#define ZERO_C_CMD_ERROR 0x10000000U 1148#define BLKWRBOOTINT_F BLKWRBOOTINT_V(1U)
830#define PMTX_FRAMING_ERROR 0x0ffffff0U 1149
831#define OESPI_PAR_ERROR 0x00000008U 1150#define BLKRDBOOTINT_S 8
832#define ICSPI_PAR_ERROR 0x00000002U 1151#define BLKRDBOOTINT_V(x) ((x) << BLKRDBOOTINT_S)
833#define C_PCMD_PAR_ERROR 0x00000001U 1152#define BLKRDBOOTINT_F BLKRDBOOTINT_V(1U)
1153
1154#define SGLWRBOOTINT_S 7
1155#define SGLWRBOOTINT_V(x) ((x) << SGLWRBOOTINT_S)
1156#define SGLWRBOOTINT_F SGLWRBOOTINT_V(1U)
1157
1158#define SGLRDBOOTINT_S 6
1159#define SGLRDBOOTINT_V(x) ((x) << SGLRDBOOTINT_S)
1160#define SGLRDBOOTINT_F SGLRDBOOTINT_V(1U)
1161
1162#define ILLWRBEINT_S 5
1163#define ILLWRBEINT_V(x) ((x) << ILLWRBEINT_S)
1164#define ILLWRBEINT_F ILLWRBEINT_V(1U)
1165
1166#define ILLRDBEINT_S 4
1167#define ILLRDBEINT_V(x) ((x) << ILLRDBEINT_S)
1168#define ILLRDBEINT_F ILLRDBEINT_V(1U)
1169
1170#define ILLRDINT_S 3
1171#define ILLRDINT_V(x) ((x) << ILLRDINT_S)
1172#define ILLRDINT_F ILLRDINT_V(1U)
1173
1174#define ILLWRINT_S 2
1175#define ILLWRINT_V(x) ((x) << ILLWRINT_S)
1176#define ILLWRINT_F ILLWRINT_V(1U)
1177
1178#define ILLTRANSINT_S 1
1179#define ILLTRANSINT_V(x) ((x) << ILLTRANSINT_S)
1180#define ILLTRANSINT_F ILLTRANSINT_V(1U)
1181
1182#define RSVDSPACEINT_S 0
1183#define RSVDSPACEINT_V(x) ((x) << RSVDSPACEINT_S)
1184#define RSVDSPACEINT_F RSVDSPACEINT_V(1U)
1185
1186/* registers for module TP */
1187#define DBGLAWHLF_S 23
1188#define DBGLAWHLF_V(x) ((x) << DBGLAWHLF_S)
1189#define DBGLAWHLF_F DBGLAWHLF_V(1U)
1190
1191#define DBGLAWPTR_S 16
1192#define DBGLAWPTR_M 0x7fU
1193#define DBGLAWPTR_G(x) (((x) >> DBGLAWPTR_S) & DBGLAWPTR_M)
1194
1195#define DBGLAENABLE_S 12
1196#define DBGLAENABLE_V(x) ((x) << DBGLAENABLE_S)
1197#define DBGLAENABLE_F DBGLAENABLE_V(1U)
1198
1199#define DBGLARPTR_S 0
1200#define DBGLARPTR_M 0x7fU
1201#define DBGLARPTR_V(x) ((x) << DBGLARPTR_S)
1202
1203#define TP_DBG_LA_DATAL_A 0x7ed8
1204#define TP_DBG_LA_CONFIG_A 0x7ed4
1205#define TP_OUT_CONFIG_A 0x7d04
1206#define TP_GLOBAL_CONFIG_A 0x7d08
1207
1208#define DBGLAMODE_S 14
1209#define DBGLAMODE_M 0x3U
1210#define DBGLAMODE_G(x) (((x) >> DBGLAMODE_S) & DBGLAMODE_M)
1211
1212#define FIVETUPLELOOKUP_S 17
1213#define FIVETUPLELOOKUP_M 0x3U
1214#define FIVETUPLELOOKUP_V(x) ((x) << FIVETUPLELOOKUP_S)
1215#define FIVETUPLELOOKUP_G(x) (((x) >> FIVETUPLELOOKUP_S) & FIVETUPLELOOKUP_M)
1216
1217#define TP_PARA_REG2_A 0x7d68
1218
1219#define MAXRXDATA_S 16
1220#define MAXRXDATA_M 0xffffU
1221#define MAXRXDATA_G(x) (((x) >> MAXRXDATA_S) & MAXRXDATA_M)
1222
1223#define TP_TIMER_RESOLUTION_A 0x7d90
1224
1225#define TIMERRESOLUTION_S 16
1226#define TIMERRESOLUTION_M 0xffU
1227#define TIMERRESOLUTION_G(x) (((x) >> TIMERRESOLUTION_S) & TIMERRESOLUTION_M)
1228
1229#define TIMESTAMPRESOLUTION_S 8
1230#define TIMESTAMPRESOLUTION_M 0xffU
1231#define TIMESTAMPRESOLUTION_G(x) \
1232 (((x) >> TIMESTAMPRESOLUTION_S) & TIMESTAMPRESOLUTION_M)
1233
1234#define DELAYEDACKRESOLUTION_S 0
1235#define DELAYEDACKRESOLUTION_M 0xffU
1236#define DELAYEDACKRESOLUTION_G(x) \
1237 (((x) >> DELAYEDACKRESOLUTION_S) & DELAYEDACKRESOLUTION_M)
1238
1239#define TP_SHIFT_CNT_A 0x7dc0
1240#define TP_RXT_MIN_A 0x7d98
1241#define TP_RXT_MAX_A 0x7d9c
1242#define TP_PERS_MIN_A 0x7da0
1243#define TP_PERS_MAX_A 0x7da4
1244#define TP_KEEP_IDLE_A 0x7da8
1245#define TP_KEEP_INTVL_A 0x7dac
1246#define TP_INIT_SRTT_A 0x7db0
1247#define TP_DACK_TIMER_A 0x7db4
1248#define TP_FINWAIT2_TIMER_A 0x7db8
1249
1250#define INITSRTT_S 0
1251#define INITSRTT_M 0xffffU
1252#define INITSRTT_G(x) (((x) >> INITSRTT_S) & INITSRTT_M)
1253
1254#define PERSMAX_S 0
1255#define PERSMAX_M 0x3fffffffU
1256#define PERSMAX_V(x) ((x) << PERSMAX_S)
1257#define PERSMAX_G(x) (((x) >> PERSMAX_S) & PERSMAX_M)
1258
1259#define SYNSHIFTMAX_S 24
1260#define SYNSHIFTMAX_M 0xffU
1261#define SYNSHIFTMAX_V(x) ((x) << SYNSHIFTMAX_S)
1262#define SYNSHIFTMAX_G(x) (((x) >> SYNSHIFTMAX_S) & SYNSHIFTMAX_M)
1263
1264#define RXTSHIFTMAXR1_S 20
1265#define RXTSHIFTMAXR1_M 0xfU
1266#define RXTSHIFTMAXR1_V(x) ((x) << RXTSHIFTMAXR1_S)
1267#define RXTSHIFTMAXR1_G(x) (((x) >> RXTSHIFTMAXR1_S) & RXTSHIFTMAXR1_M)
1268
1269#define RXTSHIFTMAXR2_S 16
1270#define RXTSHIFTMAXR2_M 0xfU
1271#define RXTSHIFTMAXR2_V(x) ((x) << RXTSHIFTMAXR2_S)
1272#define RXTSHIFTMAXR2_G(x) (((x) >> RXTSHIFTMAXR2_S) & RXTSHIFTMAXR2_M)
1273
1274#define PERSHIFTBACKOFFMAX_S 12
1275#define PERSHIFTBACKOFFMAX_M 0xfU
1276#define PERSHIFTBACKOFFMAX_V(x) ((x) << PERSHIFTBACKOFFMAX_S)
1277#define PERSHIFTBACKOFFMAX_G(x) \
1278 (((x) >> PERSHIFTBACKOFFMAX_S) & PERSHIFTBACKOFFMAX_M)
1279
1280#define PERSHIFTMAX_S 8
1281#define PERSHIFTMAX_M 0xfU
1282#define PERSHIFTMAX_V(x) ((x) << PERSHIFTMAX_S)
1283#define PERSHIFTMAX_G(x) (((x) >> PERSHIFTMAX_S) & PERSHIFTMAX_M)
1284
1285#define KEEPALIVEMAXR1_S 4
1286#define KEEPALIVEMAXR1_M 0xfU
1287#define KEEPALIVEMAXR1_V(x) ((x) << KEEPALIVEMAXR1_S)
1288#define KEEPALIVEMAXR1_G(x) (((x) >> KEEPALIVEMAXR1_S) & KEEPALIVEMAXR1_M)
1289
1290#define KEEPALIVEMAXR2_S 0
1291#define KEEPALIVEMAXR2_M 0xfU
1292#define KEEPALIVEMAXR2_V(x) ((x) << KEEPALIVEMAXR2_S)
1293#define KEEPALIVEMAXR2_G(x) (((x) >> KEEPALIVEMAXR2_S) & KEEPALIVEMAXR2_M)
1294
1295#define ROWINDEX_S 16
1296#define ROWINDEX_V(x) ((x) << ROWINDEX_S)
1297
1298#define TP_CCTRL_TABLE_A 0x7ddc
1299#define TP_MTU_TABLE_A 0x7de4
1300
1301#define MTUINDEX_S 24
1302#define MTUINDEX_V(x) ((x) << MTUINDEX_S)
1303
1304#define MTUWIDTH_S 16
1305#define MTUWIDTH_M 0xfU
1306#define MTUWIDTH_V(x) ((x) << MTUWIDTH_S)
1307#define MTUWIDTH_G(x) (((x) >> MTUWIDTH_S) & MTUWIDTH_M)
1308
1309#define MTUVALUE_S 0
1310#define MTUVALUE_M 0x3fffU
1311#define MTUVALUE_V(x) ((x) << MTUVALUE_S)
1312#define MTUVALUE_G(x) (((x) >> MTUVALUE_S) & MTUVALUE_M)
1313
1314#define TP_RSS_LKP_TABLE_A 0x7dec
1315
1316#define LKPTBLROWVLD_S 31
1317#define LKPTBLROWVLD_V(x) ((x) << LKPTBLROWVLD_S)
1318#define LKPTBLROWVLD_F LKPTBLROWVLD_V(1U)
1319
1320#define LKPTBLQUEUE1_S 10
1321#define LKPTBLQUEUE1_M 0x3ffU
1322#define LKPTBLQUEUE1_G(x) (((x) >> LKPTBLQUEUE1_S) & LKPTBLQUEUE1_M)
1323
1324#define LKPTBLQUEUE0_S 0
1325#define LKPTBLQUEUE0_M 0x3ffU
1326#define LKPTBLQUEUE0_G(x) (((x) >> LKPTBLQUEUE0_S) & LKPTBLQUEUE0_M)
1327
1328#define TP_PIO_ADDR_A 0x7e40
1329#define TP_PIO_DATA_A 0x7e44
1330#define TP_MIB_INDEX_A 0x7e50
1331#define TP_MIB_DATA_A 0x7e54
1332#define TP_INT_CAUSE_A 0x7e74
1333
1334#define FLMTXFLSTEMPTY_S 30
1335#define FLMTXFLSTEMPTY_V(x) ((x) << FLMTXFLSTEMPTY_S)
1336#define FLMTXFLSTEMPTY_F FLMTXFLSTEMPTY_V(1U)
1337
1338#define TP_VLAN_PRI_MAP_A 0x140
1339
1340#define FRAGMENTATION_S 9
1341#define FRAGMENTATION_V(x) ((x) << FRAGMENTATION_S)
1342#define FRAGMENTATION_F FRAGMENTATION_V(1U)
1343
1344#define MPSHITTYPE_S 8
1345#define MPSHITTYPE_V(x) ((x) << MPSHITTYPE_S)
1346#define MPSHITTYPE_F MPSHITTYPE_V(1U)
1347
1348#define MACMATCH_S 7
1349#define MACMATCH_V(x) ((x) << MACMATCH_S)
1350#define MACMATCH_F MACMATCH_V(1U)
1351
1352#define ETHERTYPE_S 6
1353#define ETHERTYPE_V(x) ((x) << ETHERTYPE_S)
1354#define ETHERTYPE_F ETHERTYPE_V(1U)
1355
1356#define PROTOCOL_S 5
1357#define PROTOCOL_V(x) ((x) << PROTOCOL_S)
1358#define PROTOCOL_F PROTOCOL_V(1U)
1359
1360#define TOS_S 4
1361#define TOS_V(x) ((x) << TOS_S)
1362#define TOS_F TOS_V(1U)
1363
1364#define VLAN_S 3
1365#define VLAN_V(x) ((x) << VLAN_S)
1366#define VLAN_F VLAN_V(1U)
1367
1368#define VNIC_ID_S 2
1369#define VNIC_ID_V(x) ((x) << VNIC_ID_S)
1370#define VNIC_ID_F VNIC_ID_V(1U)
1371
1372#define PORT_S 1
1373#define PORT_V(x) ((x) << PORT_S)
1374#define PORT_F PORT_V(1U)
1375
1376#define FCOE_S 0
1377#define FCOE_V(x) ((x) << FCOE_S)
1378#define FCOE_F FCOE_V(1U)
1379
1380#define FILTERMODE_S 15
1381#define FILTERMODE_V(x) ((x) << FILTERMODE_S)
1382#define FILTERMODE_F FILTERMODE_V(1U)
1383
1384#define FCOEMASK_S 14
1385#define FCOEMASK_V(x) ((x) << FCOEMASK_S)
1386#define FCOEMASK_F FCOEMASK_V(1U)
1387
1388#define TP_INGRESS_CONFIG_A 0x141
1389
1390#define VNIC_S 11
1391#define VNIC_V(x) ((x) << VNIC_S)
1392#define VNIC_F VNIC_V(1U)
1393
1394#define CSUM_HAS_PSEUDO_HDR_S 10
1395#define CSUM_HAS_PSEUDO_HDR_V(x) ((x) << CSUM_HAS_PSEUDO_HDR_S)
1396#define CSUM_HAS_PSEUDO_HDR_F CSUM_HAS_PSEUDO_HDR_V(1U)
1397
1398#define TP_MIB_MAC_IN_ERR_0_A 0x0
1399#define TP_MIB_TCP_OUT_RST_A 0xc
1400#define TP_MIB_TCP_IN_SEG_HI_A 0x10
1401#define TP_MIB_TCP_IN_SEG_LO_A 0x11
1402#define TP_MIB_TCP_OUT_SEG_HI_A 0x12
1403#define TP_MIB_TCP_OUT_SEG_LO_A 0x13
1404#define TP_MIB_TCP_RXT_SEG_HI_A 0x14
1405#define TP_MIB_TCP_RXT_SEG_LO_A 0x15
1406#define TP_MIB_TNL_CNG_DROP_0_A 0x18
1407#define TP_MIB_TCP_V6IN_ERR_0_A 0x28
1408#define TP_MIB_TCP_V6OUT_RST_A 0x2c
1409#define TP_MIB_OFD_ARP_DROP_A 0x36
1410#define TP_MIB_TNL_DROP_0_A 0x44
1411#define TP_MIB_OFD_VLN_DROP_0_A 0x58
1412
1413#define ULP_TX_INT_CAUSE_A 0x8dcc
1414
1415#define PBL_BOUND_ERR_CH3_S 31
1416#define PBL_BOUND_ERR_CH3_V(x) ((x) << PBL_BOUND_ERR_CH3_S)
1417#define PBL_BOUND_ERR_CH3_F PBL_BOUND_ERR_CH3_V(1U)
1418
1419#define PBL_BOUND_ERR_CH2_S 30
1420#define PBL_BOUND_ERR_CH2_V(x) ((x) << PBL_BOUND_ERR_CH2_S)
1421#define PBL_BOUND_ERR_CH2_F PBL_BOUND_ERR_CH2_V(1U)
1422
1423#define PBL_BOUND_ERR_CH1_S 29
1424#define PBL_BOUND_ERR_CH1_V(x) ((x) << PBL_BOUND_ERR_CH1_S)
1425#define PBL_BOUND_ERR_CH1_F PBL_BOUND_ERR_CH1_V(1U)
1426
1427#define PBL_BOUND_ERR_CH0_S 28
1428#define PBL_BOUND_ERR_CH0_V(x) ((x) << PBL_BOUND_ERR_CH0_S)
1429#define PBL_BOUND_ERR_CH0_F PBL_BOUND_ERR_CH0_V(1U)
1430
1431#define PM_RX_INT_CAUSE_A 0x8fdc
1432#define PM_RX_STAT_CONFIG_A 0x8fc8
1433#define PM_RX_STAT_COUNT_A 0x8fcc
1434#define PM_RX_STAT_LSB_A 0x8fd0
1435#define PM_RX_DBG_CTRL_A 0x8fd0
1436#define PM_RX_DBG_DATA_A 0x8fd4
1437#define PM_RX_DBG_STAT_MSB_A 0x10013
1438
1439#define PMRX_FRAMING_ERROR_F 0x003ffff0U
1440
1441#define ZERO_E_CMD_ERROR_S 22
1442#define ZERO_E_CMD_ERROR_V(x) ((x) << ZERO_E_CMD_ERROR_S)
1443#define ZERO_E_CMD_ERROR_F ZERO_E_CMD_ERROR_V(1U)
1444
1445#define OCSPI_PAR_ERROR_S 3
1446#define OCSPI_PAR_ERROR_V(x) ((x) << OCSPI_PAR_ERROR_S)
1447#define OCSPI_PAR_ERROR_F OCSPI_PAR_ERROR_V(1U)
1448
1449#define DB_OPTIONS_PAR_ERROR_S 2
1450#define DB_OPTIONS_PAR_ERROR_V(x) ((x) << DB_OPTIONS_PAR_ERROR_S)
1451#define DB_OPTIONS_PAR_ERROR_F DB_OPTIONS_PAR_ERROR_V(1U)
1452
1453#define IESPI_PAR_ERROR_S 1
1454#define IESPI_PAR_ERROR_V(x) ((x) << IESPI_PAR_ERROR_S)
1455#define IESPI_PAR_ERROR_F IESPI_PAR_ERROR_V(1U)
1456
1457#define PMRX_E_PCMD_PAR_ERROR_S 0
1458#define PMRX_E_PCMD_PAR_ERROR_V(x) ((x) << PMRX_E_PCMD_PAR_ERROR_S)
1459#define PMRX_E_PCMD_PAR_ERROR_F PMRX_E_PCMD_PAR_ERROR_V(1U)
1460
1461#define PM_TX_INT_CAUSE_A 0x8ffc
1462#define PM_TX_STAT_CONFIG_A 0x8fe8
1463#define PM_TX_STAT_COUNT_A 0x8fec
1464#define PM_TX_STAT_LSB_A 0x8ff0
1465#define PM_TX_DBG_CTRL_A 0x8ff0
1466#define PM_TX_DBG_DATA_A 0x8ff4
1467#define PM_TX_DBG_STAT_MSB_A 0x1001a
1468
1469#define PCMD_LEN_OVFL0_S 31
1470#define PCMD_LEN_OVFL0_V(x) ((x) << PCMD_LEN_OVFL0_S)
1471#define PCMD_LEN_OVFL0_F PCMD_LEN_OVFL0_V(1U)
1472
1473#define PCMD_LEN_OVFL1_S 30
1474#define PCMD_LEN_OVFL1_V(x) ((x) << PCMD_LEN_OVFL1_S)
1475#define PCMD_LEN_OVFL1_F PCMD_LEN_OVFL1_V(1U)
1476
1477#define PCMD_LEN_OVFL2_S 29
1478#define PCMD_LEN_OVFL2_V(x) ((x) << PCMD_LEN_OVFL2_S)
1479#define PCMD_LEN_OVFL2_F PCMD_LEN_OVFL2_V(1U)
1480
1481#define ZERO_C_CMD_ERROR_S 28
1482#define ZERO_C_CMD_ERROR_V(x) ((x) << ZERO_C_CMD_ERROR_S)
1483#define ZERO_C_CMD_ERROR_F ZERO_C_CMD_ERROR_V(1U)
1484
1485#define PMTX_FRAMING_ERROR_F 0x0ffffff0U
1486
1487#define OESPI_PAR_ERROR_S 3
1488#define OESPI_PAR_ERROR_V(x) ((x) << OESPI_PAR_ERROR_S)
1489#define OESPI_PAR_ERROR_F OESPI_PAR_ERROR_V(1U)
1490
1491#define ICSPI_PAR_ERROR_S 1
1492#define ICSPI_PAR_ERROR_V(x) ((x) << ICSPI_PAR_ERROR_S)
1493#define ICSPI_PAR_ERROR_F ICSPI_PAR_ERROR_V(1U)
1494
1495#define PMTX_C_PCMD_PAR_ERROR_S 0
1496#define PMTX_C_PCMD_PAR_ERROR_V(x) ((x) << PMTX_C_PCMD_PAR_ERROR_S)
1497#define PMTX_C_PCMD_PAR_ERROR_F PMTX_C_PCMD_PAR_ERROR_V(1U)
834 1498
835#define MPS_PORT_STAT_TX_PORT_BYTES_L 0x400 1499#define MPS_PORT_STAT_TX_PORT_BYTES_L 0x400
836#define MPS_PORT_STAT_TX_PORT_BYTES_H 0x404 1500#define MPS_PORT_STAT_TX_PORT_BYTES_H 0x404
@@ -959,41 +1623,57 @@
959#define MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c 1623#define MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c
960#define MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610 1624#define MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610
961#define MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614 1625#define MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614
962#define MAC_PORT_CFG2 0x818
963#define MAC_PORT_MAGIC_MACID_LO 0x824 1626#define MAC_PORT_MAGIC_MACID_LO 0x824
964#define MAC_PORT_MAGIC_MACID_HI 0x828 1627#define MAC_PORT_MAGIC_MACID_HI 0x828
965#define MAC_PORT_EPIO_DATA0 0x8c0 1628
966#define MAC_PORT_EPIO_DATA1 0x8c4 1629#define MAC_PORT_EPIO_DATA0_A 0x8c0
967#define MAC_PORT_EPIO_DATA2 0x8c8 1630#define MAC_PORT_EPIO_DATA1_A 0x8c4
968#define MAC_PORT_EPIO_DATA3 0x8cc 1631#define MAC_PORT_EPIO_DATA2_A 0x8c8
969#define MAC_PORT_EPIO_OP 0x8d0 1632#define MAC_PORT_EPIO_DATA3_A 0x8cc
970 1633#define MAC_PORT_EPIO_OP_A 0x8d0
971#define MPS_CMN_CTL 0x9000 1634
972#define NUMPORTS_MASK 0x00000003U 1635#define MAC_PORT_CFG2_A 0x818
973#define NUMPORTS_SHIFT 0 1636
974#define NUMPORTS_GET(x) (((x) & NUMPORTS_MASK) >> NUMPORTS_SHIFT) 1637#define MPS_CMN_CTL_A 0x9000
975 1638
976#define MPS_INT_CAUSE 0x9008 1639#define NUMPORTS_S 0
977#define STATINT 0x00000020U 1640#define NUMPORTS_M 0x3U
978#define TXINT 0x00000010U 1641#define NUMPORTS_G(x) (((x) >> NUMPORTS_S) & NUMPORTS_M)
979#define RXINT 0x00000008U 1642
980#define TRCINT 0x00000004U 1643#define MPS_INT_CAUSE_A 0x9008
981#define CLSINT 0x00000002U 1644#define MPS_TX_INT_CAUSE_A 0x9408
982#define PLINT 0x00000001U 1645
983 1646#define FRMERR_S 15
984#define MPS_TX_INT_CAUSE 0x9408 1647#define FRMERR_V(x) ((x) << FRMERR_S)
985#define PORTERR 0x00010000U 1648#define FRMERR_F FRMERR_V(1U)
986#define FRMERR 0x00008000U 1649
987#define SECNTERR 0x00004000U 1650#define SECNTERR_S 14
988#define BUBBLE 0x00002000U 1651#define SECNTERR_V(x) ((x) << SECNTERR_S)
989#define TXDESCFIFO 0x00001e00U 1652#define SECNTERR_F SECNTERR_V(1U)
990#define TXDATAFIFO 0x000001e0U 1653
991#define NCSIFIFO 0x00000010U 1654#define BUBBLE_S 13
992#define TPFIFO 0x0000000fU 1655#define BUBBLE_V(x) ((x) << BUBBLE_S)
993 1656#define BUBBLE_F BUBBLE_V(1U)
994#define MPS_STAT_PERR_INT_CAUSE_SRAM 0x9614 1657
995#define MPS_STAT_PERR_INT_CAUSE_TX_FIFO 0x9620 1658#define TXDESCFIFO_S 9
996#define MPS_STAT_PERR_INT_CAUSE_RX_FIFO 0x962c 1659#define TXDESCFIFO_M 0xfU
1660#define TXDESCFIFO_V(x) ((x) << TXDESCFIFO_S)
1661
1662#define TXDATAFIFO_S 5
1663#define TXDATAFIFO_M 0xfU
1664#define TXDATAFIFO_V(x) ((x) << TXDATAFIFO_S)
1665
1666#define NCSIFIFO_S 4
1667#define NCSIFIFO_V(x) ((x) << NCSIFIFO_S)
1668#define NCSIFIFO_F NCSIFIFO_V(1U)
1669
1670#define TPFIFO_S 0
1671#define TPFIFO_M 0xfU
1672#define TPFIFO_V(x) ((x) << TPFIFO_S)
1673
1674#define MPS_STAT_PERR_INT_CAUSE_SRAM_A 0x9614
1675#define MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A 0x9620
1676#define MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A 0x962c
997 1677
998#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L 0x9640 1678#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L 0x9640
999#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_H 0x9644 1679#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_H 0x9644
@@ -1027,294 +1707,851 @@
1027#define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_H 0x96b4 1707#define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_H 0x96b4
1028#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8 1708#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8
1029#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc 1709#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc
1030#define MPS_TRC_CFG 0x9800
1031#define TRCFIFOEMPTY 0x00000010U
1032#define TRCIGNOREDROPINPUT 0x00000008U
1033#define TRCKEEPDUPLICATES 0x00000004U
1034#define TRCEN 0x00000002U
1035#define TRCMULTIFILTER 0x00000001U
1036
1037#define MPS_TRC_RSS_CONTROL 0x9808
1038#define MPS_T5_TRC_RSS_CONTROL 0xa00c
1039#define RSSCONTROL_MASK 0x00ff0000U
1040#define RSSCONTROL_SHIFT 16
1041#define RSSCONTROL(x) ((x) << RSSCONTROL_SHIFT)
1042#define QUEUENUMBER_MASK 0x0000ffffU
1043#define QUEUENUMBER_SHIFT 0
1044#define QUEUENUMBER(x) ((x) << QUEUENUMBER_SHIFT)
1045
1046#define MPS_TRC_FILTER_MATCH_CTL_A 0x9810
1047#define TFINVERTMATCH 0x01000000U
1048#define TFPKTTOOLARGE 0x00800000U
1049#define TFEN 0x00400000U
1050#define TFPORT_MASK 0x003c0000U
1051#define TFPORT_SHIFT 18
1052#define TFPORT(x) ((x) << TFPORT_SHIFT)
1053#define TFPORT_GET(x) (((x) & TFPORT_MASK) >> TFPORT_SHIFT)
1054#define TFDROP 0x00020000U
1055#define TFSOPEOPERR 0x00010000U
1056#define TFLENGTH_MASK 0x00001f00U
1057#define TFLENGTH_SHIFT 8
1058#define TFLENGTH(x) ((x) << TFLENGTH_SHIFT)
1059#define TFLENGTH_GET(x) (((x) & TFLENGTH_MASK) >> TFLENGTH_SHIFT)
1060#define TFOFFSET_MASK 0x0000001fU
1061#define TFOFFSET_SHIFT 0
1062#define TFOFFSET(x) ((x) << TFOFFSET_SHIFT)
1063#define TFOFFSET_GET(x) (((x) & TFOFFSET_MASK) >> TFOFFSET_SHIFT)
1064
1065#define MPS_TRC_FILTER_MATCH_CTL_B 0x9820
1066#define TFMINPKTSIZE_MASK 0x01ff0000U
1067#define TFMINPKTSIZE_SHIFT 16
1068#define TFMINPKTSIZE(x) ((x) << TFMINPKTSIZE_SHIFT)
1069#define TFMINPKTSIZE_GET(x) (((x) & TFMINPKTSIZE_MASK) >> TFMINPKTSIZE_SHIFT)
1070#define TFCAPTUREMAX_MASK 0x00003fffU
1071#define TFCAPTUREMAX_SHIFT 0
1072#define TFCAPTUREMAX(x) ((x) << TFCAPTUREMAX_SHIFT)
1073#define TFCAPTUREMAX_GET(x) (((x) & TFCAPTUREMAX_MASK) >> TFCAPTUREMAX_SHIFT)
1074
1075#define MPS_TRC_INT_CAUSE 0x985c
1076#define MISCPERR 0x00000100U
1077#define PKTFIFO 0x000000f0U
1078#define FILTMEM 0x0000000fU
1079
1080#define MPS_TRC_FILTER0_MATCH 0x9c00
1081#define MPS_TRC_FILTER0_DONT_CARE 0x9c80
1082#define MPS_TRC_FILTER1_MATCH 0x9d00
1083#define MPS_CLS_INT_CAUSE 0xd028
1084#define PLERRENB 0x00000008U
1085#define HASHSRAM 0x00000004U
1086#define MATCHTCAM 0x00000002U
1087#define MATCHSRAM 0x00000001U
1088
1089#define MPS_RX_PERR_INT_CAUSE 0x11074
1090
1091#define CPL_INTR_CAUSE 0x19054
1092#define CIM_OP_MAP_PERR 0x00000020U
1093#define CIM_OVFL_ERROR 0x00000010U
1094#define TP_FRAMING_ERROR 0x00000008U
1095#define SGE_FRAMING_ERROR 0x00000004U
1096#define CIM_FRAMING_ERROR 0x00000002U
1097#define ZERO_SWITCH_ERROR 0x00000001U
1098
1099#define SMB_INT_CAUSE 0x19090
1100#define MSTTXFIFOPARINT 0x00200000U
1101#define MSTRXFIFOPARINT 0x00100000U
1102#define SLVFIFOPARINT 0x00080000U
1103
1104#define ULP_RX_INT_CAUSE 0x19158
1105#define ULP_RX_ISCSI_TAGMASK 0x19164
1106#define ULP_RX_ISCSI_PSZ 0x19168
1107#define HPZ3_MASK 0x0f000000U
1108#define HPZ3_SHIFT 24
1109#define HPZ3(x) ((x) << HPZ3_SHIFT)
1110#define HPZ2_MASK 0x000f0000U
1111#define HPZ2_SHIFT 16
1112#define HPZ2(x) ((x) << HPZ2_SHIFT)
1113#define HPZ1_MASK 0x00000f00U
1114#define HPZ1_SHIFT 8
1115#define HPZ1(x) ((x) << HPZ1_SHIFT)
1116#define HPZ0_MASK 0x0000000fU
1117#define HPZ0_SHIFT 0
1118#define HPZ0(x) ((x) << HPZ0_SHIFT)
1119
1120#define ULP_RX_TDDP_PSZ 0x19178
1121
1122#define SF_DATA 0x193f8
1123#define SF_OP 0x193fc
1124#define SF_BUSY 0x80000000U
1125#define SF_LOCK 0x00000010U
1126#define SF_CONT 0x00000008U
1127#define BYTECNT_MASK 0x00000006U
1128#define BYTECNT_SHIFT 1
1129#define BYTECNT(x) ((x) << BYTECNT_SHIFT)
1130#define OP_WR 0x00000001U
1131
1132#define PL_PF_INT_CAUSE 0x3c0
1133#define PFSW 0x00000008U
1134#define PFSGE 0x00000004U
1135#define PFCIM 0x00000002U
1136#define PFMPS 0x00000001U
1137
1138#define PL_PF_INT_ENABLE 0x3c4
1139#define PL_PF_CTL 0x3c8
1140#define SWINT 0x00000001U
1141
1142#define PL_WHOAMI 0x19400
1143#define SOURCEPF_MASK 0x00000700U
1144#define SOURCEPF_SHIFT 8
1145#define SOURCEPF(x) ((x) << SOURCEPF_SHIFT)
1146#define SOURCEPF_GET(x) (((x) & SOURCEPF_MASK) >> SOURCEPF_SHIFT)
1147#define ISVF 0x00000080U
1148#define VFID_MASK 0x0000007fU
1149#define VFID_SHIFT 0
1150#define VFID(x) ((x) << VFID_SHIFT)
1151#define VFID_GET(x) (((x) & VFID_MASK) >> VFID_SHIFT)
1152
1153#define PL_INT_CAUSE 0x1940c
1154#define ULP_TX 0x08000000U
1155#define SGE 0x04000000U
1156#define HMA 0x02000000U
1157#define CPL_SWITCH 0x01000000U
1158#define ULP_RX 0x00800000U
1159#define PM_RX 0x00400000U
1160#define PM_TX 0x00200000U
1161#define MA 0x00100000U
1162#define TP 0x00080000U
1163#define LE 0x00040000U
1164#define EDC1 0x00020000U
1165#define EDC0 0x00010000U
1166#define MC 0x00008000U
1167#define PCIE 0x00004000U
1168#define PMU 0x00002000U
1169#define XGMAC_KR1 0x00001000U
1170#define XGMAC_KR0 0x00000800U
1171#define XGMAC1 0x00000400U
1172#define XGMAC0 0x00000200U
1173#define SMB 0x00000100U
1174#define SF 0x00000080U
1175#define PL 0x00000040U
1176#define NCSI 0x00000020U
1177#define MPS 0x00000010U
1178#define MI 0x00000008U
1179#define DBG 0x00000004U
1180#define I2CM 0x00000002U
1181#define CIM 0x00000001U
1182
1183#define MC1 0x31
1184#define PL_INT_ENABLE 0x19410
1185#define PL_INT_MAP0 0x19414
1186#define PL_RST 0x19428
1187#define PIORST 0x00000002U
1188#define PIORSTMODE 0x00000001U
1189
1190#define PL_PL_INT_CAUSE 0x19430
1191#define FATALPERR 0x00000010U
1192#define PERRVFID 0x00000001U
1193
1194#define PL_REV 0x1943c
1195
1196#define S_REV 0
1197#define M_REV 0xfU
1198#define V_REV(x) ((x) << S_REV)
1199#define G_REV(x) (((x) >> S_REV) & M_REV)
1200
1201#define LE_DB_CONFIG 0x19c04
1202#define HASHEN 0x00100000U
1203
1204#define LE_DB_SERVER_INDEX 0x19c18
1205#define LE_DB_ACT_CNT_IPV4 0x19c20
1206#define LE_DB_ACT_CNT_IPV6 0x19c24
1207
1208#define LE_DB_INT_CAUSE 0x19c3c
1209#define REQQPARERR 0x00010000U
1210#define UNKNOWNCMD 0x00008000U
1211#define PARITYERR 0x00000040U
1212#define LIPMISS 0x00000020U
1213#define LIP0 0x00000010U
1214
1215#define LE_DB_TID_HASHBASE 0x19df8
1216
1217#define NCSI_INT_CAUSE 0x1a0d8
1218#define CIM_DM_PRTY_ERR 0x00000100U
1219#define MPS_DM_PRTY_ERR 0x00000080U
1220#define TXFIFO_PRTY_ERR 0x00000002U
1221#define RXFIFO_PRTY_ERR 0x00000001U
1222
1223#define XGMAC_PORT_CFG2 0x1018
1224#define PATEN 0x00040000U
1225#define MAGICEN 0x00020000U
1226 1710
1227#define XGMAC_PORT_MAGIC_MACID_LO 0x1024 1711#define MPS_TRC_CFG_A 0x9800
1228#define XGMAC_PORT_MAGIC_MACID_HI 0x1028 1712
1713#define TRCFIFOEMPTY_S 4
1714#define TRCFIFOEMPTY_V(x) ((x) << TRCFIFOEMPTY_S)
1715#define TRCFIFOEMPTY_F TRCFIFOEMPTY_V(1U)
1716
1717#define TRCIGNOREDROPINPUT_S 3
1718#define TRCIGNOREDROPINPUT_V(x) ((x) << TRCIGNOREDROPINPUT_S)
1719#define TRCIGNOREDROPINPUT_F TRCIGNOREDROPINPUT_V(1U)
1720
1721#define TRCKEEPDUPLICATES_S 2
1722#define TRCKEEPDUPLICATES_V(x) ((x) << TRCKEEPDUPLICATES_S)
1723#define TRCKEEPDUPLICATES_F TRCKEEPDUPLICATES_V(1U)
1724
1725#define TRCEN_S 1
1726#define TRCEN_V(x) ((x) << TRCEN_S)
1727#define TRCEN_F TRCEN_V(1U)
1728
1729#define TRCMULTIFILTER_S 0
1730#define TRCMULTIFILTER_V(x) ((x) << TRCMULTIFILTER_S)
1731#define TRCMULTIFILTER_F TRCMULTIFILTER_V(1U)
1732
1733#define MPS_TRC_RSS_CONTROL_A 0x9808
1734#define MPS_T5_TRC_RSS_CONTROL_A 0xa00c
1735
1736#define RSSCONTROL_S 16
1737#define RSSCONTROL_V(x) ((x) << RSSCONTROL_S)
1738
1739#define QUEUENUMBER_S 0
1740#define QUEUENUMBER_V(x) ((x) << QUEUENUMBER_S)
1741
1742#define TP_RSS_CONFIG_A 0x7df0
1743
1744#define TNL4TUPENIPV6_S 31
1745#define TNL4TUPENIPV6_V(x) ((x) << TNL4TUPENIPV6_S)
1746#define TNL4TUPENIPV6_F TNL4TUPENIPV6_V(1U)
1747
1748#define TNL2TUPENIPV6_S 30
1749#define TNL2TUPENIPV6_V(x) ((x) << TNL2TUPENIPV6_S)
1750#define TNL2TUPENIPV6_F TNL2TUPENIPV6_V(1U)
1751
1752#define TNL4TUPENIPV4_S 29
1753#define TNL4TUPENIPV4_V(x) ((x) << TNL4TUPENIPV4_S)
1754#define TNL4TUPENIPV4_F TNL4TUPENIPV4_V(1U)
1755
1756#define TNL2TUPENIPV4_S 28
1757#define TNL2TUPENIPV4_V(x) ((x) << TNL2TUPENIPV4_S)
1758#define TNL2TUPENIPV4_F TNL2TUPENIPV4_V(1U)
1759
1760#define TNLTCPSEL_S 27
1761#define TNLTCPSEL_V(x) ((x) << TNLTCPSEL_S)
1762#define TNLTCPSEL_F TNLTCPSEL_V(1U)
1763
1764#define TNLIP6SEL_S 26
1765#define TNLIP6SEL_V(x) ((x) << TNLIP6SEL_S)
1766#define TNLIP6SEL_F TNLIP6SEL_V(1U)
1767
1768#define TNLVRTSEL_S 25
1769#define TNLVRTSEL_V(x) ((x) << TNLVRTSEL_S)
1770#define TNLVRTSEL_F TNLVRTSEL_V(1U)
1771
1772#define TNLMAPEN_S 24
1773#define TNLMAPEN_V(x) ((x) << TNLMAPEN_S)
1774#define TNLMAPEN_F TNLMAPEN_V(1U)
1775
1776#define OFDHASHSAVE_S 19
1777#define OFDHASHSAVE_V(x) ((x) << OFDHASHSAVE_S)
1778#define OFDHASHSAVE_F OFDHASHSAVE_V(1U)
1779
1780#define OFDVRTSEL_S 18
1781#define OFDVRTSEL_V(x) ((x) << OFDVRTSEL_S)
1782#define OFDVRTSEL_F OFDVRTSEL_V(1U)
1783
1784#define OFDMAPEN_S 17
1785#define OFDMAPEN_V(x) ((x) << OFDMAPEN_S)
1786#define OFDMAPEN_F OFDMAPEN_V(1U)
1787
1788#define OFDLKPEN_S 16
1789#define OFDLKPEN_V(x) ((x) << OFDLKPEN_S)
1790#define OFDLKPEN_F OFDLKPEN_V(1U)
1791
1792#define SYN4TUPENIPV6_S 15
1793#define SYN4TUPENIPV6_V(x) ((x) << SYN4TUPENIPV6_S)
1794#define SYN4TUPENIPV6_F SYN4TUPENIPV6_V(1U)
1795
1796#define SYN2TUPENIPV6_S 14
1797#define SYN2TUPENIPV6_V(x) ((x) << SYN2TUPENIPV6_S)
1798#define SYN2TUPENIPV6_F SYN2TUPENIPV6_V(1U)
1799
1800#define SYN4TUPENIPV4_S 13
1801#define SYN4TUPENIPV4_V(x) ((x) << SYN4TUPENIPV4_S)
1802#define SYN4TUPENIPV4_F SYN4TUPENIPV4_V(1U)
1803
1804#define SYN2TUPENIPV4_S 12
1805#define SYN2TUPENIPV4_V(x) ((x) << SYN2TUPENIPV4_S)
1806#define SYN2TUPENIPV4_F SYN2TUPENIPV4_V(1U)
1807
1808#define SYNIP6SEL_S 11
1809#define SYNIP6SEL_V(x) ((x) << SYNIP6SEL_S)
1810#define SYNIP6SEL_F SYNIP6SEL_V(1U)
1811
1812#define SYNVRTSEL_S 10
1813#define SYNVRTSEL_V(x) ((x) << SYNVRTSEL_S)
1814#define SYNVRTSEL_F SYNVRTSEL_V(1U)
1815
1816#define SYNMAPEN_S 9
1817#define SYNMAPEN_V(x) ((x) << SYNMAPEN_S)
1818#define SYNMAPEN_F SYNMAPEN_V(1U)
1819
1820#define SYNLKPEN_S 8
1821#define SYNLKPEN_V(x) ((x) << SYNLKPEN_S)
1822#define SYNLKPEN_F SYNLKPEN_V(1U)
1823
1824#define CHANNELENABLE_S 7
1825#define CHANNELENABLE_V(x) ((x) << CHANNELENABLE_S)
1826#define CHANNELENABLE_F CHANNELENABLE_V(1U)
1827
1828#define PORTENABLE_S 6
1829#define PORTENABLE_V(x) ((x) << PORTENABLE_S)
1830#define PORTENABLE_F PORTENABLE_V(1U)
1831
1832#define TNLALLLOOKUP_S 5
1833#define TNLALLLOOKUP_V(x) ((x) << TNLALLLOOKUP_S)
1834#define TNLALLLOOKUP_F TNLALLLOOKUP_V(1U)
1835
1836#define VIRTENABLE_S 4
1837#define VIRTENABLE_V(x) ((x) << VIRTENABLE_S)
1838#define VIRTENABLE_F VIRTENABLE_V(1U)
1839
1840#define CONGESTIONENABLE_S 3
1841#define CONGESTIONENABLE_V(x) ((x) << CONGESTIONENABLE_S)
1842#define CONGESTIONENABLE_F CONGESTIONENABLE_V(1U)
1843
1844#define HASHTOEPLITZ_S 2
1845#define HASHTOEPLITZ_V(x) ((x) << HASHTOEPLITZ_S)
1846#define HASHTOEPLITZ_F HASHTOEPLITZ_V(1U)
1847
1848#define UDPENABLE_S 1
1849#define UDPENABLE_V(x) ((x) << UDPENABLE_S)
1850#define UDPENABLE_F UDPENABLE_V(1U)
1851
1852#define DISABLE_S 0
1853#define DISABLE_V(x) ((x) << DISABLE_S)
1854#define DISABLE_F DISABLE_V(1U)
1855
1856#define TP_RSS_CONFIG_TNL_A 0x7df4
1857
1858#define MASKSIZE_S 28
1859#define MASKSIZE_M 0xfU
1860#define MASKSIZE_V(x) ((x) << MASKSIZE_S)
1861#define MASKSIZE_G(x) (((x) >> MASKSIZE_S) & MASKSIZE_M)
1862
1863#define MASKFILTER_S 16
1864#define MASKFILTER_M 0x7ffU
1865#define MASKFILTER_V(x) ((x) << MASKFILTER_S)
1866#define MASKFILTER_G(x) (((x) >> MASKFILTER_S) & MASKFILTER_M)
1867
1868#define USEWIRECH_S 0
1869#define USEWIRECH_V(x) ((x) << USEWIRECH_S)
1870#define USEWIRECH_F USEWIRECH_V(1U)
1871
1872#define HASHALL_S 2
1873#define HASHALL_V(x) ((x) << HASHALL_S)
1874#define HASHALL_F HASHALL_V(1U)
1875
1876#define HASHETH_S 1
1877#define HASHETH_V(x) ((x) << HASHETH_S)
1878#define HASHETH_F HASHETH_V(1U)
1879
1880#define TP_RSS_CONFIG_OFD_A 0x7df8
1881
1882#define RRCPLMAPEN_S 20
1883#define RRCPLMAPEN_V(x) ((x) << RRCPLMAPEN_S)
1884#define RRCPLMAPEN_F RRCPLMAPEN_V(1U)
1885
1886#define RRCPLQUEWIDTH_S 16
1887#define RRCPLQUEWIDTH_M 0xfU
1888#define RRCPLQUEWIDTH_V(x) ((x) << RRCPLQUEWIDTH_S)
1889#define RRCPLQUEWIDTH_G(x) (((x) >> RRCPLQUEWIDTH_S) & RRCPLQUEWIDTH_M)
1890
1891#define TP_RSS_CONFIG_SYN_A 0x7dfc
1892#define TP_RSS_CONFIG_VRT_A 0x7e00
1893
1894#define VFRDRG_S 25
1895#define VFRDRG_V(x) ((x) << VFRDRG_S)
1896#define VFRDRG_F VFRDRG_V(1U)
1897
1898#define VFRDEN_S 24
1899#define VFRDEN_V(x) ((x) << VFRDEN_S)
1900#define VFRDEN_F VFRDEN_V(1U)
1901
1902#define VFPERREN_S 23
1903#define VFPERREN_V(x) ((x) << VFPERREN_S)
1904#define VFPERREN_F VFPERREN_V(1U)
1905
1906#define KEYPERREN_S 22
1907#define KEYPERREN_V(x) ((x) << KEYPERREN_S)
1908#define KEYPERREN_F KEYPERREN_V(1U)
1909
1910#define DISABLEVLAN_S 21
1911#define DISABLEVLAN_V(x) ((x) << DISABLEVLAN_S)
1912#define DISABLEVLAN_F DISABLEVLAN_V(1U)
1913
1914#define ENABLEUP0_S 20
1915#define ENABLEUP0_V(x) ((x) << ENABLEUP0_S)
1916#define ENABLEUP0_F ENABLEUP0_V(1U)
1917
1918#define HASHDELAY_S 16
1919#define HASHDELAY_M 0xfU
1920#define HASHDELAY_V(x) ((x) << HASHDELAY_S)
1921#define HASHDELAY_G(x) (((x) >> HASHDELAY_S) & HASHDELAY_M)
1922
1923#define VFWRADDR_S 8
1924#define VFWRADDR_M 0x7fU
1925#define VFWRADDR_V(x) ((x) << VFWRADDR_S)
1926#define VFWRADDR_G(x) (((x) >> VFWRADDR_S) & VFWRADDR_M)
1927
1928#define KEYMODE_S 6
1929#define KEYMODE_M 0x3U
1930#define KEYMODE_V(x) ((x) << KEYMODE_S)
1931#define KEYMODE_G(x) (((x) >> KEYMODE_S) & KEYMODE_M)
1932
1933#define VFWREN_S 5
1934#define VFWREN_V(x) ((x) << VFWREN_S)
1935#define VFWREN_F VFWREN_V(1U)
1936
1937#define KEYWREN_S 4
1938#define KEYWREN_V(x) ((x) << KEYWREN_S)
1939#define KEYWREN_F KEYWREN_V(1U)
1940
1941#define KEYWRADDR_S 0
1942#define KEYWRADDR_M 0xfU
1943#define KEYWRADDR_V(x) ((x) << KEYWRADDR_S)
1944#define KEYWRADDR_G(x) (((x) >> KEYWRADDR_S) & KEYWRADDR_M)
1945
1946#define KEYWRADDRX_S 30
1947#define KEYWRADDRX_M 0x3U
1948#define KEYWRADDRX_V(x) ((x) << KEYWRADDRX_S)
1949#define KEYWRADDRX_G(x) (((x) >> KEYWRADDRX_S) & KEYWRADDRX_M)
1950
1951#define KEYEXTEND_S 26
1952#define KEYEXTEND_V(x) ((x) << KEYEXTEND_S)
1953#define KEYEXTEND_F KEYEXTEND_V(1U)
1954
1955#define LKPIDXSIZE_S 24
1956#define LKPIDXSIZE_M 0x3U
1957#define LKPIDXSIZE_V(x) ((x) << LKPIDXSIZE_S)
1958#define LKPIDXSIZE_G(x) (((x) >> LKPIDXSIZE_S) & LKPIDXSIZE_M)
1959
1960#define TP_RSS_VFL_CONFIG_A 0x3a
1961#define TP_RSS_VFH_CONFIG_A 0x3b
1962
1963#define ENABLEUDPHASH_S 31
1964#define ENABLEUDPHASH_V(x) ((x) << ENABLEUDPHASH_S)
1965#define ENABLEUDPHASH_F ENABLEUDPHASH_V(1U)
1966
1967#define VFUPEN_S 30
1968#define VFUPEN_V(x) ((x) << VFUPEN_S)
1969#define VFUPEN_F VFUPEN_V(1U)
1970
1971#define VFVLNEX_S 28
1972#define VFVLNEX_V(x) ((x) << VFVLNEX_S)
1973#define VFVLNEX_F VFVLNEX_V(1U)
1974
1975#define VFPRTEN_S 27
1976#define VFPRTEN_V(x) ((x) << VFPRTEN_S)
1977#define VFPRTEN_F VFPRTEN_V(1U)
1978
1979#define VFCHNEN_S 26
1980#define VFCHNEN_V(x) ((x) << VFCHNEN_S)
1981#define VFCHNEN_F VFCHNEN_V(1U)
1982
1983#define DEFAULTQUEUE_S 16
1984#define DEFAULTQUEUE_M 0x3ffU
1985#define DEFAULTQUEUE_G(x) (((x) >> DEFAULTQUEUE_S) & DEFAULTQUEUE_M)
1986
1987#define VFIP6TWOTUPEN_S 6
1988#define VFIP6TWOTUPEN_V(x) ((x) << VFIP6TWOTUPEN_S)
1989#define VFIP6TWOTUPEN_F VFIP6TWOTUPEN_V(1U)
1990
1991#define VFIP4FOURTUPEN_S 5
1992#define VFIP4FOURTUPEN_V(x) ((x) << VFIP4FOURTUPEN_S)
1993#define VFIP4FOURTUPEN_F VFIP4FOURTUPEN_V(1U)
1994
1995#define VFIP4TWOTUPEN_S 4
1996#define VFIP4TWOTUPEN_V(x) ((x) << VFIP4TWOTUPEN_S)
1997#define VFIP4TWOTUPEN_F VFIP4TWOTUPEN_V(1U)
1998
1999#define KEYINDEX_S 0
2000#define KEYINDEX_M 0xfU
2001#define KEYINDEX_G(x) (((x) >> KEYINDEX_S) & KEYINDEX_M)
2002
2003#define MAPENABLE_S 31
2004#define MAPENABLE_V(x) ((x) << MAPENABLE_S)
2005#define MAPENABLE_F MAPENABLE_V(1U)
2006
2007#define CHNENABLE_S 30
2008#define CHNENABLE_V(x) ((x) << CHNENABLE_S)
2009#define CHNENABLE_F CHNENABLE_V(1U)
2010
2011#define PRTENABLE_S 29
2012#define PRTENABLE_V(x) ((x) << PRTENABLE_S)
2013#define PRTENABLE_F PRTENABLE_V(1U)
2014
2015#define UDPFOURTUPEN_S 28
2016#define UDPFOURTUPEN_V(x) ((x) << UDPFOURTUPEN_S)
2017#define UDPFOURTUPEN_F UDPFOURTUPEN_V(1U)
2018
2019#define IP6FOURTUPEN_S 27
2020#define IP6FOURTUPEN_V(x) ((x) << IP6FOURTUPEN_S)
2021#define IP6FOURTUPEN_F IP6FOURTUPEN_V(1U)
1229 2022
1230#define XGMAC_PORT_EPIO_DATA0 0x10c0 2023#define IP6TWOTUPEN_S 26
1231#define XGMAC_PORT_EPIO_DATA1 0x10c4 2024#define IP6TWOTUPEN_V(x) ((x) << IP6TWOTUPEN_S)
1232#define XGMAC_PORT_EPIO_DATA2 0x10c8 2025#define IP6TWOTUPEN_F IP6TWOTUPEN_V(1U)
1233#define XGMAC_PORT_EPIO_DATA3 0x10cc
1234#define XGMAC_PORT_EPIO_OP 0x10d0
1235#define EPIOWR 0x00000100U
1236#define ADDRESS_MASK 0x000000ffU
1237#define ADDRESS_SHIFT 0
1238#define ADDRESS(x) ((x) << ADDRESS_SHIFT)
1239 2026
1240#define MAC_PORT_INT_CAUSE 0x8dc 2027#define IP4FOURTUPEN_S 25
1241#define XGMAC_PORT_INT_CAUSE 0x10dc 2028#define IP4FOURTUPEN_V(x) ((x) << IP4FOURTUPEN_S)
2029#define IP4FOURTUPEN_F IP4FOURTUPEN_V(1U)
1242 2030
1243#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x7e28 2031#define IP4TWOTUPEN_S 24
2032#define IP4TWOTUPEN_V(x) ((x) << IP4TWOTUPEN_S)
2033#define IP4TWOTUPEN_F IP4TWOTUPEN_V(1U)
1244 2034
1245#define A_TP_TX_MOD_CHANNEL_WEIGHT 0x7e34 2035#define IVFWIDTH_S 20
2036#define IVFWIDTH_M 0xfU
2037#define IVFWIDTH_V(x) ((x) << IVFWIDTH_S)
2038#define IVFWIDTH_G(x) (((x) >> IVFWIDTH_S) & IVFWIDTH_M)
1246 2039
1247#define S_TX_MOD_QUEUE_REQ_MAP 0 2040#define CH1DEFAULTQUEUE_S 10
1248#define M_TX_MOD_QUEUE_REQ_MAP 0xffffU 2041#define CH1DEFAULTQUEUE_M 0x3ffU
1249#define V_TX_MOD_QUEUE_REQ_MAP(x) ((x) << S_TX_MOD_QUEUE_REQ_MAP) 2042#define CH1DEFAULTQUEUE_V(x) ((x) << CH1DEFAULTQUEUE_S)
2043#define CH1DEFAULTQUEUE_G(x) (((x) >> CH1DEFAULTQUEUE_S) & CH1DEFAULTQUEUE_M)
1250 2044
1251#define A_TP_TX_MOD_QUEUE_WEIGHT0 0x7e30 2045#define CH0DEFAULTQUEUE_S 0
2046#define CH0DEFAULTQUEUE_M 0x3ffU
2047#define CH0DEFAULTQUEUE_V(x) ((x) << CH0DEFAULTQUEUE_S)
2048#define CH0DEFAULTQUEUE_G(x) (((x) >> CH0DEFAULTQUEUE_S) & CH0DEFAULTQUEUE_M)
1252 2049
1253#define S_TX_MODQ_WEIGHT3 24 2050#define VFLKPIDX_S 8
1254#define M_TX_MODQ_WEIGHT3 0xffU 2051#define VFLKPIDX_M 0xffU
1255#define V_TX_MODQ_WEIGHT3(x) ((x) << S_TX_MODQ_WEIGHT3) 2052#define VFLKPIDX_G(x) (((x) >> VFLKPIDX_S) & VFLKPIDX_M)
1256 2053
1257#define S_TX_MODQ_WEIGHT2 16 2054#define TP_RSS_CONFIG_CNG_A 0x7e04
1258#define M_TX_MODQ_WEIGHT2 0xffU 2055#define TP_RSS_SECRET_KEY0_A 0x40
1259#define V_TX_MODQ_WEIGHT2(x) ((x) << S_TX_MODQ_WEIGHT2) 2056#define TP_RSS_PF0_CONFIG_A 0x30
2057#define TP_RSS_PF_MAP_A 0x38
2058#define TP_RSS_PF_MSK_A 0x39
1260 2059
1261#define S_TX_MODQ_WEIGHT1 8 2060#define PF1LKPIDX_S 3
1262#define M_TX_MODQ_WEIGHT1 0xffU
1263#define V_TX_MODQ_WEIGHT1(x) ((x) << S_TX_MODQ_WEIGHT1)
1264 2061
1265#define S_TX_MODQ_WEIGHT0 0 2062#define PF0LKPIDX_M 0x7U
1266#define M_TX_MODQ_WEIGHT0 0xffU
1267#define V_TX_MODQ_WEIGHT0(x) ((x) << S_TX_MODQ_WEIGHT0)
1268 2063
1269#define A_TP_TX_SCHED_HDR 0x23 2064#define PF1MSKSIZE_S 4
2065#define PF1MSKSIZE_M 0xfU
1270 2066
1271#define A_TP_TX_SCHED_FIFO 0x24 2067#define CHNCOUNT3_S 31
2068#define CHNCOUNT3_V(x) ((x) << CHNCOUNT3_S)
2069#define CHNCOUNT3_F CHNCOUNT3_V(1U)
1272 2070
1273#define A_TP_TX_SCHED_PCMD 0x25 2071#define CHNCOUNT2_S 30
2072#define CHNCOUNT2_V(x) ((x) << CHNCOUNT2_S)
2073#define CHNCOUNT2_F CHNCOUNT2_V(1U)
1274 2074
1275#define S_VNIC 11 2075#define CHNCOUNT1_S 29
1276#define V_VNIC(x) ((x) << S_VNIC) 2076#define CHNCOUNT1_V(x) ((x) << CHNCOUNT1_S)
1277#define F_VNIC V_VNIC(1U) 2077#define CHNCOUNT1_F CHNCOUNT1_V(1U)
1278 2078
1279#define S_FRAGMENTATION 9 2079#define CHNCOUNT0_S 28
1280#define V_FRAGMENTATION(x) ((x) << S_FRAGMENTATION) 2080#define CHNCOUNT0_V(x) ((x) << CHNCOUNT0_S)
1281#define F_FRAGMENTATION V_FRAGMENTATION(1U) 2081#define CHNCOUNT0_F CHNCOUNT0_V(1U)
1282 2082
1283#define S_MPSHITTYPE 8 2083#define CHNUNDFLOW3_S 27
1284#define V_MPSHITTYPE(x) ((x) << S_MPSHITTYPE) 2084#define CHNUNDFLOW3_V(x) ((x) << CHNUNDFLOW3_S)
1285#define F_MPSHITTYPE V_MPSHITTYPE(1U) 2085#define CHNUNDFLOW3_F CHNUNDFLOW3_V(1U)
1286 2086
1287#define S_MACMATCH 7 2087#define CHNUNDFLOW2_S 26
1288#define V_MACMATCH(x) ((x) << S_MACMATCH) 2088#define CHNUNDFLOW2_V(x) ((x) << CHNUNDFLOW2_S)
1289#define F_MACMATCH V_MACMATCH(1U) 2089#define CHNUNDFLOW2_F CHNUNDFLOW2_V(1U)
1290 2090
1291#define S_ETHERTYPE 6 2091#define CHNUNDFLOW1_S 25
1292#define V_ETHERTYPE(x) ((x) << S_ETHERTYPE) 2092#define CHNUNDFLOW1_V(x) ((x) << CHNUNDFLOW1_S)
1293#define F_ETHERTYPE V_ETHERTYPE(1U) 2093#define CHNUNDFLOW1_F CHNUNDFLOW1_V(1U)
1294 2094
1295#define S_PROTOCOL 5 2095#define CHNUNDFLOW0_S 24
1296#define V_PROTOCOL(x) ((x) << S_PROTOCOL) 2096#define CHNUNDFLOW0_V(x) ((x) << CHNUNDFLOW0_S)
1297#define F_PROTOCOL V_PROTOCOL(1U) 2097#define CHNUNDFLOW0_F CHNUNDFLOW0_V(1U)
1298 2098
1299#define S_TOS 4 2099#define RSTCHN3_S 19
1300#define V_TOS(x) ((x) << S_TOS) 2100#define RSTCHN3_V(x) ((x) << RSTCHN3_S)
1301#define F_TOS V_TOS(1U) 2101#define RSTCHN3_F RSTCHN3_V(1U)
1302 2102
1303#define S_VLAN 3 2103#define RSTCHN2_S 18
1304#define V_VLAN(x) ((x) << S_VLAN) 2104#define RSTCHN2_V(x) ((x) << RSTCHN2_S)
1305#define F_VLAN V_VLAN(1U) 2105#define RSTCHN2_F RSTCHN2_V(1U)
1306 2106
1307#define S_VNIC_ID 2 2107#define RSTCHN1_S 17
1308#define V_VNIC_ID(x) ((x) << S_VNIC_ID) 2108#define RSTCHN1_V(x) ((x) << RSTCHN1_S)
1309#define F_VNIC_ID V_VNIC_ID(1U) 2109#define RSTCHN1_F RSTCHN1_V(1U)
1310 2110
1311#define S_PORT 1 2111#define RSTCHN0_S 16
1312#define V_PORT(x) ((x) << S_PORT) 2112#define RSTCHN0_V(x) ((x) << RSTCHN0_S)
1313#define F_PORT V_PORT(1U) 2113#define RSTCHN0_F RSTCHN0_V(1U)
1314 2114
1315#define S_FCOE 0 2115#define UPDVLD_S 15
1316#define V_FCOE(x) ((x) << S_FCOE) 2116#define UPDVLD_V(x) ((x) << UPDVLD_S)
1317#define F_FCOE V_FCOE(1U) 2117#define UPDVLD_F UPDVLD_V(1U)
2118
2119#define XOFF_S 14
2120#define XOFF_V(x) ((x) << XOFF_S)
2121#define XOFF_F XOFF_V(1U)
2122
2123#define UPDCHN3_S 13
2124#define UPDCHN3_V(x) ((x) << UPDCHN3_S)
2125#define UPDCHN3_F UPDCHN3_V(1U)
2126
2127#define UPDCHN2_S 12
2128#define UPDCHN2_V(x) ((x) << UPDCHN2_S)
2129#define UPDCHN2_F UPDCHN2_V(1U)
2130
2131#define UPDCHN1_S 11
2132#define UPDCHN1_V(x) ((x) << UPDCHN1_S)
2133#define UPDCHN1_F UPDCHN1_V(1U)
2134
2135#define UPDCHN0_S 10
2136#define UPDCHN0_V(x) ((x) << UPDCHN0_S)
2137#define UPDCHN0_F UPDCHN0_V(1U)
2138
2139#define QUEUE_S 0
2140#define QUEUE_M 0x3ffU
2141#define QUEUE_V(x) ((x) << QUEUE_S)
2142#define QUEUE_G(x) (((x) >> QUEUE_S) & QUEUE_M)
2143
2144#define MPS_TRC_INT_CAUSE_A 0x985c
2145
2146#define MISCPERR_S 8
2147#define MISCPERR_V(x) ((x) << MISCPERR_S)
2148#define MISCPERR_F MISCPERR_V(1U)
2149
2150#define PKTFIFO_S 4
2151#define PKTFIFO_M 0xfU
2152#define PKTFIFO_V(x) ((x) << PKTFIFO_S)
2153
2154#define FILTMEM_S 0
2155#define FILTMEM_M 0xfU
2156#define FILTMEM_V(x) ((x) << FILTMEM_S)
2157
2158#define MPS_CLS_INT_CAUSE_A 0xd028
2159
2160#define HASHSRAM_S 2
2161#define HASHSRAM_V(x) ((x) << HASHSRAM_S)
2162#define HASHSRAM_F HASHSRAM_V(1U)
2163
2164#define MATCHTCAM_S 1
2165#define MATCHTCAM_V(x) ((x) << MATCHTCAM_S)
2166#define MATCHTCAM_F MATCHTCAM_V(1U)
2167
2168#define MATCHSRAM_S 0
2169#define MATCHSRAM_V(x) ((x) << MATCHSRAM_S)
2170#define MATCHSRAM_F MATCHSRAM_V(1U)
2171
2172#define MPS_RX_PERR_INT_CAUSE_A 0x11074
2173
2174#define MPS_CLS_TCAM_Y_L_A 0xf000
2175#define MPS_CLS_TCAM_X_L_A 0xf008
2176
2177#define MPS_CLS_TCAM_Y_L(idx) (MPS_CLS_TCAM_Y_L_A + (idx) * 16)
2178#define NUM_MPS_CLS_TCAM_Y_L_INSTANCES 512
2179
2180#define MPS_CLS_TCAM_X_L(idx) (MPS_CLS_TCAM_X_L_A + (idx) * 16)
2181#define NUM_MPS_CLS_TCAM_X_L_INSTANCES 512
2182
2183#define MPS_CLS_SRAM_L_A 0xe000
2184#define MPS_CLS_SRAM_H_A 0xe004
2185
2186#define MPS_CLS_SRAM_L(idx) (MPS_CLS_SRAM_L_A + (idx) * 8)
2187#define NUM_MPS_CLS_SRAM_L_INSTANCES 336
2188
2189#define MPS_CLS_SRAM_H(idx) (MPS_CLS_SRAM_H_A + (idx) * 8)
2190#define NUM_MPS_CLS_SRAM_H_INSTANCES 336
2191
2192#define MULTILISTEN0_S 25
2193
2194#define REPLICATE_S 11
2195#define REPLICATE_V(x) ((x) << REPLICATE_S)
2196#define REPLICATE_F REPLICATE_V(1U)
2197
2198#define PF_S 8
2199#define PF_M 0x7U
2200#define PF_G(x) (((x) >> PF_S) & PF_M)
2201
2202#define VF_VALID_S 7
2203#define VF_VALID_V(x) ((x) << VF_VALID_S)
2204#define VF_VALID_F VF_VALID_V(1U)
2205
2206#define VF_S 0
2207#define VF_M 0x7fU
2208#define VF_G(x) (((x) >> VF_S) & VF_M)
2209
2210#define SRAM_PRIO3_S 22
2211#define SRAM_PRIO3_M 0x7U
2212#define SRAM_PRIO3_G(x) (((x) >> SRAM_PRIO3_S) & SRAM_PRIO3_M)
2213
2214#define SRAM_PRIO2_S 19
2215#define SRAM_PRIO2_M 0x7U
2216#define SRAM_PRIO2_G(x) (((x) >> SRAM_PRIO2_S) & SRAM_PRIO2_M)
2217
2218#define SRAM_PRIO1_S 16
2219#define SRAM_PRIO1_M 0x7U
2220#define SRAM_PRIO1_G(x) (((x) >> SRAM_PRIO1_S) & SRAM_PRIO1_M)
2221
2222#define SRAM_PRIO0_S 13
2223#define SRAM_PRIO0_M 0x7U
2224#define SRAM_PRIO0_G(x) (((x) >> SRAM_PRIO0_S) & SRAM_PRIO0_M)
2225
2226#define SRAM_VLD_S 12
2227#define SRAM_VLD_V(x) ((x) << SRAM_VLD_S)
2228#define SRAM_VLD_F SRAM_VLD_V(1U)
2229
2230#define PORTMAP_S 0
2231#define PORTMAP_M 0xfU
2232#define PORTMAP_G(x) (((x) >> PORTMAP_S) & PORTMAP_M)
2233
2234#define CPL_INTR_CAUSE_A 0x19054
2235
2236#define CIM_OP_MAP_PERR_S 5
2237#define CIM_OP_MAP_PERR_V(x) ((x) << CIM_OP_MAP_PERR_S)
2238#define CIM_OP_MAP_PERR_F CIM_OP_MAP_PERR_V(1U)
2239
2240#define CIM_OVFL_ERROR_S 4
2241#define CIM_OVFL_ERROR_V(x) ((x) << CIM_OVFL_ERROR_S)
2242#define CIM_OVFL_ERROR_F CIM_OVFL_ERROR_V(1U)
2243
2244#define TP_FRAMING_ERROR_S 3
2245#define TP_FRAMING_ERROR_V(x) ((x) << TP_FRAMING_ERROR_S)
2246#define TP_FRAMING_ERROR_F TP_FRAMING_ERROR_V(1U)
2247
2248#define SGE_FRAMING_ERROR_S 2
2249#define SGE_FRAMING_ERROR_V(x) ((x) << SGE_FRAMING_ERROR_S)
2250#define SGE_FRAMING_ERROR_F SGE_FRAMING_ERROR_V(1U)
2251
2252#define CIM_FRAMING_ERROR_S 1
2253#define CIM_FRAMING_ERROR_V(x) ((x) << CIM_FRAMING_ERROR_S)
2254#define CIM_FRAMING_ERROR_F CIM_FRAMING_ERROR_V(1U)
2255
2256#define ZERO_SWITCH_ERROR_S 0
2257#define ZERO_SWITCH_ERROR_V(x) ((x) << ZERO_SWITCH_ERROR_S)
2258#define ZERO_SWITCH_ERROR_F ZERO_SWITCH_ERROR_V(1U)
2259
2260#define SMB_INT_CAUSE_A 0x19090
2261
2262#define MSTTXFIFOPARINT_S 21
2263#define MSTTXFIFOPARINT_V(x) ((x) << MSTTXFIFOPARINT_S)
2264#define MSTTXFIFOPARINT_F MSTTXFIFOPARINT_V(1U)
2265
2266#define MSTRXFIFOPARINT_S 20
2267#define MSTRXFIFOPARINT_V(x) ((x) << MSTRXFIFOPARINT_S)
2268#define MSTRXFIFOPARINT_F MSTRXFIFOPARINT_V(1U)
2269
2270#define SLVFIFOPARINT_S 19
2271#define SLVFIFOPARINT_V(x) ((x) << SLVFIFOPARINT_S)
2272#define SLVFIFOPARINT_F SLVFIFOPARINT_V(1U)
2273
2274#define ULP_RX_INT_CAUSE_A 0x19158
2275#define ULP_RX_ISCSI_TAGMASK_A 0x19164
2276#define ULP_RX_ISCSI_PSZ_A 0x19168
2277#define ULP_RX_LA_CTL_A 0x1923c
2278#define ULP_RX_LA_RDPTR_A 0x19240
2279#define ULP_RX_LA_RDDATA_A 0x19244
2280#define ULP_RX_LA_WRPTR_A 0x19248
2281
2282#define HPZ3_S 24
2283#define HPZ3_V(x) ((x) << HPZ3_S)
2284
2285#define HPZ2_S 16
2286#define HPZ2_V(x) ((x) << HPZ2_S)
2287
2288#define HPZ1_S 8
2289#define HPZ1_V(x) ((x) << HPZ1_S)
2290
2291#define HPZ0_S 0
2292#define HPZ0_V(x) ((x) << HPZ0_S)
2293
2294#define ULP_RX_TDDP_PSZ_A 0x19178
2295
2296/* registers for module SF */
2297#define SF_DATA_A 0x193f8
2298#define SF_OP_A 0x193fc
2299
2300#define SF_BUSY_S 31
2301#define SF_BUSY_V(x) ((x) << SF_BUSY_S)
2302#define SF_BUSY_F SF_BUSY_V(1U)
2303
2304#define SF_LOCK_S 4
2305#define SF_LOCK_V(x) ((x) << SF_LOCK_S)
2306#define SF_LOCK_F SF_LOCK_V(1U)
2307
2308#define SF_CONT_S 3
2309#define SF_CONT_V(x) ((x) << SF_CONT_S)
2310#define SF_CONT_F SF_CONT_V(1U)
2311
2312#define BYTECNT_S 1
2313#define BYTECNT_V(x) ((x) << BYTECNT_S)
2314
2315#define OP_S 0
2316#define OP_V(x) ((x) << OP_S)
2317#define OP_F OP_V(1U)
2318
2319#define PL_PF_INT_CAUSE_A 0x3c0
2320
2321#define PFSW_S 3
2322#define PFSW_V(x) ((x) << PFSW_S)
2323#define PFSW_F PFSW_V(1U)
2324
2325#define PFCIM_S 1
2326#define PFCIM_V(x) ((x) << PFCIM_S)
2327#define PFCIM_F PFCIM_V(1U)
2328
2329#define PL_PF_INT_ENABLE_A 0x3c4
2330#define PL_PF_CTL_A 0x3c8
2331
2332#define PL_WHOAMI_A 0x19400
2333
2334#define SOURCEPF_S 8
2335#define SOURCEPF_M 0x7U
2336#define SOURCEPF_G(x) (((x) >> SOURCEPF_S) & SOURCEPF_M)
2337
2338#define PL_INT_CAUSE_A 0x1940c
2339
2340#define ULP_TX_S 27
2341#define ULP_TX_V(x) ((x) << ULP_TX_S)
2342#define ULP_TX_F ULP_TX_V(1U)
2343
2344#define SGE_S 26
2345#define SGE_V(x) ((x) << SGE_S)
2346#define SGE_F SGE_V(1U)
2347
2348#define CPL_SWITCH_S 24
2349#define CPL_SWITCH_V(x) ((x) << CPL_SWITCH_S)
2350#define CPL_SWITCH_F CPL_SWITCH_V(1U)
2351
2352#define ULP_RX_S 23
2353#define ULP_RX_V(x) ((x) << ULP_RX_S)
2354#define ULP_RX_F ULP_RX_V(1U)
2355
2356#define PM_RX_S 22
2357#define PM_RX_V(x) ((x) << PM_RX_S)
2358#define PM_RX_F PM_RX_V(1U)
2359
2360#define PM_TX_S 21
2361#define PM_TX_V(x) ((x) << PM_TX_S)
2362#define PM_TX_F PM_TX_V(1U)
2363
2364#define MA_S 20
2365#define MA_V(x) ((x) << MA_S)
2366#define MA_F MA_V(1U)
2367
2368#define TP_S 19
2369#define TP_V(x) ((x) << TP_S)
2370#define TP_F TP_V(1U)
2371
2372#define LE_S 18
2373#define LE_V(x) ((x) << LE_S)
2374#define LE_F LE_V(1U)
2375
2376#define EDC1_S 17
2377#define EDC1_V(x) ((x) << EDC1_S)
2378#define EDC1_F EDC1_V(1U)
2379
2380#define EDC0_S 16
2381#define EDC0_V(x) ((x) << EDC0_S)
2382#define EDC0_F EDC0_V(1U)
2383
2384#define MC_S 15
2385#define MC_V(x) ((x) << MC_S)
2386#define MC_F MC_V(1U)
2387
2388#define PCIE_S 14
2389#define PCIE_V(x) ((x) << PCIE_S)
2390#define PCIE_F PCIE_V(1U)
2391
2392#define XGMAC_KR1_S 12
2393#define XGMAC_KR1_V(x) ((x) << XGMAC_KR1_S)
2394#define XGMAC_KR1_F XGMAC_KR1_V(1U)
2395
2396#define XGMAC_KR0_S 11
2397#define XGMAC_KR0_V(x) ((x) << XGMAC_KR0_S)
2398#define XGMAC_KR0_F XGMAC_KR0_V(1U)
2399
2400#define XGMAC1_S 10
2401#define XGMAC1_V(x) ((x) << XGMAC1_S)
2402#define XGMAC1_F XGMAC1_V(1U)
2403
2404#define XGMAC0_S 9
2405#define XGMAC0_V(x) ((x) << XGMAC0_S)
2406#define XGMAC0_F XGMAC0_V(1U)
2407
2408#define SMB_S 8
2409#define SMB_V(x) ((x) << SMB_S)
2410#define SMB_F SMB_V(1U)
2411
2412#define SF_S 7
2413#define SF_V(x) ((x) << SF_S)
2414#define SF_F SF_V(1U)
2415
2416#define PL_S 6
2417#define PL_V(x) ((x) << PL_S)
2418#define PL_F PL_V(1U)
2419
2420#define NCSI_S 5
2421#define NCSI_V(x) ((x) << NCSI_S)
2422#define NCSI_F NCSI_V(1U)
2423
2424#define MPS_S 4
2425#define MPS_V(x) ((x) << MPS_S)
2426#define MPS_F MPS_V(1U)
2427
2428#define CIM_S 0
2429#define CIM_V(x) ((x) << CIM_S)
2430#define CIM_F CIM_V(1U)
2431
2432#define MC1_S 31
2433
2434#define PL_INT_ENABLE_A 0x19410
2435#define PL_INT_MAP0_A 0x19414
2436#define PL_RST_A 0x19428
2437
2438#define PIORST_S 1
2439#define PIORST_V(x) ((x) << PIORST_S)
2440#define PIORST_F PIORST_V(1U)
2441
2442#define PIORSTMODE_S 0
2443#define PIORSTMODE_V(x) ((x) << PIORSTMODE_S)
2444#define PIORSTMODE_F PIORSTMODE_V(1U)
2445
2446#define PL_PL_INT_CAUSE_A 0x19430
2447
2448#define FATALPERR_S 4
2449#define FATALPERR_V(x) ((x) << FATALPERR_S)
2450#define FATALPERR_F FATALPERR_V(1U)
2451
2452#define PERRVFID_S 0
2453#define PERRVFID_V(x) ((x) << PERRVFID_S)
2454#define PERRVFID_F PERRVFID_V(1U)
2455
2456#define PL_REV_A 0x1943c
2457
2458#define REV_S 0
2459#define REV_M 0xfU
2460#define REV_V(x) ((x) << REV_S)
2461#define REV_G(x) (((x) >> REV_S) & REV_M)
2462
2463#define LE_DB_INT_CAUSE_A 0x19c3c
2464
2465#define REQQPARERR_S 16
2466#define REQQPARERR_V(x) ((x) << REQQPARERR_S)
2467#define REQQPARERR_F REQQPARERR_V(1U)
2468
2469#define UNKNOWNCMD_S 15
2470#define UNKNOWNCMD_V(x) ((x) << UNKNOWNCMD_S)
2471#define UNKNOWNCMD_F UNKNOWNCMD_V(1U)
2472
2473#define PARITYERR_S 6
2474#define PARITYERR_V(x) ((x) << PARITYERR_S)
2475#define PARITYERR_F PARITYERR_V(1U)
2476
2477#define LIPMISS_S 5
2478#define LIPMISS_V(x) ((x) << LIPMISS_S)
2479#define LIPMISS_F LIPMISS_V(1U)
2480
2481#define LIP0_S 4
2482#define LIP0_V(x) ((x) << LIP0_S)
2483#define LIP0_F LIP0_V(1U)
2484
2485#define NCSI_INT_CAUSE_A 0x1a0d8
2486
2487#define CIM_DM_PRTY_ERR_S 8
2488#define CIM_DM_PRTY_ERR_V(x) ((x) << CIM_DM_PRTY_ERR_S)
2489#define CIM_DM_PRTY_ERR_F CIM_DM_PRTY_ERR_V(1U)
2490
2491#define MPS_DM_PRTY_ERR_S 7
2492#define MPS_DM_PRTY_ERR_V(x) ((x) << MPS_DM_PRTY_ERR_S)
2493#define MPS_DM_PRTY_ERR_F MPS_DM_PRTY_ERR_V(1U)
2494
2495#define TXFIFO_PRTY_ERR_S 1
2496#define TXFIFO_PRTY_ERR_V(x) ((x) << TXFIFO_PRTY_ERR_S)
2497#define TXFIFO_PRTY_ERR_F TXFIFO_PRTY_ERR_V(1U)
2498
2499#define RXFIFO_PRTY_ERR_S 0
2500#define RXFIFO_PRTY_ERR_V(x) ((x) << RXFIFO_PRTY_ERR_S)
2501#define RXFIFO_PRTY_ERR_F RXFIFO_PRTY_ERR_V(1U)
2502
2503#define XGMAC_PORT_CFG2_A 0x1018
2504
2505#define PATEN_S 18
2506#define PATEN_V(x) ((x) << PATEN_S)
2507#define PATEN_F PATEN_V(1U)
2508
2509#define MAGICEN_S 17
2510#define MAGICEN_V(x) ((x) << MAGICEN_S)
2511#define MAGICEN_F MAGICEN_V(1U)
2512
2513#define XGMAC_PORT_MAGIC_MACID_LO 0x1024
2514#define XGMAC_PORT_MAGIC_MACID_HI 0x1028
2515
2516#define XGMAC_PORT_EPIO_DATA0_A 0x10c0
2517#define XGMAC_PORT_EPIO_DATA1_A 0x10c4
2518#define XGMAC_PORT_EPIO_DATA2_A 0x10c8
2519#define XGMAC_PORT_EPIO_DATA3_A 0x10cc
2520#define XGMAC_PORT_EPIO_OP_A 0x10d0
2521
2522#define EPIOWR_S 8
2523#define EPIOWR_V(x) ((x) << EPIOWR_S)
2524#define EPIOWR_F EPIOWR_V(1U)
2525
2526#define ADDRESS_S 0
2527#define ADDRESS_V(x) ((x) << ADDRESS_S)
2528
2529#define MAC_PORT_INT_CAUSE_A 0x8dc
2530#define XGMAC_PORT_INT_CAUSE_A 0x10dc
2531
2532#define TP_TX_MOD_QUEUE_REQ_MAP_A 0x7e28
2533
2534#define TP_TX_MOD_QUEUE_WEIGHT0_A 0x7e30
2535#define TP_TX_MOD_CHANNEL_WEIGHT_A 0x7e34
2536
2537#define TX_MOD_QUEUE_REQ_MAP_S 0
2538#define TX_MOD_QUEUE_REQ_MAP_V(x) ((x) << TX_MOD_QUEUE_REQ_MAP_S)
2539
2540#define TX_MODQ_WEIGHT3_S 24
2541#define TX_MODQ_WEIGHT3_V(x) ((x) << TX_MODQ_WEIGHT3_S)
2542
2543#define TX_MODQ_WEIGHT2_S 16
2544#define TX_MODQ_WEIGHT2_V(x) ((x) << TX_MODQ_WEIGHT2_S)
2545
2546#define TX_MODQ_WEIGHT1_S 8
2547#define TX_MODQ_WEIGHT1_V(x) ((x) << TX_MODQ_WEIGHT1_S)
2548
2549#define TX_MODQ_WEIGHT0_S 0
2550#define TX_MODQ_WEIGHT0_V(x) ((x) << TX_MODQ_WEIGHT0_S)
2551
2552#define TP_TX_SCHED_HDR_A 0x23
2553#define TP_TX_SCHED_FIFO_A 0x24
2554#define TP_TX_SCHED_PCMD_A 0x25
1318 2555
1319#define NUM_MPS_CLS_SRAM_L_INSTANCES 336 2556#define NUM_MPS_CLS_SRAM_L_INSTANCES 336
1320#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512 2557#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512
@@ -1329,62 +2566,149 @@
1329#define MC_STRIDE (MC_1_BASE_ADDR - MC_0_BASE_ADDR) 2566#define MC_STRIDE (MC_1_BASE_ADDR - MC_0_BASE_ADDR)
1330#define MC_REG(reg, idx) (reg + MC_STRIDE * idx) 2567#define MC_REG(reg, idx) (reg + MC_STRIDE * idx)
1331 2568
1332#define MC_P_BIST_CMD 0x41400 2569#define MC_P_BIST_CMD_A 0x41400
1333#define MC_P_BIST_CMD_ADDR 0x41404 2570#define MC_P_BIST_CMD_ADDR_A 0x41404
1334#define MC_P_BIST_CMD_LEN 0x41408 2571#define MC_P_BIST_CMD_LEN_A 0x41408
1335#define MC_P_BIST_DATA_PATTERN 0x4140c 2572#define MC_P_BIST_DATA_PATTERN_A 0x4140c
1336#define MC_P_BIST_STATUS_RDATA 0x41488 2573#define MC_P_BIST_STATUS_RDATA_A 0x41488
1337#define EDC_T50_BASE_ADDR 0x50000 2574
1338#define EDC_H_BIST_CMD 0x50004 2575#define EDC_T50_BASE_ADDR 0x50000
1339#define EDC_H_BIST_CMD_ADDR 0x50008 2576
1340#define EDC_H_BIST_CMD_LEN 0x5000c 2577#define EDC_H_BIST_CMD_A 0x50004
1341#define EDC_H_BIST_DATA_PATTERN 0x50010 2578#define EDC_H_BIST_CMD_ADDR_A 0x50008
1342#define EDC_H_BIST_STATUS_RDATA 0x50028 2579#define EDC_H_BIST_CMD_LEN_A 0x5000c
1343 2580#define EDC_H_BIST_DATA_PATTERN_A 0x50010
1344#define EDC_T51_BASE_ADDR 0x50800 2581#define EDC_H_BIST_STATUS_RDATA_A 0x50028
2582
2583#define EDC_T51_BASE_ADDR 0x50800
2584
1345#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR) 2585#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
1346#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx) 2586#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
1347 2587
1348#define A_PL_VF_REV 0x4 2588#define PL_VF_REV_A 0x4
1349#define A_PL_VF_WHOAMI 0x0 2589#define PL_VF_WHOAMI_A 0x0
1350#define A_PL_VF_REVISION 0x8 2590#define PL_VF_REVISION_A 0x8
1351 2591
1352#define S_CHIPID 4 2592/* registers for module CIM */
1353#define M_CHIPID 0xfU 2593#define CIM_HOST_ACC_CTRL_A 0x7b50
1354#define V_CHIPID(x) ((x) << S_CHIPID) 2594#define CIM_HOST_ACC_DATA_A 0x7b54
1355#define G_CHIPID(x) (((x) >> S_CHIPID) & M_CHIPID) 2595#define UP_UP_DBG_LA_CFG_A 0x140
2596#define UP_UP_DBG_LA_DATA_A 0x144
1356 2597
1357/* TP_VLAN_PRI_MAP controls which subset of fields will be present in the 2598#define HOSTBUSY_S 17
1358 * Compressed Filter Tuple for LE filters. Each bit set in TP_VLAN_PRI_MAP 2599#define HOSTBUSY_V(x) ((x) << HOSTBUSY_S)
1359 * selects for a particular field being present. These fields, when present 2600#define HOSTBUSY_F HOSTBUSY_V(1U)
1360 * in the Compressed Filter Tuple, have the following widths in bits. 2601
1361 */ 2602#define HOSTWRITE_S 16
1362#define W_FT_FCOE 1 2603#define HOSTWRITE_V(x) ((x) << HOSTWRITE_S)
1363#define W_FT_PORT 3 2604#define HOSTWRITE_F HOSTWRITE_V(1U)
1364#define W_FT_VNIC_ID 17 2605
1365#define W_FT_VLAN 17 2606#define CIM_IBQ_DBG_CFG_A 0x7b60
1366#define W_FT_TOS 8 2607
1367#define W_FT_PROTOCOL 8 2608#define IBQDBGADDR_S 16
1368#define W_FT_ETHERTYPE 16 2609#define IBQDBGADDR_M 0xfffU
1369#define W_FT_MACMATCH 9 2610#define IBQDBGADDR_V(x) ((x) << IBQDBGADDR_S)
1370#define W_FT_MPSHITTYPE 3 2611#define IBQDBGADDR_G(x) (((x) >> IBQDBGADDR_S) & IBQDBGADDR_M)
1371#define W_FT_FRAGMENTATION 1 2612
1372 2613#define IBQDBGBUSY_S 1
1373/* Some of the Compressed Filter Tuple fields have internal structure. These 2614#define IBQDBGBUSY_V(x) ((x) << IBQDBGBUSY_S)
1374 * bit shifts/masks describe those structures. All shifts are relative to the 2615#define IBQDBGBUSY_F IBQDBGBUSY_V(1U)
1375 * base position of the fields within the Compressed Filter Tuple 2616
1376 */ 2617#define IBQDBGEN_S 0
1377#define S_FT_VLAN_VLD 16 2618#define IBQDBGEN_V(x) ((x) << IBQDBGEN_S)
1378#define V_FT_VLAN_VLD(x) ((x) << S_FT_VLAN_VLD) 2619#define IBQDBGEN_F IBQDBGEN_V(1U)
1379#define F_FT_VLAN_VLD V_FT_VLAN_VLD(1U) 2620
2621#define CIM_OBQ_DBG_CFG_A 0x7b64
2622
2623#define OBQDBGADDR_S 16
2624#define OBQDBGADDR_M 0xfffU
2625#define OBQDBGADDR_V(x) ((x) << OBQDBGADDR_S)
2626#define OBQDBGADDR_G(x) (((x) >> OBQDBGADDR_S) & OBQDBGADDR_M)
2627
2628#define OBQDBGBUSY_S 1
2629#define OBQDBGBUSY_V(x) ((x) << OBQDBGBUSY_S)
2630#define OBQDBGBUSY_F OBQDBGBUSY_V(1U)
2631
2632#define OBQDBGEN_S 0
2633#define OBQDBGEN_V(x) ((x) << OBQDBGEN_S)
2634#define OBQDBGEN_F OBQDBGEN_V(1U)
2635
2636#define CIM_IBQ_DBG_DATA_A 0x7b68
2637#define CIM_OBQ_DBG_DATA_A 0x7b6c
2638
2639#define UPDBGLARDEN_S 1
2640#define UPDBGLARDEN_V(x) ((x) << UPDBGLARDEN_S)
2641#define UPDBGLARDEN_F UPDBGLARDEN_V(1U)
2642
2643#define UPDBGLAEN_S 0
2644#define UPDBGLAEN_V(x) ((x) << UPDBGLAEN_S)
2645#define UPDBGLAEN_F UPDBGLAEN_V(1U)
2646
2647#define UPDBGLARDPTR_S 2
2648#define UPDBGLARDPTR_M 0xfffU
2649#define UPDBGLARDPTR_V(x) ((x) << UPDBGLARDPTR_S)
2650
2651#define UPDBGLAWRPTR_S 16
2652#define UPDBGLAWRPTR_M 0xfffU
2653#define UPDBGLAWRPTR_G(x) (((x) >> UPDBGLAWRPTR_S) & UPDBGLAWRPTR_M)
2654
2655#define UPDBGLACAPTPCONLY_S 30
2656#define UPDBGLACAPTPCONLY_V(x) ((x) << UPDBGLACAPTPCONLY_S)
2657#define UPDBGLACAPTPCONLY_F UPDBGLACAPTPCONLY_V(1U)
2658
2659#define CIM_QUEUE_CONFIG_REF_A 0x7b48
2660#define CIM_QUEUE_CONFIG_CTRL_A 0x7b4c
2661
2662#define CIMQSIZE_S 24
2663#define CIMQSIZE_M 0x3fU
2664#define CIMQSIZE_G(x) (((x) >> CIMQSIZE_S) & CIMQSIZE_M)
2665
2666#define CIMQBASE_S 16
2667#define CIMQBASE_M 0x3fU
2668#define CIMQBASE_G(x) (((x) >> CIMQBASE_S) & CIMQBASE_M)
2669
2670#define QUEFULLTHRSH_S 0
2671#define QUEFULLTHRSH_M 0x1ffU
2672#define QUEFULLTHRSH_G(x) (((x) >> QUEFULLTHRSH_S) & QUEFULLTHRSH_M)
2673
2674#define UP_IBQ_0_RDADDR_A 0x10
2675#define UP_IBQ_0_SHADOW_RDADDR_A 0x280
2676#define UP_OBQ_0_REALADDR_A 0x104
2677#define UP_OBQ_0_SHADOW_REALADDR_A 0x394
2678
2679#define IBQRDADDR_S 0
2680#define IBQRDADDR_M 0x1fffU
2681#define IBQRDADDR_G(x) (((x) >> IBQRDADDR_S) & IBQRDADDR_M)
2682
2683#define IBQWRADDR_S 0
2684#define IBQWRADDR_M 0x1fffU
2685#define IBQWRADDR_G(x) (((x) >> IBQWRADDR_S) & IBQWRADDR_M)
2686
2687#define QUERDADDR_S 0
2688#define QUERDADDR_M 0x7fffU
2689#define QUERDADDR_G(x) (((x) >> QUERDADDR_S) & QUERDADDR_M)
2690
2691#define QUEREMFLITS_S 0
2692#define QUEREMFLITS_M 0x7ffU
2693#define QUEREMFLITS_G(x) (((x) >> QUEREMFLITS_S) & QUEREMFLITS_M)
2694
2695#define QUEEOPCNT_S 16
2696#define QUEEOPCNT_M 0xfffU
2697#define QUEEOPCNT_G(x) (((x) >> QUEEOPCNT_S) & QUEEOPCNT_M)
2698
2699#define QUESOPCNT_S 0
2700#define QUESOPCNT_M 0xfffU
2701#define QUESOPCNT_G(x) (((x) >> QUESOPCNT_S) & QUESOPCNT_M)
1380 2702
1381#define S_FT_VNID_ID_VF 0 2703#define OBQSELECT_S 4
1382#define V_FT_VNID_ID_VF(x) ((x) << S_FT_VNID_ID_VF) 2704#define OBQSELECT_V(x) ((x) << OBQSELECT_S)
2705#define OBQSELECT_F OBQSELECT_V(1U)
1383 2706
1384#define S_FT_VNID_ID_PF 7 2707#define IBQSELECT_S 3
1385#define V_FT_VNID_ID_PF(x) ((x) << S_FT_VNID_ID_PF) 2708#define IBQSELECT_V(x) ((x) << IBQSELECT_S)
2709#define IBQSELECT_F IBQSELECT_V(1U)
1386 2710
1387#define S_FT_VNID_ID_VLD 16 2711#define QUENUMSELECT_S 0
1388#define V_FT_VNID_ID_VLD(x) ((x) << S_FT_VNID_ID_VLD) 2712#define QUENUMSELECT_V(x) ((x) << QUENUMSELECT_S)
1389 2713
1390#endif /* __T4_REGS_H */ 2714#endif /* __T4_REGS_H */
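
The t4_regs.h hunks above move from the old S_/M_/V_/G_ prefix style to suffix-style field macros: NAME_S is the bit offset, NAME_M the unshifted mask, NAME_V(x) places a value, NAME_G(x) extracts one, and NAME_F is the single-bit flag form (NAME_V(1U)); EDC_REG_T5(reg, idx) likewise offsets a memory-controller register by idx times the 0x800 stride between EDC0 and EDC1. A minimal, self-contained sketch of how these macros compose and decode a register word, reusing the IBQDBGADDR/IBQDBGEN definitions from above (the main() harness is illustrative only, not driver code):

#include <stdint.h>
#include <stdio.h>

#define IBQDBGADDR_S	16
#define IBQDBGADDR_M	0xfffU
#define IBQDBGADDR_V(x)	((x) << IBQDBGADDR_S)
#define IBQDBGADDR_G(x)	(((x) >> IBQDBGADDR_S) & IBQDBGADDR_M)

#define IBQDBGEN_S	0
#define IBQDBGEN_V(x)	((x) << IBQDBGEN_S)
#define IBQDBGEN_F	IBQDBGEN_V(1U)

int main(void)
{
	/* Compose a CIM_IBQ_DBG_CFG value: debug read address 0x2a, debug enabled. */
	uint32_t cfg = IBQDBGADDR_V(0x2aU) | IBQDBGEN_F;

	/* Decode the fields back out with the _G() accessor and the _F flag. */
	printf("addr=%#x enabled=%d\n", IBQDBGADDR_G(cfg), (cfg & IBQDBGEN_F) != 0);
	return 0;
}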
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
new file mode 100644
index 000000000000..19b2dcf6acde
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
@@ -0,0 +1,124 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#ifndef __T4_VALUES_H__
36#define __T4_VALUES_H__
37
38/* This file contains definitions for various T4 register value hardware
39 * constants. The types of values encoded here are predominantly those for
40 * register fields which control "modal" behavior. For the most part, we do
41 * not include definitions for register fields which are simple numeric
42 * metrics, etc.
43 */
44
45/* SGE register field values.
46 */
47
48/* CONTROL1 register */
49#define RXPKTCPLMODE_SPLIT_X 1
50
51#define INGPCIEBOUNDARY_SHIFT_X 5
52#define INGPCIEBOUNDARY_32B_X 0
53
54#define INGPADBOUNDARY_SHIFT_X 5
55
56/* CONTROL2 register */
57#define INGPACKBOUNDARY_SHIFT_X 5
58#define INGPACKBOUNDARY_16B_X 0
59
60/* GTS register */
61#define SGE_TIMERREGS 6
62#define TIMERREG_COUNTER0_X 0
63
64/* T5 and later support a new BAR2-based doorbell mechanism for Egress Queues.
65 * The User Doorbells are each 128 bytes in length with a Simple Doorbell at
66 * offsets 8x and a Write Combining single 64-byte Egress Queue Unit
67 * (IDXSIZE_UNIT_X) Gather Buffer interface at offset 64. For Ingress Queues,
68 * we have a Going To Sleep register at offsets 8x+4.
69 *
70 * As noted above, we have many instances of the Simple Doorbell and Going To
71 * Sleep registers at offsets 8x and 8x+4, respectively. We want to use a
72 * non-64-byte aligned offset for the Simple Doorbell in order to attempt to
73 * avoid buffering of the writes to the Simple Doorbell and we want to use a
74 * non-contiguous offset for the Going To Sleep writes in order to avoid
75 * possible combining between them.
76 */
77#define SGE_UDB_SIZE 128
78#define SGE_UDB_KDOORBELL 8
79#define SGE_UDB_GTS 20
80#define SGE_UDB_WCDOORBELL 64
81
82/* CIM register field values.
83 */
84#define X_MBOWNER_FW 1
85#define X_MBOWNER_PL 2
86
87/* PCI-E definitions */
88#define WINDOW_SHIFT_X 10
89#define PCIEOFST_SHIFT_X 10
90
91/* TP_VLAN_PRI_MAP controls which subset of fields will be present in the
92 * Compressed Filter Tuple for LE filters. Each bit set in TP_VLAN_PRI_MAP
93 * selects for a particular field being present. These fields, when present
94 * in the Compressed Filter Tuple, have the following widths in bits.
95 */
96#define FT_FCOE_W 1
97#define FT_PORT_W 3
98#define FT_VNIC_ID_W 17
99#define FT_VLAN_W 17
100#define FT_TOS_W 8
101#define FT_PROTOCOL_W 8
102#define FT_ETHERTYPE_W 16
103#define FT_MACMATCH_W 9
104#define FT_MPSHITTYPE_W 3
105#define FT_FRAGMENTATION_W 1
106
107/* Some of the Compressed Filter Tuple fields have internal structure. These
108 * bit shifts/masks describe those structures. All shifts are relative to the
109 * base position of the fields within the Compressed Filter Tuple
110 */
111#define FT_VLAN_VLD_S 16
112#define FT_VLAN_VLD_V(x) ((x) << FT_VLAN_VLD_S)
113#define FT_VLAN_VLD_F FT_VLAN_VLD_V(1U)
114
115#define FT_VNID_ID_VF_S 0
116#define FT_VNID_ID_VF_V(x) ((x) << FT_VNID_ID_VF_S)
117
118#define FT_VNID_ID_PF_S 7
119#define FT_VNID_ID_PF_V(x) ((x) << FT_VNID_ID_PF_S)
120
121#define FT_VNID_ID_VLD_S 16
122#define FT_VNID_ID_VLD_V(x) ((x) << FT_VNID_ID_VLD_S)
123
124#endif /* __T4_VALUES_H__ */
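
The doorbell comment in this new header is the key to the SGE_UDB_* constants. A rough sketch of how a driver might locate a queue's Simple Doorbell, Going To Sleep, and Write Combining registers inside its 128-byte User Doorbell window follows; it assumes one SGE_UDB_SIZE window per BAR2 queue, which glosses over the real driver's queues-per-page arithmetic, and the udb_addr() helper is purely illustrative:

#include <stddef.h>
#include <stdint.h>

#define SGE_UDB_SIZE		128
#define SGE_UDB_KDOORBELL	8	/* Simple Doorbell, intentionally not 64-byte aligned */
#define SGE_UDB_GTS		20	/* Going To Sleep register */
#define SGE_UDB_WCDOORBELL	64	/* Write Combining 64-byte gather buffer */

/* Hypothetical helper: address of one register within a queue's UDB window,
 * assuming a simple one-window-per-queue layout of the mapped BAR2 region.
 */
static inline volatile uint8_t *udb_addr(volatile uint8_t *bar2,
					 unsigned int qid, unsigned int reg)
{
	return bar2 + (size_t)qid * SGE_UDB_SIZE + reg;
}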
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 7c0aec85137a..9b353a88cbda 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -673,6 +673,7 @@ enum fw_cmd_opcodes {
673 FW_RSS_IND_TBL_CMD = 0x20, 673 FW_RSS_IND_TBL_CMD = 0x20,
674 FW_RSS_GLB_CONFIG_CMD = 0x22, 674 FW_RSS_GLB_CONFIG_CMD = 0x22,
675 FW_RSS_VI_CONFIG_CMD = 0x23, 675 FW_RSS_VI_CONFIG_CMD = 0x23,
676 FW_DEVLOG_CMD = 0x25,
676 FW_CLIP_CMD = 0x28, 677 FW_CLIP_CMD = 0x28,
677 FW_LASTC2E_CMD = 0x40, 678 FW_LASTC2E_CMD = 0x40,
678 FW_ERROR_CMD = 0x80, 679 FW_ERROR_CMD = 0x80,
@@ -1058,9 +1059,11 @@ enum fw_params_param_dev {
1058 FW_PARAMS_PARAM_DEV_FWREV = 0x0B, 1059 FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
1059 FW_PARAMS_PARAM_DEV_TPREV = 0x0C, 1060 FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
1060 FW_PARAMS_PARAM_DEV_CF = 0x0D, 1061 FW_PARAMS_PARAM_DEV_CF = 0x0D,
1062 FW_PARAMS_PARAM_DEV_DIAG = 0x11,
1061 FW_PARAMS_PARAM_DEV_MAXORDIRD_QP = 0x13, /* max supported QP IRD/ORD */ 1063 FW_PARAMS_PARAM_DEV_MAXORDIRD_QP = 0x13, /* max supported QP IRD/ORD */
1062 FW_PARAMS_PARAM_DEV_MAXIRD_ADAPTER = 0x14, /* max supported adap IRD */ 1064 FW_PARAMS_PARAM_DEV_MAXIRD_ADAPTER = 0x14, /* max supported adap IRD */
1063 FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17, 1065 FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17,
1066 FW_PARAMS_PARAM_DEV_FWCACHE = 0x18,
1064}; 1067};
1065 1068
1066/* 1069/*
@@ -1120,6 +1123,16 @@ enum fw_params_param_dmaq {
1120 FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH = 0x13, 1123 FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH = 0x13,
1121}; 1124};
1122 1125
1126enum fw_params_param_dev_diag {
1127 FW_PARAM_DEV_DIAG_TMP = 0x00,
1128 FW_PARAM_DEV_DIAG_VDD = 0x01,
1129};
1130
1131enum fw_params_param_dev_fwcache {
1132 FW_PARAM_DEV_FWCACHE_FLUSH = 0x00,
1133 FW_PARAM_DEV_FWCACHE_FLUSHINV = 0x01,
1134};
1135
1123#define FW_PARAMS_MNEM_S 24 1136#define FW_PARAMS_MNEM_S 24
1124#define FW_PARAMS_MNEM_V(x) ((x) << FW_PARAMS_MNEM_S) 1137#define FW_PARAMS_MNEM_V(x) ((x) << FW_PARAMS_MNEM_S)
1125 1138
@@ -3005,21 +3018,29 @@ enum fw_hdr_chip {
3005 3018
3006#define FW_HDR_FW_VER_MAJOR_S 24 3019#define FW_HDR_FW_VER_MAJOR_S 24
3007#define FW_HDR_FW_VER_MAJOR_M 0xff 3020#define FW_HDR_FW_VER_MAJOR_M 0xff
3021#define FW_HDR_FW_VER_MAJOR_V(x) \
3022 ((x) << FW_HDR_FW_VER_MAJOR_S)
3008#define FW_HDR_FW_VER_MAJOR_G(x) \ 3023#define FW_HDR_FW_VER_MAJOR_G(x) \
3009 (((x) >> FW_HDR_FW_VER_MAJOR_S) & FW_HDR_FW_VER_MAJOR_M) 3024 (((x) >> FW_HDR_FW_VER_MAJOR_S) & FW_HDR_FW_VER_MAJOR_M)
3010 3025
3011#define FW_HDR_FW_VER_MINOR_S 16 3026#define FW_HDR_FW_VER_MINOR_S 16
3012#define FW_HDR_FW_VER_MINOR_M 0xff 3027#define FW_HDR_FW_VER_MINOR_M 0xff
3028#define FW_HDR_FW_VER_MINOR_V(x) \
3029 ((x) << FW_HDR_FW_VER_MINOR_S)
3013#define FW_HDR_FW_VER_MINOR_G(x) \ 3030#define FW_HDR_FW_VER_MINOR_G(x) \
3014 (((x) >> FW_HDR_FW_VER_MINOR_S) & FW_HDR_FW_VER_MINOR_M) 3031 (((x) >> FW_HDR_FW_VER_MINOR_S) & FW_HDR_FW_VER_MINOR_M)
3015 3032
3016#define FW_HDR_FW_VER_MICRO_S 8 3033#define FW_HDR_FW_VER_MICRO_S 8
3017#define FW_HDR_FW_VER_MICRO_M 0xff 3034#define FW_HDR_FW_VER_MICRO_M 0xff
3035#define FW_HDR_FW_VER_MICRO_V(x) \
3036 ((x) << FW_HDR_FW_VER_MICRO_S)
3018#define FW_HDR_FW_VER_MICRO_G(x) \ 3037#define FW_HDR_FW_VER_MICRO_G(x) \
3019 (((x) >> FW_HDR_FW_VER_MICRO_S) & FW_HDR_FW_VER_MICRO_M) 3038 (((x) >> FW_HDR_FW_VER_MICRO_S) & FW_HDR_FW_VER_MICRO_M)
3020 3039
3021#define FW_HDR_FW_VER_BUILD_S 0 3040#define FW_HDR_FW_VER_BUILD_S 0
3022#define FW_HDR_FW_VER_BUILD_M 0xff 3041#define FW_HDR_FW_VER_BUILD_M 0xff
3042#define FW_HDR_FW_VER_BUILD_V(x) \
3043 ((x) << FW_HDR_FW_VER_BUILD_S)
3023#define FW_HDR_FW_VER_BUILD_G(x) \ 3044#define FW_HDR_FW_VER_BUILD_G(x) \
3024 (((x) >> FW_HDR_FW_VER_BUILD_S) & FW_HDR_FW_VER_BUILD_M) 3045 (((x) >> FW_HDR_FW_VER_BUILD_S) & FW_HDR_FW_VER_BUILD_M)
3025 3046
@@ -3038,4 +3059,84 @@ enum fw_hdr_flags {
3038 FW_HDR_FLAGS_RESET_HALT = 0x00000001, 3059 FW_HDR_FLAGS_RESET_HALT = 0x00000001,
3039}; 3060};
3040 3061
3062/* length of the formatting string */
3063#define FW_DEVLOG_FMT_LEN 192
3064
3065/* maximum number of the formatting string parameters */
3066#define FW_DEVLOG_FMT_PARAMS_NUM 8
3067
3068/* priority levels */
3069enum fw_devlog_level {
3070 FW_DEVLOG_LEVEL_EMERG = 0x0,
3071 FW_DEVLOG_LEVEL_CRIT = 0x1,
3072 FW_DEVLOG_LEVEL_ERR = 0x2,
3073 FW_DEVLOG_LEVEL_NOTICE = 0x3,
3074 FW_DEVLOG_LEVEL_INFO = 0x4,
3075 FW_DEVLOG_LEVEL_DEBUG = 0x5,
3076 FW_DEVLOG_LEVEL_MAX = 0x5,
3077};
3078
3079/* facilities that may send a log message */
3080enum fw_devlog_facility {
3081 FW_DEVLOG_FACILITY_CORE = 0x00,
3082 FW_DEVLOG_FACILITY_CF = 0x01,
3083 FW_DEVLOG_FACILITY_SCHED = 0x02,
3084 FW_DEVLOG_FACILITY_TIMER = 0x04,
3085 FW_DEVLOG_FACILITY_RES = 0x06,
3086 FW_DEVLOG_FACILITY_HW = 0x08,
3087 FW_DEVLOG_FACILITY_FLR = 0x10,
3088 FW_DEVLOG_FACILITY_DMAQ = 0x12,
3089 FW_DEVLOG_FACILITY_PHY = 0x14,
3090 FW_DEVLOG_FACILITY_MAC = 0x16,
3091 FW_DEVLOG_FACILITY_PORT = 0x18,
3092 FW_DEVLOG_FACILITY_VI = 0x1A,
3093 FW_DEVLOG_FACILITY_FILTER = 0x1C,
3094 FW_DEVLOG_FACILITY_ACL = 0x1E,
3095 FW_DEVLOG_FACILITY_TM = 0x20,
3096 FW_DEVLOG_FACILITY_QFC = 0x22,
3097 FW_DEVLOG_FACILITY_DCB = 0x24,
3098 FW_DEVLOG_FACILITY_ETH = 0x26,
3099 FW_DEVLOG_FACILITY_OFLD = 0x28,
3100 FW_DEVLOG_FACILITY_RI = 0x2A,
3101 FW_DEVLOG_FACILITY_ISCSI = 0x2C,
3102 FW_DEVLOG_FACILITY_FCOE = 0x2E,
3103 FW_DEVLOG_FACILITY_FOISCSI = 0x30,
3104 FW_DEVLOG_FACILITY_FOFCOE = 0x32,
3105 FW_DEVLOG_FACILITY_MAX = 0x32,
3106};
3107
3108/* log message format */
3109struct fw_devlog_e {
3110 __be64 timestamp;
3111 __be32 seqno;
3112 __be16 reserved1;
3113 __u8 level;
3114 __u8 facility;
3115 __u8 fmt[FW_DEVLOG_FMT_LEN];
3116 __be32 params[FW_DEVLOG_FMT_PARAMS_NUM];
3117 __be32 reserved3[4];
3118};
3119
3120struct fw_devlog_cmd {
3121 __be32 op_to_write;
3122 __be32 retval_len16;
3123 __u8 level;
3124 __u8 r2[7];
3125 __be32 memtype_devlog_memaddr16_devlog;
3126 __be32 memsize_devlog;
3127 __be32 r3[2];
3128};
3129
3130#define FW_DEVLOG_CMD_MEMTYPE_DEVLOG_S 28
3131#define FW_DEVLOG_CMD_MEMTYPE_DEVLOG_M 0xf
3132#define FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(x) \
3133 (((x) >> FW_DEVLOG_CMD_MEMTYPE_DEVLOG_S) & \
3134 FW_DEVLOG_CMD_MEMTYPE_DEVLOG_M)
3135
3136#define FW_DEVLOG_CMD_MEMADDR16_DEVLOG_S 0
3137#define FW_DEVLOG_CMD_MEMADDR16_DEVLOG_M 0xfffffff
3138#define FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(x) \
3139 (((x) >> FW_DEVLOG_CMD_MEMADDR16_DEVLOG_S) & \
3140 FW_DEVLOG_CMD_MEMADDR16_DEVLOG_M)
3141
3041#endif /* _T4FW_INTERFACE_H_ */ 3142#endif /* _T4FW_INTERFACE_H_ */
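
A sketch of how the new FW_DEVLOG_CMD fields above might be unpacked once the firmware answers the command. It assumes the MEMADDR16 field carries the device-log start address in units of 16 bytes (hence the shift by 4); the helper name and out-parameters are illustrative, not part of this patch:

/* Hypothetical helper built on the definitions above (relies on the kernel's
 * be32_to_cpu); the units-of-16-bytes reading of MEMADDR16 is an assumption
 * of this sketch.
 */
static void devlog_params(const struct fw_devlog_cmd *rsp,
			  unsigned int *memtype, u32 *start, u32 *size)
{
	u32 meminfo = be32_to_cpu(rsp->memtype_devlog_memaddr16_devlog);

	*memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(meminfo);
	*start   = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(meminfo) << 4;
	*size    = be32_to_cpu(rsp->memsize_devlog);
}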
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
new file mode 100644
index 000000000000..e2bd3f747858
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -0,0 +1,48 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#ifndef __T4FW_VERSION_H__
36#define __T4FW_VERSION_H__
37
38#define T4FW_VERSION_MAJOR 0x01
39#define T4FW_VERSION_MINOR 0x0C
40#define T4FW_VERSION_MICRO 0x19
41#define T4FW_VERSION_BUILD 0x00
42
43#define T5FW_VERSION_MAJOR 0x01
44#define T5FW_VERSION_MINOR 0x0C
45#define T5FW_VERSION_MICRO 0x19
46#define T5FW_VERSION_BUILD 0x00
47
48#endif
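
These constants pair with the FW_HDR_FW_VER_*_V() macros added to t4fw_api.h earlier in this diff; a likely use is packing them into the 32-bit version word that gets compared against a firmware image header. A small sketch under that assumption (the helper name is illustrative):

/* Hypothetical helper: build the packed firmware version the driver expects,
 * using the _V() placement macros from t4fw_api.h and the constants above.
 */
static inline u32 t4fw_expected_version(void)
{
	return FW_HDR_FW_VER_MAJOR_V(T4FW_VERSION_MAJOR) |
	       FW_HDR_FW_VER_MINOR_V(T4FW_VERSION_MINOR) |
	       FW_HDR_FW_VER_MICRO_V(T4FW_VERSION_MICRO) |
	       FW_HDR_FW_VER_BUILD_V(T4FW_VERSION_BUILD);
}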
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index a936ee8958c7..122e2964e63b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -380,9 +380,9 @@ static void qenable(struct sge_rspq *rspq)
380 * enable interrupts. 380 * enable interrupts.
381 */ 381 */
382 t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, 382 t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
383 CIDXINC(0) | 383 CIDXINC_V(0) |
384 SEINTARM(rspq->intr_params) | 384 SEINTARM_V(rspq->intr_params) |
385 INGRESSQID(rspq->cntxt_id)); 385 INGRESSQID_V(rspq->cntxt_id));
386} 386}
387 387
388/* 388/*
@@ -403,9 +403,9 @@ static void enable_rx(struct adapter *adapter)
403 */ 403 */
404 if (adapter->flags & USING_MSI) 404 if (adapter->flags & USING_MSI)
405 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, 405 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
406 CIDXINC(0) | 406 CIDXINC_V(0) |
407 SEINTARM(s->intrq.intr_params) | 407 SEINTARM_V(s->intrq.intr_params) |
408 INGRESSQID(s->intrq.cntxt_id)); 408 INGRESSQID_V(s->intrq.cntxt_id));
409 409
410} 410}
411 411
@@ -450,7 +450,7 @@ static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
450 /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG. 450 /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
451 */ 451 */
452 const struct cpl_sge_egr_update *p = (void *)(rsp + 3); 452 const struct cpl_sge_egr_update *p = (void *)(rsp + 3);
453 opcode = G_CPL_OPCODE(ntohl(p->opcode_qid)); 453 opcode = CPL_OPCODE_G(ntohl(p->opcode_qid));
454 if (opcode != CPL_SGE_EGR_UPDATE) { 454 if (opcode != CPL_SGE_EGR_UPDATE) {
455 dev_err(adapter->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n" 455 dev_err(adapter->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
456 , opcode); 456 , opcode);
@@ -471,7 +471,7 @@ static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
471 * free TX Queue Descriptors ... 471 * free TX Queue Descriptors ...
472 */ 472 */
473 const struct cpl_sge_egr_update *p = cpl; 473 const struct cpl_sge_egr_update *p = cpl;
474 unsigned int qid = EGR_QID(be32_to_cpu(p->opcode_qid)); 474 unsigned int qid = EGR_QID_G(be32_to_cpu(p->opcode_qid));
475 struct sge *s = &adapter->sge; 475 struct sge *s = &adapter->sge;
476 struct sge_txq *tq; 476 struct sge_txq *tq;
477 struct sge_eth_txq *txq; 477 struct sge_eth_txq *txq;
@@ -1673,7 +1673,7 @@ static void cxgb4vf_get_regs(struct net_device *dev,
1673 reg_block_dump(adapter, regbuf, 1673 reg_block_dump(adapter, regbuf,
1674 T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST, 1674 T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
1675 T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip) 1675 T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip)
1676 ? A_PL_VF_WHOAMI : A_PL_VF_REVISION)); 1676 ? PL_VF_WHOAMI_A : PL_VF_REVISION_A));
1677 reg_block_dump(adapter, regbuf, 1677 reg_block_dump(adapter, regbuf,
1678 T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST, 1678 T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
1679 T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST); 1679 T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);
@@ -2294,26 +2294,22 @@ static int adap_init0(struct adapter *adapter)
2294 * threshold values from the SGE parameters. 2294 * threshold values from the SGE parameters.
2295 */ 2295 */
2296 s->timer_val[0] = core_ticks_to_us(adapter, 2296 s->timer_val[0] = core_ticks_to_us(adapter,
2297 TIMERVALUE0_GET(sge_params->sge_timer_value_0_and_1)); 2297 TIMERVALUE0_G(sge_params->sge_timer_value_0_and_1));
2298 s->timer_val[1] = core_ticks_to_us(adapter, 2298 s->timer_val[1] = core_ticks_to_us(adapter,
2299 TIMERVALUE1_GET(sge_params->sge_timer_value_0_and_1)); 2299 TIMERVALUE1_G(sge_params->sge_timer_value_0_and_1));
2300 s->timer_val[2] = core_ticks_to_us(adapter, 2300 s->timer_val[2] = core_ticks_to_us(adapter,
2301 TIMERVALUE0_GET(sge_params->sge_timer_value_2_and_3)); 2301 TIMERVALUE0_G(sge_params->sge_timer_value_2_and_3));
2302 s->timer_val[3] = core_ticks_to_us(adapter, 2302 s->timer_val[3] = core_ticks_to_us(adapter,
2303 TIMERVALUE1_GET(sge_params->sge_timer_value_2_and_3)); 2303 TIMERVALUE1_G(sge_params->sge_timer_value_2_and_3));
2304 s->timer_val[4] = core_ticks_to_us(adapter, 2304 s->timer_val[4] = core_ticks_to_us(adapter,
2305 TIMERVALUE0_GET(sge_params->sge_timer_value_4_and_5)); 2305 TIMERVALUE0_G(sge_params->sge_timer_value_4_and_5));
2306 s->timer_val[5] = core_ticks_to_us(adapter, 2306 s->timer_val[5] = core_ticks_to_us(adapter,
2307 TIMERVALUE1_GET(sge_params->sge_timer_value_4_and_5)); 2307 TIMERVALUE1_G(sge_params->sge_timer_value_4_and_5));
2308 2308
2309 s->counter_val[0] = 2309 s->counter_val[0] = THRESHOLD_0_G(sge_params->sge_ingress_rx_threshold);
2310 THRESHOLD_0_GET(sge_params->sge_ingress_rx_threshold); 2310 s->counter_val[1] = THRESHOLD_1_G(sge_params->sge_ingress_rx_threshold);
2311 s->counter_val[1] = 2311 s->counter_val[2] = THRESHOLD_2_G(sge_params->sge_ingress_rx_threshold);
2312 THRESHOLD_1_GET(sge_params->sge_ingress_rx_threshold); 2312 s->counter_val[3] = THRESHOLD_3_G(sge_params->sge_ingress_rx_threshold);
2313 s->counter_val[2] =
2314 THRESHOLD_2_GET(sge_params->sge_ingress_rx_threshold);
2315 s->counter_val[3] =
2316 THRESHOLD_3_GET(sge_params->sge_ingress_rx_threshold);
2317 2313
2318 /* 2314 /*
2319 * Grab our Virtual Interface resource allocation, extract the 2315 * Grab our Virtual Interface resource allocation, extract the
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index f7fd1317d996..0545f0de1c52 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -47,6 +47,7 @@
47#include "t4vf_defs.h" 47#include "t4vf_defs.h"
48 48
49#include "../cxgb4/t4_regs.h" 49#include "../cxgb4/t4_regs.h"
50#include "../cxgb4/t4_values.h"
50#include "../cxgb4/t4fw_api.h" 51#include "../cxgb4/t4fw_api.h"
51#include "../cxgb4/t4_msg.h" 52#include "../cxgb4/t4_msg.h"
52 53
@@ -531,11 +532,11 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
531 */ 532 */
532 if (fl->pend_cred >= FL_PER_EQ_UNIT) { 533 if (fl->pend_cred >= FL_PER_EQ_UNIT) {
533 if (is_t4(adapter->params.chip)) 534 if (is_t4(adapter->params.chip))
534 val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT); 535 val = PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
535 else 536 else
536 val = PIDX_T5(fl->pend_cred / FL_PER_EQ_UNIT) | 537 val = PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT) |
537 DBTYPE(1); 538 DBTYPE_F;
538 val |= DBPRIO(1); 539 val |= DBPRIO_F;
539 540
540 /* Make sure all memory writes to the Free List queue are 541 /* Make sure all memory writes to the Free List queue are
541 * committed before we tell the hardware about them. 542 * committed before we tell the hardware about them.
@@ -549,9 +550,9 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
549 if (unlikely(fl->bar2_addr == NULL)) { 550 if (unlikely(fl->bar2_addr == NULL)) {
550 t4_write_reg(adapter, 551 t4_write_reg(adapter,
551 T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, 552 T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
552 QID(fl->cntxt_id) | val); 553 QID_V(fl->cntxt_id) | val);
553 } else { 554 } else {
554 writel(val | QID(fl->bar2_qid), 555 writel(val | QID_V(fl->bar2_qid),
555 fl->bar2_addr + SGE_UDB_KDOORBELL); 556 fl->bar2_addr + SGE_UDB_KDOORBELL);
556 557
557 /* This Write memory Barrier will force the write to 558 /* This Write memory Barrier will force the write to
@@ -925,7 +926,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
925 } 926 }
926 927
927 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | 928 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
928 ULPTX_NSGE(nfrags)); 929 ULPTX_NSGE_V(nfrags));
929 if (likely(--nfrags == 0)) 930 if (likely(--nfrags == 0))
930 return; 931 return;
931 /* 932 /*
@@ -979,12 +980,12 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
979 * doorbell mechanism; otherwise use the new BAR2 mechanism. 980 * doorbell mechanism; otherwise use the new BAR2 mechanism.
980 */ 981 */
981 if (unlikely(tq->bar2_addr == NULL)) { 982 if (unlikely(tq->bar2_addr == NULL)) {
982 u32 val = PIDX(n); 983 u32 val = PIDX_V(n);
983 984
984 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, 985 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
985 QID(tq->cntxt_id) | val); 986 QID_V(tq->cntxt_id) | val);
986 } else { 987 } else {
987 u32 val = PIDX_T5(n); 988 u32 val = PIDX_T5_V(n);
988 989
989 /* T4 and later chips share the same PIDX field offset within 990 /* T4 and later chips share the same PIDX field offset within
990 * the doorbell, but T5 and later shrank the field in order to 991 * the doorbell, but T5 and later shrank the field in order to
@@ -992,7 +993,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
992 * large in the first place (14 bits) so we just use the T5 993 * large in the first place (14 bits) so we just use the T5
993 * and later limits and warn if a Queue ID is too large. 994 * and later limits and warn if a Queue ID is too large.
994 */ 995 */
995 WARN_ON(val & DBPRIO(1)); 996 WARN_ON(val & DBPRIO_F);
996 997
997 /* If we're only writing a single Egress Unit and the BAR2 998 /* If we're only writing a single Egress Unit and the BAR2
998 * Queue ID is 0, we can use the Write Combining Doorbell 999 * Queue ID is 0, we can use the Write Combining Doorbell
@@ -1023,7 +1024,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
1023 count--; 1024 count--;
1024 } 1025 }
1025 } else 1026 } else
1026 writel(val | QID(tq->bar2_qid), 1027 writel(val | QID_V(tq->bar2_qid),
1027 tq->bar2_addr + SGE_UDB_KDOORBELL); 1028 tq->bar2_addr + SGE_UDB_KDOORBELL);
1028 1029
1029 /* This Write Memory Barrier will force the write to the User 1030 /* This Write Memory Barrier will force the write to the User
@@ -1325,9 +1326,9 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1325 * If there's a VLAN tag present, add that to the list of things to 1326 * If there's a VLAN tag present, add that to the list of things to
1326 * do in this Work Request. 1327 * do in this Work Request.
1327 */ 1328 */
1328 if (vlan_tx_tag_present(skb)) { 1329 if (skb_vlan_tag_present(skb)) {
1329 txq->vlan_ins++; 1330 txq->vlan_ins++;
1330 cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb)); 1331 cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb));
1331 } 1332 }
1332 1333
1333 /* 1334 /*
@@ -1603,7 +1604,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1603 * If this is a good TCP packet and we have Generic Receive Offload 1604 * If this is a good TCP packet and we have Generic Receive Offload
1604 * enabled, handle the packet in the GRO path. 1605 * enabled, handle the packet in the GRO path.
1605 */ 1606 */
1606 if ((pkt->l2info & cpu_to_be32(RXF_TCP)) && 1607 if ((pkt->l2info & cpu_to_be32(RXF_TCP_F)) &&
1607 (rspq->netdev->features & NETIF_F_GRO) && csum_ok && 1608 (rspq->netdev->features & NETIF_F_GRO) && csum_ok &&
1608 !pkt->ip_frag) { 1609 !pkt->ip_frag) {
1609 do_gro(rxq, gl, pkt); 1610 do_gro(rxq, gl, pkt);
@@ -1625,7 +1626,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1625 rxq->stats.pkts++; 1626 rxq->stats.pkts++;
1626 1627
1627 if (csum_ok && !pkt->err_vec && 1628 if (csum_ok && !pkt->err_vec &&
1628 (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) { 1629 (be32_to_cpu(pkt->l2info) & (RXF_UDP_F | RXF_TCP_F))) {
1629 if (!pkt->ip_frag) 1630 if (!pkt->ip_frag)
1630 skb->ip_summed = CHECKSUM_UNNECESSARY; 1631 skb->ip_summed = CHECKSUM_UNNECESSARY;
1631 else { 1632 else {
@@ -1875,13 +1876,13 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
1875 if (unlikely(work_done == 0)) 1876 if (unlikely(work_done == 0))
1876 rspq->unhandled_irqs++; 1877 rspq->unhandled_irqs++;
1877 1878
1878 val = CIDXINC(work_done) | SEINTARM(intr_params); 1879 val = CIDXINC_V(work_done) | SEINTARM_V(intr_params);
1879 if (is_t4(rspq->adapter->params.chip)) { 1880 if (is_t4(rspq->adapter->params.chip)) {
1880 t4_write_reg(rspq->adapter, 1881 t4_write_reg(rspq->adapter,
1881 T4VF_SGE_BASE_ADDR + SGE_VF_GTS, 1882 T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
1882 val | INGRESSQID((u32)rspq->cntxt_id)); 1883 val | INGRESSQID_V((u32)rspq->cntxt_id));
1883 } else { 1884 } else {
1884 writel(val | INGRESSQID(rspq->bar2_qid), 1885 writel(val | INGRESSQID_V(rspq->bar2_qid),
1885 rspq->bar2_addr + SGE_UDB_GTS); 1886 rspq->bar2_addr + SGE_UDB_GTS);
1886 wmb(); 1887 wmb();
1887 } 1888 }
@@ -1975,12 +1976,12 @@ static unsigned int process_intrq(struct adapter *adapter)
1975 rspq_next(intrq); 1976 rspq_next(intrq);
1976 } 1977 }
1977 1978
1978 val = CIDXINC(work_done) | SEINTARM(intrq->intr_params); 1979 val = CIDXINC_V(work_done) | SEINTARM_V(intrq->intr_params);
1979 if (is_t4(adapter->params.chip)) 1980 if (is_t4(adapter->params.chip))
1980 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, 1981 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
1981 val | INGRESSQID(intrq->cntxt_id)); 1982 val | INGRESSQID_V(intrq->cntxt_id));
1982 else { 1983 else {
1983 writel(val | INGRESSQID(intrq->bar2_qid), 1984 writel(val | INGRESSQID_V(intrq->bar2_qid),
1984 intrq->bar2_addr + SGE_UDB_GTS); 1985 intrq->bar2_addr + SGE_UDB_GTS);
1985 wmb(); 1986 wmb();
1986 } 1987 }
@@ -2583,7 +2584,7 @@ int t4vf_sge_init(struct adapter *adapter)
2583 fl0, fl1); 2584 fl0, fl1);
2584 return -EINVAL; 2585 return -EINVAL;
2585 } 2586 }
2586 if ((sge_params->sge_control & RXPKTCPLMODE_MASK) == 0) { 2587 if ((sge_params->sge_control & RXPKTCPLMODE_F) == 0) {
2587 dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n"); 2588 dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
2588 return -EINVAL; 2589 return -EINVAL;
2589 } 2590 }
@@ -2593,9 +2594,9 @@ int t4vf_sge_init(struct adapter *adapter)
2593 */ 2594 */
2594 if (fl1) 2595 if (fl1)
2595 s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT; 2596 s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
2596 s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK) 2597 s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
2597 ? 128 : 64); 2598 ? 128 : 64);
2598 s->pktshift = PKTSHIFT_GET(sge_params->sge_control); 2599 s->pktshift = PKTSHIFT_G(sge_params->sge_control);
2599 2600
2600 /* T4 uses a single control field to specify both the PCIe Padding and 2601 /* T4 uses a single control field to specify both the PCIe Padding and
2601 * Packing Boundary. T5 introduced the ability to specify these 2602 * Packing Boundary. T5 introduced the ability to specify these
@@ -2607,8 +2608,8 @@ int t4vf_sge_init(struct adapter *adapter)
2607 * end doing this because it would initialize the Padding Boundary and 2608 * end doing this because it would initialize the Padding Boundary and
2608 * leave the Packing Boundary initialized to 0 (16 bytes).) 2609 * leave the Packing Boundary initialized to 0 (16 bytes).)
2609 */ 2610 */
2610 ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) + 2611 ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_params->sge_control) +
2611 X_INGPADBOUNDARY_SHIFT); 2612 INGPADBOUNDARY_SHIFT_X);
2612 if (is_t4(adapter->params.chip)) { 2613 if (is_t4(adapter->params.chip)) {
2613 s->fl_align = ingpadboundary; 2614 s->fl_align = ingpadboundary;
2614 } else { 2615 } else {
@@ -2633,7 +2634,7 @@ int t4vf_sge_init(struct adapter *adapter)
2633 * Congestion Threshold is in units of 2 Free List pointers.) 2634 * Congestion Threshold is in units of 2 Free List pointers.)
2634 */ 2635 */
2635 s->fl_starve_thres 2636 s->fl_starve_thres
2636 = EGRTHRESHOLD_GET(sge_params->sge_congestion_control)*2 + 1; 2637 = EGRTHRESHOLD_G(sge_params->sge_congestion_control)*2 + 1;
2637 2638
2638 /* 2639 /*
2639 * Set up tasklet timers. 2640 * Set up tasklet timers.
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h
index c7b127d93767..b516b12b1884 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h
@@ -64,8 +64,8 @@
64 * Mailbox Data in the fixed CIM PF map and the programmable VF map must 64 * Mailbox Data in the fixed CIM PF map and the programmable VF map must
65 * match. However, it's a useful convention ... 65 * match. However, it's a useful convention ...
66 */ 66 */
67#if T4VF_MBDATA_BASE_ADDR != CIM_PF_MAILBOX_DATA 67#if T4VF_MBDATA_BASE_ADDR != CIM_PF_MAILBOX_DATA_A
68#error T4VF_MBDATA_BASE_ADDR must match CIM_PF_MAILBOX_DATA! 68#error T4VF_MBDATA_BASE_ADDR must match CIM_PF_MAILBOX_DATA_A!
69#endif 69#endif
70 70
71/* 71/*
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 60426cf890a7..1b5506df35b1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -39,6 +39,7 @@
39#include "t4vf_defs.h" 39#include "t4vf_defs.h"
40 40
41#include "../cxgb4/t4_regs.h" 41#include "../cxgb4/t4_regs.h"
42#include "../cxgb4/t4_values.h"
42#include "../cxgb4/t4fw_api.h" 43#include "../cxgb4/t4fw_api.h"
43 44
44/* 45/*
@@ -137,9 +138,9 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
137 * Loop trying to get ownership of the mailbox. Return an error 138 * Loop trying to get ownership of the mailbox. Return an error
138 * if we can't gain ownership. 139 * if we can't gain ownership.
139 */ 140 */
140 v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl)); 141 v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
141 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++) 142 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
142 v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl)); 143 v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
143 if (v != MBOX_OWNER_DRV) 144 if (v != MBOX_OWNER_DRV)
144 return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT; 145 return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT;
145 146
@@ -161,7 +162,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
161 t4_read_reg(adapter, mbox_data); /* flush write */ 162 t4_read_reg(adapter, mbox_data); /* flush write */
162 163
163 t4_write_reg(adapter, mbox_ctl, 164 t4_write_reg(adapter, mbox_ctl,
164 MBMSGVALID | MBOWNER(MBOX_OWNER_FW)); 165 MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
165 t4_read_reg(adapter, mbox_ctl); /* flush write */ 166 t4_read_reg(adapter, mbox_ctl); /* flush write */
166 167
167 /* 168 /*
@@ -183,14 +184,14 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
183 * If we're the owner, see if this is the reply we wanted. 184 * If we're the owner, see if this is the reply we wanted.
184 */ 185 */
185 v = t4_read_reg(adapter, mbox_ctl); 186 v = t4_read_reg(adapter, mbox_ctl);
186 if (MBOWNER_GET(v) == MBOX_OWNER_DRV) { 187 if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
187 /* 188 /*
188 * If the Message Valid bit isn't on, revoke ownership 189 * If the Message Valid bit isn't on, revoke ownership
189 * of the mailbox and continue waiting for our reply. 190 * of the mailbox and continue waiting for our reply.
190 */ 191 */
191 if ((v & MBMSGVALID) == 0) { 192 if ((v & MBMSGVALID_F) == 0) {
192 t4_write_reg(adapter, mbox_ctl, 193 t4_write_reg(adapter, mbox_ctl,
193 MBOWNER(MBOX_OWNER_NONE)); 194 MBOWNER_V(MBOX_OWNER_NONE));
194 continue; 195 continue;
195 } 196 }
196 197
@@ -216,7 +217,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
216 & FW_CMD_REQUEST_F) != 0); 217 & FW_CMD_REQUEST_F) != 0);
217 } 218 }
218 t4_write_reg(adapter, mbox_ctl, 219 t4_write_reg(adapter, mbox_ctl,
219 MBOWNER(MBOX_OWNER_NONE)); 220 MBOWNER_V(MBOX_OWNER_NONE));
220 return -FW_CMD_RETVAL_G(v); 221 return -FW_CMD_RETVAL_G(v);
221 } 222 }
222 } 223 }
@@ -530,19 +531,19 @@ int t4vf_get_sge_params(struct adapter *adapter)
530 int v; 531 int v;
531 532
532 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | 533 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
533 FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL)); 534 FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL_A));
534 params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | 535 params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
535 FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE)); 536 FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE_A));
536 params[2] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | 537 params[2] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
537 FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0)); 538 FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0_A));
538 params[3] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | 539 params[3] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
539 FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1)); 540 FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1_A));
540 params[4] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | 541 params[4] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
541 FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1)); 542 FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1_A));
542 params[5] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | 543 params[5] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
543 FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3)); 544 FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3_A));
544 params[6] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | 545 params[6] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
545 FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5)); 546 FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5_A));
546 v = t4vf_query_params(adapter, 7, params, vals); 547 v = t4vf_query_params(adapter, 7, params, vals);
547 if (v) 548 if (v)
548 return v; 549 return v;
@@ -578,9 +579,9 @@ int t4vf_get_sge_params(struct adapter *adapter)
578 } 579 }
579 580
580 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | 581 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
581 FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD)); 582 FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD_A));
582 params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | 583 params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
583 FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL)); 584 FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL_A));
584 v = t4vf_query_params(adapter, 2, params, vals); 585 v = t4vf_query_params(adapter, 2, params, vals);
585 if (v) 586 if (v)
586 return v; 587 return v;
@@ -617,8 +618,8 @@ int t4vf_get_sge_params(struct adapter *adapter)
617 * the driver can just use it. 618 * the driver can just use it.
618 */ 619 */
619 whoami = t4_read_reg(adapter, 620 whoami = t4_read_reg(adapter,
620 T4VF_PL_BASE_ADDR + A_PL_VF_WHOAMI); 621 T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A);
621 pf = SOURCEPF_GET(whoami); 622 pf = SOURCEPF_G(whoami);
622 623
623 s_hps = (HOSTPAGESIZEPF0_S + 624 s_hps = (HOSTPAGESIZEPF0_S +
624 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf); 625 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf);
@@ -630,10 +631,10 @@ int t4vf_get_sge_params(struct adapter *adapter)
630 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * pf); 631 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * pf);
631 sge_params->sge_vf_eq_qpp = 632 sge_params->sge_vf_eq_qpp =
632 ((sge_params->sge_egress_queues_per_page >> s_qpp) 633 ((sge_params->sge_egress_queues_per_page >> s_qpp)
633 & QUEUESPERPAGEPF0_MASK); 634 & QUEUESPERPAGEPF0_M);
634 sge_params->sge_vf_iq_qpp = 635 sge_params->sge_vf_iq_qpp =
635 ((sge_params->sge_ingress_queues_per_page >> s_qpp) 636 ((sge_params->sge_ingress_queues_per_page >> s_qpp)
636 & QUEUESPERPAGEPF0_MASK); 637 & QUEUESPERPAGEPF0_M);
637 } 638 }
638 639
639 return 0; 640 return 0;
@@ -1592,7 +1593,7 @@ int t4vf_prep_adapter(struct adapter *adapter)
1592 break; 1593 break;
1593 1594
1594 case CHELSIO_T5: 1595 case CHELSIO_T5:
1595 chipid = G_REV(t4_read_reg(adapter, A_PL_VF_REV)); 1596 chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
1596 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid); 1597 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
1597 break; 1598 break;
1598 } 1599 }
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 3a12c096ea1c..de9f7c97d916 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -475,8 +475,7 @@ static void ep93xx_free_buffers(struct ep93xx_priv *ep)
475 if (d) 475 if (d)
476 dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_FROM_DEVICE); 476 dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_FROM_DEVICE);
477 477
478 if (ep->rx_buf[i] != NULL) 478 kfree(ep->rx_buf[i]);
479 kfree(ep->rx_buf[i]);
480 } 479 }
481 480
482 for (i = 0; i < TX_QUEUE_ENTRIES; i++) { 481 for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
@@ -486,8 +485,7 @@ static void ep93xx_free_buffers(struct ep93xx_priv *ep)
486 if (d) 485 if (d)
487 dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_TO_DEVICE); 486 dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_TO_DEVICE);
488 487
489 if (ep->tx_buf[i] != NULL) 488 kfree(ep->tx_buf[i]);
490 kfree(ep->tx_buf[i]);
491 } 489 }
492 490
493 dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs, 491 dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs,
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 25c4d88853d8..84b6a2b46aec 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -33,7 +33,7 @@
33 33
34#define DRV_NAME "enic" 34#define DRV_NAME "enic"
35#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" 35#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
36#define DRV_VERSION "2.1.1.67" 36#define DRV_VERSION "2.1.1.83"
37#define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc" 37#define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc"
38 38
39#define ENIC_BARS_MAX 6 39#define ENIC_BARS_MAX 6
@@ -188,6 +188,7 @@ struct enic {
188 struct enic_rfs_flw_tbl rfs_h; 188 struct enic_rfs_flw_tbl rfs_h;
189 u32 rx_copybreak; 189 u32 rx_copybreak;
190 u8 rss_key[ENIC_RSS_LEN]; 190 u8 rss_key[ENIC_RSS_LEN];
191 struct vnic_gen_stats gen_stats;
191}; 192};
192 193
193static inline struct device *enic_get_dev(struct enic *enic) 194static inline struct device *enic_get_dev(struct enic *enic)
@@ -242,6 +243,19 @@ static inline unsigned int enic_msix_notify_intr(struct enic *enic)
242 return enic->rq_count + enic->wq_count + 1; 243 return enic->rq_count + enic->wq_count + 1;
243} 244}
244 245
246static inline int enic_dma_map_check(struct enic *enic, dma_addr_t dma_addr)
247{
248 if (unlikely(pci_dma_mapping_error(enic->pdev, dma_addr))) {
249 net_warn_ratelimited("%s: PCI dma mapping failed!\n",
250 enic->netdev->name);
251 enic->gen_stats.dma_map_error++;
252
253 return -ENOMEM;
254 }
255
256 return 0;
257}
258
245void enic_reset_addr_lists(struct enic *enic); 259void enic_reset_addr_lists(struct enic *enic);
246int enic_sriov_enabled(struct enic *enic); 260int enic_sriov_enabled(struct enic *enic);
247int enic_is_valid_vf(struct enic *enic, int vf); 261int enic_is_valid_vf(struct enic *enic, int vf);
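
enic_dma_map_check() is the new central guard for DMA mapping failures; the enic_main.c hunks later in this diff call it after every pci_map_single()/skb_frag_dma_map() and unwind with -ENOMEM. A condensed sketch of that pattern (queue_one_buf() is illustrative, not a driver function):

/* Map the skb head, verify the mapping, then post a single-descriptor send.
 * On mapping failure the dma_map_error counter has already been bumped by
 * enic_dma_map_check() and the caller is expected to drop the packet.
 */
static int queue_one_buf(struct enic *enic, struct vnic_wq *wq,
			 struct sk_buff *skb)
{
	unsigned int head_len = skb_headlen(skb);
	dma_addr_t dma_addr;

	dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
				  PCI_DMA_TODEVICE);
	if (unlikely(enic_dma_map_check(enic, dma_addr)))
		return -ENOMEM;

	/* no VLAN insert, EOP set, loopback off */
	enic_queue_wq_desc(wq, skb, dma_addr, head_len, 0, 0, 1, 0);
	return 0;
}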
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.c b/drivers/net/ethernet/cisco/enic/enic_dev.c
index 87ddc44b590e..f8d2a6a34282 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.c
@@ -177,40 +177,6 @@ int enic_dev_intr_coal_timer_info(struct enic *enic)
177 return err; 177 return err;
178} 178}
179 179
180int enic_vnic_dev_deinit(struct enic *enic)
181{
182 int err;
183
184 spin_lock_bh(&enic->devcmd_lock);
185 err = vnic_dev_deinit(enic->vdev);
186 spin_unlock_bh(&enic->devcmd_lock);
187
188 return err;
189}
190
191int enic_dev_init_prov2(struct enic *enic, struct vic_provinfo *vp)
192{
193 int err;
194
195 spin_lock_bh(&enic->devcmd_lock);
196 err = vnic_dev_init_prov2(enic->vdev,
197 (u8 *)vp, vic_provinfo_size(vp));
198 spin_unlock_bh(&enic->devcmd_lock);
199
200 return err;
201}
202
203int enic_dev_deinit_done(struct enic *enic, int *status)
204{
205 int err;
206
207 spin_lock_bh(&enic->devcmd_lock);
208 err = vnic_dev_deinit_done(enic->vdev, status);
209 spin_unlock_bh(&enic->devcmd_lock);
210
211 return err;
212}
213
214/* rtnl lock is held */ 180/* rtnl lock is held */
215int enic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) 181int enic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
216{ 182{
@@ -237,28 +203,6 @@ int enic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
237 return err; 203 return err;
238} 204}
239 205
240int enic_dev_enable2(struct enic *enic, int active)
241{
242 int err;
243
244 spin_lock_bh(&enic->devcmd_lock);
245 err = vnic_dev_enable2(enic->vdev, active);
246 spin_unlock_bh(&enic->devcmd_lock);
247
248 return err;
249}
250
251int enic_dev_enable2_done(struct enic *enic, int *status)
252{
253 int err;
254
255 spin_lock_bh(&enic->devcmd_lock);
256 err = vnic_dev_enable2_done(enic->vdev, status);
257 spin_unlock_bh(&enic->devcmd_lock);
258
259 return err;
260}
261
262int enic_dev_status_to_errno(int devcmd_status) 206int enic_dev_status_to_errno(int devcmd_status)
263{ 207{
264 switch (devcmd_status) { 208 switch (devcmd_status) {
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.h b/drivers/net/ethernet/cisco/enic/enic_dev.h
index 10bb970b2f35..f5bb058b3f96 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.h
@@ -55,11 +55,6 @@ int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic);
55int enic_dev_enable(struct enic *enic); 55int enic_dev_enable(struct enic *enic);
56int enic_dev_disable(struct enic *enic); 56int enic_dev_disable(struct enic *enic);
57int enic_dev_intr_coal_timer_info(struct enic *enic); 57int enic_dev_intr_coal_timer_info(struct enic *enic);
58int enic_vnic_dev_deinit(struct enic *enic);
59int enic_dev_init_prov2(struct enic *enic, struct vic_provinfo *vp);
60int enic_dev_deinit_done(struct enic *enic, int *status);
61int enic_dev_enable2(struct enic *enic, int arg);
62int enic_dev_enable2_done(struct enic *enic, int *status);
63int enic_dev_status_to_errno(int devcmd_status); 58int enic_dev_status_to_errno(int devcmd_status);
64 59
65#endif /* _ENIC_DEV_H_ */ 60#endif /* _ENIC_DEV_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index eba1eb846d34..28d9ca675a27 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -24,6 +24,7 @@
24#include "enic_dev.h" 24#include "enic_dev.h"
25#include "enic_clsf.h" 25#include "enic_clsf.h"
26#include "vnic_rss.h" 26#include "vnic_rss.h"
27#include "vnic_stats.h"
27 28
28struct enic_stat { 29struct enic_stat {
29 char name[ETH_GSTRING_LEN]; 30 char name[ETH_GSTRING_LEN];
@@ -40,6 +41,11 @@ struct enic_stat {
40 .index = offsetof(struct vnic_rx_stats, stat) / sizeof(u64) \ 41 .index = offsetof(struct vnic_rx_stats, stat) / sizeof(u64) \
41} 42}
42 43
44#define ENIC_GEN_STAT(stat) { \
45 .name = #stat, \
46 .index = offsetof(struct vnic_gen_stats, stat) / sizeof(u64)\
47}
48
43static const struct enic_stat enic_tx_stats[] = { 49static const struct enic_stat enic_tx_stats[] = {
44 ENIC_TX_STAT(tx_frames_ok), 50 ENIC_TX_STAT(tx_frames_ok),
45 ENIC_TX_STAT(tx_unicast_frames_ok), 51 ENIC_TX_STAT(tx_unicast_frames_ok),
@@ -78,10 +84,15 @@ static const struct enic_stat enic_rx_stats[] = {
78 ENIC_RX_STAT(rx_frames_to_max), 84 ENIC_RX_STAT(rx_frames_to_max),
79}; 85};
80 86
87static const struct enic_stat enic_gen_stats[] = {
88 ENIC_GEN_STAT(dma_map_error),
89};
90
81static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats); 91static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
82static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats); 92static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
93static const unsigned int enic_n_gen_stats = ARRAY_SIZE(enic_gen_stats);
83 94
84void enic_intr_coal_set_rx(struct enic *enic, u32 timer) 95static void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
85{ 96{
86 int i; 97 int i;
87 int intr; 98 int intr;
@@ -146,6 +157,10 @@ static void enic_get_strings(struct net_device *netdev, u32 stringset,
146 memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN); 157 memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
147 data += ETH_GSTRING_LEN; 158 data += ETH_GSTRING_LEN;
148 } 159 }
160 for (i = 0; i < enic_n_gen_stats; i++) {
161 memcpy(data, enic_gen_stats[i].name, ETH_GSTRING_LEN);
162 data += ETH_GSTRING_LEN;
163 }
149 break; 164 break;
150 } 165 }
151} 166}
@@ -154,7 +169,7 @@ static int enic_get_sset_count(struct net_device *netdev, int sset)
154{ 169{
155 switch (sset) { 170 switch (sset) {
156 case ETH_SS_STATS: 171 case ETH_SS_STATS:
157 return enic_n_tx_stats + enic_n_rx_stats; 172 return enic_n_tx_stats + enic_n_rx_stats + enic_n_gen_stats;
158 default: 173 default:
159 return -EOPNOTSUPP; 174 return -EOPNOTSUPP;
160 } 175 }
@@ -173,6 +188,8 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
173 *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index]; 188 *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
174 for (i = 0; i < enic_n_rx_stats; i++) 189 for (i = 0; i < enic_n_rx_stats; i++)
175 *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index]; 190 *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index];
191 for (i = 0; i < enic_n_gen_stats; i++)
192 *(data++) = ((u64 *)&enic->gen_stats)[enic_gen_stats[i].index];
176} 193}
177 194
178static u32 enic_get_msglevel(struct net_device *netdev) 195static u32 enic_get_msglevel(struct net_device *netdev)
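
The ENIC_GEN_STAT() macro above leans on the offsetof-divided-by-sizeof(u64) trick, so each table entry is just a name plus an index into the stats struct viewed as an array of u64. A standalone illustration of that indexing (all names here are stand-ins, not driver symbols):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_gen_stats {
	uint64_t dma_map_error;
	uint64_t some_other_counter;
};

#define DEMO_GEN_STAT(stat) \
	{ #stat, offsetof(struct demo_gen_stats, stat) / sizeof(uint64_t) }

static const struct { const char *name; unsigned int index; } demo_stats[] = {
	DEMO_GEN_STAT(dma_map_error),
	DEMO_GEN_STAT(some_other_counter),
};

int main(void)
{
	struct demo_gen_stats gs = { .dma_map_error = 3, .some_other_counter = 7 };
	const uint64_t *raw = (const uint64_t *)&gs;	/* struct viewed as u64[] */
	size_t i;

	for (i = 0; i < sizeof(demo_stats) / sizeof(demo_stats[0]); i++)
		printf("%s: %llu\n", demo_stats[i].name,
		       (unsigned long long)raw[demo_stats[i].index]);
	return 0;
}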
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index e356afa44e7d..9cbe038a388e 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -45,6 +45,7 @@
45#ifdef CONFIG_NET_RX_BUSY_POLL 45#ifdef CONFIG_NET_RX_BUSY_POLL
46#include <net/busy_poll.h> 46#include <net/busy_poll.h>
47#endif 47#endif
48#include <linux/crash_dump.h>
48 49
49#include "cq_enet_desc.h" 50#include "cq_enet_desc.h"
50#include "vnic_dev.h" 51#include "vnic_dev.h"
@@ -88,7 +89,7 @@ MODULE_DEVICE_TABLE(pci, enic_id_table);
88 * coalescing timer values 89 * coalescing timer values
89 * {rx_rate in Mbps, mapping percentage of the range} 90 * {rx_rate in Mbps, mapping percentage of the range}
90 */ 91 */
91struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = { 92static struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
92 {4000, 0}, 93 {4000, 0},
93 {4400, 10}, 94 {4400, 10},
94 {5060, 20}, 95 {5060, 20},
@@ -105,7 +106,7 @@ struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
105/* This table helps the driver to pick different ranges for rx coalescing 106/* This table helps the driver to pick different ranges for rx coalescing
106 * timer depending on the link speed. 107 * timer depending on the link speed.
107 */ 108 */
108struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = { 109static struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
109 {0, 0}, /* 0 - 4 Gbps */ 110 {0, 0}, /* 0 - 4 Gbps */
110 {0, 3}, /* 4 - 10 Gbps */ 111 {0, 3}, /* 4 - 10 Gbps */
111 {3, 6}, /* 10 - 40 Gbps */ 112 {3, 6}, /* 10 - 40 Gbps */
@@ -351,80 +352,94 @@ static irqreturn_t enic_isr_msix_notify(int irq, void *data)
351 return IRQ_HANDLED; 352 return IRQ_HANDLED;
352} 353}
353 354
354static inline void enic_queue_wq_skb_cont(struct enic *enic, 355static int enic_queue_wq_skb_cont(struct enic *enic, struct vnic_wq *wq,
355 struct vnic_wq *wq, struct sk_buff *skb, 356 struct sk_buff *skb, unsigned int len_left,
356 unsigned int len_left, int loopback) 357 int loopback)
357{ 358{
358 const skb_frag_t *frag; 359 const skb_frag_t *frag;
360 dma_addr_t dma_addr;
359 361
360 /* Queue additional data fragments */ 362 /* Queue additional data fragments */
361 for (frag = skb_shinfo(skb)->frags; len_left; frag++) { 363 for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
362 len_left -= skb_frag_size(frag); 364 len_left -= skb_frag_size(frag);
363 enic_queue_wq_desc_cont(wq, skb, 365 dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, 0,
364 skb_frag_dma_map(&enic->pdev->dev, 366 skb_frag_size(frag),
365 frag, 0, skb_frag_size(frag), 367 DMA_TO_DEVICE);
366 DMA_TO_DEVICE), 368 if (unlikely(enic_dma_map_check(enic, dma_addr)))
367 skb_frag_size(frag), 369 return -ENOMEM;
368 (len_left == 0), /* EOP? */ 370 enic_queue_wq_desc_cont(wq, skb, dma_addr, skb_frag_size(frag),
369 loopback); 371 (len_left == 0), /* EOP? */
372 loopback);
370 } 373 }
374
375 return 0;
371} 376}
372 377
373static inline void enic_queue_wq_skb_vlan(struct enic *enic, 378static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
374 struct vnic_wq *wq, struct sk_buff *skb, 379 struct sk_buff *skb, int vlan_tag_insert,
375 int vlan_tag_insert, unsigned int vlan_tag, int loopback) 380 unsigned int vlan_tag, int loopback)
376{ 381{
377 unsigned int head_len = skb_headlen(skb); 382 unsigned int head_len = skb_headlen(skb);
378 unsigned int len_left = skb->len - head_len; 383 unsigned int len_left = skb->len - head_len;
 	int eop = (len_left == 0);
+	dma_addr_t dma_addr;
+	int err = 0;
+
+	dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
+				  PCI_DMA_TODEVICE);
+	if (unlikely(enic_dma_map_check(enic, dma_addr)))
+		return -ENOMEM;
 
 	/* Queue the main skb fragment. The fragments are no larger
 	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
 	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
 	 * per fragment is queued.
 	 */
-	enic_queue_wq_desc(wq, skb,
-		pci_map_single(enic->pdev, skb->data,
-			head_len, PCI_DMA_TODEVICE),
-		head_len,
-		vlan_tag_insert, vlan_tag,
-		eop, loopback);
+	enic_queue_wq_desc(wq, skb, dma_addr, head_len, vlan_tag_insert,
+			   vlan_tag, eop, loopback);
 
 	if (!eop)
-		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+
+	return err;
 }
 
-static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
-	struct vnic_wq *wq, struct sk_buff *skb,
-	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
+static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
+				     struct sk_buff *skb, int vlan_tag_insert,
+				     unsigned int vlan_tag, int loopback)
 {
 	unsigned int head_len = skb_headlen(skb);
 	unsigned int len_left = skb->len - head_len;
 	unsigned int hdr_len = skb_checksum_start_offset(skb);
 	unsigned int csum_offset = hdr_len + skb->csum_offset;
 	int eop = (len_left == 0);
+	dma_addr_t dma_addr;
+	int err = 0;
+
+	dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
+				  PCI_DMA_TODEVICE);
+	if (unlikely(enic_dma_map_check(enic, dma_addr)))
+		return -ENOMEM;
 
 	/* Queue the main skb fragment. The fragments are no larger
 	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
 	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
 	 * per fragment is queued.
 	 */
-	enic_queue_wq_desc_csum_l4(wq, skb,
-		pci_map_single(enic->pdev, skb->data,
-			head_len, PCI_DMA_TODEVICE),
-		head_len,
-		csum_offset,
-		hdr_len,
-		vlan_tag_insert, vlan_tag,
-		eop, loopback);
+	enic_queue_wq_desc_csum_l4(wq, skb, dma_addr, head_len, csum_offset,
+				   hdr_len, vlan_tag_insert, vlan_tag, eop,
+				   loopback);
 
 	if (!eop)
-		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+
+	return err;
 }
 
-static inline void enic_queue_wq_skb_tso(struct enic *enic,
-	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
-	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
+static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
+				 struct sk_buff *skb, unsigned int mss,
+				 int vlan_tag_insert, unsigned int vlan_tag,
+				 int loopback)
 {
 	unsigned int frag_len_left = skb_headlen(skb);
 	unsigned int len_left = skb->len - frag_len_left;
@@ -454,20 +469,19 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic,
 	 */
 	while (frag_len_left) {
 		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
-		dma_addr = pci_map_single(enic->pdev, skb->data + offset,
-			len, PCI_DMA_TODEVICE);
-		enic_queue_wq_desc_tso(wq, skb,
-			dma_addr,
-			len,
-			mss, hdr_len,
-			vlan_tag_insert, vlan_tag,
-			eop && (len == frag_len_left), loopback);
+		dma_addr = pci_map_single(enic->pdev, skb->data + offset, len,
+					  PCI_DMA_TODEVICE);
+		if (unlikely(enic_dma_map_check(enic, dma_addr)))
+			return -ENOMEM;
+		enic_queue_wq_desc_tso(wq, skb, dma_addr, len, mss, hdr_len,
+				       vlan_tag_insert, vlan_tag,
+				       eop && (len == frag_len_left), loopback);
 		frag_len_left -= len;
 		offset += len;
 	}
 
 	if (eop)
-		return;
+		return 0;
 
 	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
 	 * for additional data fragments
@@ -483,16 +497,18 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic,
 			dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
 						    offset, len,
 						    DMA_TO_DEVICE);
-			enic_queue_wq_desc_cont(wq, skb,
-				dma_addr,
-				len,
-				(len_left == 0) &&
-				(len == frag_len_left), /* EOP? */
-				loopback);
+			if (unlikely(enic_dma_map_check(enic, dma_addr)))
+				return -ENOMEM;
+			enic_queue_wq_desc_cont(wq, skb, dma_addr, len,
+						(len_left == 0) &&
+						(len == frag_len_left),/*EOP*/
+						loopback);
 			frag_len_left -= len;
 			offset += len;
 		}
 	}
+
+	return 0;
 }
 
 static inline void enic_queue_wq_skb(struct enic *enic,
@@ -502,25 +518,42 @@ static inline void enic_queue_wq_skb(struct enic *enic,
 	unsigned int vlan_tag = 0;
 	int vlan_tag_insert = 0;
 	int loopback = 0;
+	int err;
 
-	if (vlan_tx_tag_present(skb)) {
+	if (skb_vlan_tag_present(skb)) {
 		/* VLAN tag from trunking driver */
 		vlan_tag_insert = 1;
-		vlan_tag = vlan_tx_tag_get(skb);
+		vlan_tag = skb_vlan_tag_get(skb);
 	} else if (enic->loop_enable) {
 		vlan_tag = enic->loop_tag;
 		loopback = 1;
 	}
 
 	if (mss)
-		enic_queue_wq_skb_tso(enic, wq, skb, mss,
-			vlan_tag_insert, vlan_tag, loopback);
+		err = enic_queue_wq_skb_tso(enic, wq, skb, mss,
+					    vlan_tag_insert, vlan_tag,
+					    loopback);
 	else if (skb->ip_summed == CHECKSUM_PARTIAL)
-		enic_queue_wq_skb_csum_l4(enic, wq, skb,
-			vlan_tag_insert, vlan_tag, loopback);
+		err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert,
+						vlan_tag, loopback);
 	else
-		enic_queue_wq_skb_vlan(enic, wq, skb,
-			vlan_tag_insert, vlan_tag, loopback);
+		err = enic_queue_wq_skb_vlan(enic, wq, skb, vlan_tag_insert,
+					     vlan_tag, loopback);
+	if (unlikely(err)) {
+		struct vnic_wq_buf *buf;
+
+		buf = wq->to_use->prev;
+		/* while not EOP of previous pkt && queue not empty.
+		 * For all non EOP bufs, os_buf is NULL.
+		 */
+		while (!buf->os_buf && (buf->next != wq->to_clean)) {
+			enic_free_wq_buf(wq, buf);
+			wq->ring.desc_avail++;
+			buf = buf->prev;
+		}
+		wq->to_use = buf->next;
+		dev_kfree_skb(skb);
+	}
 }
 
 /* netif_tx_lock held, process context with BHs disabled, or BH */
@@ -950,8 +983,12 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
 	if (!skb)
 		return -ENOMEM;
 
-	dma_addr = pci_map_single(enic->pdev, skb->data,
-		len, PCI_DMA_FROMDEVICE);
+	dma_addr = pci_map_single(enic->pdev, skb->data, len,
+				  PCI_DMA_FROMDEVICE);
+	if (unlikely(enic_dma_map_check(enic, dma_addr))) {
+		dev_kfree_skb(skb);
+		return -ENOMEM;
+	}
 
 	enic_queue_rq_desc(rq, skb, os_buf_index,
 			   dma_addr, len);
@@ -1266,7 +1303,7 @@ static void enic_set_rx_cpu_rmap(struct enic *enic)
 #endif /* CONFIG_RFS_ACCEL */
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
-int enic_busy_poll(struct napi_struct *napi)
+static int enic_busy_poll(struct napi_struct *napi)
 {
 	struct net_device *netdev = napi->dev;
 	struct enic *enic = netdev_priv(netdev);
@@ -2231,6 +2268,18 @@ static void enic_dev_deinit(struct enic *enic)
 	enic_clear_intr_mode(enic);
 }
 
+static void enic_kdump_kernel_config(struct enic *enic)
+{
+	if (is_kdump_kernel()) {
+		dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n");
+		enic->rq_count = 1;
+		enic->wq_count = 1;
+		enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS;
+		enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS;
+		enic->config.mtu = min_t(u16, 1500, enic->config.mtu);
+	}
+}
+
 static int enic_dev_init(struct enic *enic)
 {
 	struct device *dev = enic_get_dev(enic);
@@ -2260,6 +2309,10 @@ static int enic_dev_init(struct enic *enic)
 
 	enic_get_res_counts(enic);
 
+	/* modify resource count if we are in kdump_kernel
+	 */
+	enic_kdump_kernel_config(enic);
+
 	/* Set interrupt mode based on resource counts and system
 	 * capabilities
 	 */
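
The enic hunks above route every pci_map_single()/skb_frag_dma_map() result through enic_dma_map_check() before a descriptor is posted, and unwind any partially posted packet when the check fails. The helper itself is not visible in the hunks shown; a minimal sketch of what it is assumed to look like, given the dma_map_error counter added to vnic_gen_stats below:

/* Hypothetical sketch -- not one of the hunks shown above.  It only reports
 * the mapping failure; unwinding already-posted descriptors is left to the
 * caller (see enic_queue_wq_skb).
 */
static inline int enic_dma_map_check(struct enic *enic, dma_addr_t dma_addr)
{
	if (unlikely(pci_dma_mapping_error(enic->pdev, dma_addr))) {
		net_warn_ratelimited("%s: PCI dma mapping failed!\n",
				     enic->netdev->name);
		enic->gen_stats.dma_map_error++;	/* assumed counter field */
		return -ENOMEM;
	}
	return 0;
}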
diff --git a/drivers/net/ethernet/cisco/enic/vnic_stats.h b/drivers/net/ethernet/cisco/enic/vnic_stats.h
index 77750ec93954..74c81ed6fdab 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_stats.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_stats.h
@@ -62,6 +62,11 @@ struct vnic_rx_stats {
 	u64 rsvd[16];
 };
 
+/* Generic statistics */
+struct vnic_gen_stats {
+	u64 dma_map_error;
+};
+
 struct vnic_stats {
 	struct vnic_tx_stats tx;
 	struct vnic_rx_stats rx;
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.c b/drivers/net/ethernet/cisco/enic/vnic_wq.c
index 3e6b8d54dafc..b5a1c937fad2 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.c
@@ -47,11 +47,14 @@ static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
 			wq->ring.desc_size * buf->index;
 		if (buf->index + 1 == count) {
 			buf->next = wq->bufs[0];
+			buf->next->prev = buf;
 			break;
 		} else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES(count)) {
 			buf->next = wq->bufs[i + 1];
+			buf->next->prev = buf;
 		} else {
 			buf->next = buf + 1;
+			buf->next->prev = buf;
 			buf++;
 		}
 	}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h
index 816f1ad6072f..296154351823 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h
@@ -62,6 +62,7 @@ struct vnic_wq_buf {
 	uint8_t cq_entry; /* Gets completion event from hw */
 	uint8_t desc_skip_cnt; /* Num descs to occupy */
 	uint8_t compressed_send; /* Both hdr and payload in one desc */
+	struct vnic_wq_buf *prev;
 };
 
 /* Break the vnic_wq_buf allocations into blocks of 32/64 entries */
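
The new prev pointer makes the work-queue buffer ring doubly linked, which is what lets the transmit error path above walk backwards from wq->to_use and reclaim the descriptors of a partially mapped packet. A hedged illustration of the invariant vnic_wq_alloc_bufs() now establishes (the helper name and the ring.desc_count field are assumptions, not part of the patch):

/* Illustrative only: every buffer's successor must point back at it. */
static bool vnic_wq_ring_is_doubly_linked(struct vnic_wq *wq)
{
	struct vnic_wq_buf *buf = wq->to_use;
	unsigned int i;

	for (i = 0; i < wq->ring.desc_count; i++) {
		if (buf->next->prev != buf)
			return false;
		buf = buf->next;
	}
	return true;
}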
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index ef0bb58750e6..c0a7813603c3 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -36,6 +36,9 @@
 #include <linux/platform_device.h>
 #include <linux/irq.h>
 #include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
 
 #include <asm/delay.h>
 #include <asm/irq.h>
@@ -1426,11 +1429,48 @@ dm9000_probe(struct platform_device *pdev)
 	struct dm9000_plat_data *pdata = dev_get_platdata(&pdev->dev);
 	struct board_info *db;	/* Point a board information structure */
 	struct net_device *ndev;
+	struct device *dev = &pdev->dev;
 	const unsigned char *mac_src;
 	int ret = 0;
 	int iosize;
 	int i;
 	u32 id_val;
+	int reset_gpios;
+	enum of_gpio_flags flags;
+	struct regulator *power;
+
+	power = devm_regulator_get(dev, "vcc");
+	if (IS_ERR(power)) {
+		if (PTR_ERR(power) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+		dev_dbg(dev, "no regulator provided\n");
+	} else {
+		ret = regulator_enable(power);
+		if (ret != 0) {
+			dev_err(dev,
+				"Failed to enable power regulator: %d\n", ret);
+			return ret;
+		}
+		dev_dbg(dev, "regulator enabled\n");
+	}
+
+	reset_gpios = of_get_named_gpio_flags(dev->of_node, "reset-gpios", 0,
+					      &flags);
+	if (gpio_is_valid(reset_gpios)) {
+		ret = devm_gpio_request_one(dev, reset_gpios, flags,
+					    "dm9000_reset");
+		if (ret) {
+			dev_err(dev, "failed to request reset gpio %d: %d\n",
+				reset_gpios, ret);
+			return -ENODEV;
+		}
+
+		/* According to manual PWRST# Low Period Min 1ms */
+		msleep(2);
+		gpio_set_value(reset_gpios, 1);
+		/* Needs 3ms to read eeprom when PWRST is deasserted */
+		msleep(4);
+	}
 
 	if (!pdata) {
 		pdata = dm9000_parse_dt(&pdev->dev);
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 6aa887e0e1cb..9beb3d34d4ba 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -904,7 +904,7 @@ static void init_registers(struct net_device *dev)
 	}
 #elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
 	i |= 0xE000;
-#elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC)
+#elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC) || defined(CONFIG_ARM)
 	i |= 0x4800;
 #else
 #warning Processor architecture undefined
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 712e7f8e1df7..27de37aa90af 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -59,26 +59,6 @@
 #define OC_SUBSYS_DEVICE_ID3	0xE612
 #define OC_SUBSYS_DEVICE_ID4	0xE652
 
-static inline char *nic_name(struct pci_dev *pdev)
-{
-	switch (pdev->device) {
-	case OC_DEVICE_ID1:
-		return OC_NAME;
-	case OC_DEVICE_ID2:
-		return OC_NAME_BE;
-	case OC_DEVICE_ID3:
-	case OC_DEVICE_ID4:
-		return OC_NAME_LANCER;
-	case BE_DEVICE_ID2:
-		return BE3_NAME;
-	case OC_DEVICE_ID5:
-	case OC_DEVICE_ID6:
-		return OC_NAME_SH;
-	default:
-		return BE_NAME;
-	}
-}
-
 /* Number of bytes of an RX frame that are copied to skb->data */
 #define BE_HDR_LEN		((u16) 64)
 /* allocate extra space to allow tunneling decapsulation without head reallocation */
@@ -243,7 +223,6 @@ struct be_tx_stats {
 	u64 tx_bytes;
 	u64 tx_pkts;
 	u64 tx_reqs;
-	u64 tx_wrbs;
 	u64 tx_compl;
 	ulong tx_jiffies;
 	u32 tx_stops;
@@ -266,6 +245,9 @@ struct be_tx_obj {
 	/* Remember the skbs that were transmitted */
 	struct sk_buff *sent_skb_list[TX_Q_LEN];
 	struct be_tx_stats stats;
+	u16 pend_wrb_cnt;	/* Number of WRBs yet to be given to HW */
+	u16 last_req_wrb_cnt;	/* wrb cnt of the last req in the Q */
+	u16 last_req_hdr;	/* index of the last req's hdr-wrb */
 } ____cacheline_aligned_in_smp;
 
 /* Struct to remember the pages posted for rx frags */
@@ -379,15 +361,14 @@ enum vf_state {
 	ASSIGNED = 1
 };
 
-#define BE_FLAGS_LINK_STATUS_INIT		1
-#define BE_FLAGS_SRIOV_ENABLED			(1 << 2)
-#define BE_FLAGS_WORKER_SCHEDULED		(1 << 3)
-#define BE_FLAGS_VLAN_PROMISC			(1 << 4)
-#define BE_FLAGS_MCAST_PROMISC			(1 << 5)
-#define BE_FLAGS_NAPI_ENABLED			(1 << 9)
-#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD		(1 << 11)
-#define BE_FLAGS_VXLAN_OFFLOADS			(1 << 12)
-#define BE_FLAGS_SETUP_DONE			(1 << 13)
+#define BE_FLAGS_LINK_STATUS_INIT		BIT(1)
+#define BE_FLAGS_SRIOV_ENABLED			BIT(2)
+#define BE_FLAGS_WORKER_SCHEDULED		BIT(3)
+#define BE_FLAGS_NAPI_ENABLED			BIT(6)
+#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD		BIT(7)
+#define BE_FLAGS_VXLAN_OFFLOADS			BIT(8)
+#define BE_FLAGS_SETUP_DONE			BIT(9)
+#define BE_FLAGS_EVT_INCOMPATIBLE_SFP		BIT(10)
 
 #define BE_UC_PMAC_COUNT			30
 #define BE_VF_UC_PMAC_COUNT			2
@@ -397,6 +378,8 @@ enum vf_state {
 #define LANCER_DELETE_FW_DUMP		0x2
 
 struct phy_info {
+/* From SFF-8472 spec */
+#define SFP_VENDOR_NAME_LEN	17
 	u8 transceiver;
 	u8 autoneg;
 	u8 fc_autoneg;
@@ -410,6 +393,8 @@ struct phy_info {
 	u32 advertising;
 	u32 supported;
 	u8 cable_type;
+	u8 vendor_name[SFP_VENDOR_NAME_LEN];
+	u8 vendor_pn[SFP_VENDOR_NAME_LEN];
 };
 
 struct be_resources {
@@ -467,8 +452,6 @@ struct be_adapter {
 
 	struct be_drv_stats drv_stats;
 	struct be_aic_obj aic_obj[MAX_EVT_QS];
-	u16 vlans_added;
-	unsigned long vids[BITS_TO_LONGS(VLAN_N_VID)];
 	u8 vlan_prio_bmap;	/* Available Priority BitMap */
 	u16 recommended_prio;	/* Recommended Priority */
 	struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */
@@ -484,8 +467,15 @@ struct be_adapter {
 	/* Ethtool knobs and info */
 	char fw_ver[FW_VER_LEN];
 	char fw_on_flash[FW_VER_LEN];
+
+	/* IFACE filtering fields */
 	int if_handle;		/* Used to configure filtering */
+	u32 if_flags;		/* Interface filtering flags */
 	u32 *pmac_id;		/* MAC addr handle used by BE card */
+	u32 uc_macs;		/* Count of secondary UC MAC programmed */
+	unsigned long vids[BITS_TO_LONGS(VLAN_N_VID)];
+	u16 vlans_added;
+
 	u32 beacon_state;	/* for set_phys_id */
 
 	bool eeh_error;
@@ -493,7 +483,7 @@ struct be_adapter {
 	bool hw_error;
 
 	u32 port_num;
-	bool promiscuous;
+	char port_name;
 	u8 mc_type;
 	u32 function_mode;
 	u32 function_caps;
@@ -526,7 +516,6 @@ struct be_adapter {
 	struct phy_info phy;
 	u8 wol_cap;
 	bool wol_en;
-	u32 uc_macs;		/* Count of secondary UC MAC programmed */
 	u16 asic_rev;
 	u16 qnq_vid;
 	u32 msg_enable;
@@ -732,19 +721,6 @@ static inline bool is_ipv4_pkt(struct sk_buff *skb)
 	return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
 }
 
-static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
-{
-	u32 addr;
-
-	addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
-
-	mac[5] = (u8)(addr & 0xFF);
-	mac[4] = (u8)((addr >> 8) & 0xFF);
-	mac[3] = (u8)((addr >> 16) & 0xFF);
-	/* Use the OUI from the current MAC address */
-	memcpy(mac, adapter->netdev->dev_addr, 3);
-}
-
 static inline bool be_multi_rxq(const struct be_adapter *adapter)
 {
 	return adapter->num_rx_qs > 1;
@@ -767,129 +743,6 @@ static inline void be_clear_all_error(struct be_adapter *adapter)
 	adapter->fw_timeout = false;
 }
 
-static inline bool be_is_wol_excluded(struct be_adapter *adapter)
-{
-	struct pci_dev *pdev = adapter->pdev;
-
-	if (!be_physfn(adapter))
-		return true;
-
-	switch (pdev->subsystem_device) {
-	case OC_SUBSYS_DEVICE_ID1:
-	case OC_SUBSYS_DEVICE_ID2:
-	case OC_SUBSYS_DEVICE_ID3:
-	case OC_SUBSYS_DEVICE_ID4:
-		return true;
-	default:
-		return false;
-	}
-}
-
-static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
-{
-	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
-}
-
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static inline bool be_lock_napi(struct be_eq_obj *eqo)
-{
-	bool status = true;
-
-	spin_lock(&eqo->lock); /* BH is already disabled */
-	if (eqo->state & BE_EQ_LOCKED) {
-		WARN_ON(eqo->state & BE_EQ_NAPI);
-		eqo->state |= BE_EQ_NAPI_YIELD;
-		status = false;
-	} else {
-		eqo->state = BE_EQ_NAPI;
-	}
-	spin_unlock(&eqo->lock);
-	return status;
-}
-
-static inline void be_unlock_napi(struct be_eq_obj *eqo)
-{
-	spin_lock(&eqo->lock); /* BH is already disabled */
-
-	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
-	eqo->state = BE_EQ_IDLE;
-
-	spin_unlock(&eqo->lock);
-}
-
-static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
-{
-	bool status = true;
-
-	spin_lock_bh(&eqo->lock);
-	if (eqo->state & BE_EQ_LOCKED) {
-		eqo->state |= BE_EQ_POLL_YIELD;
-		status = false;
-	} else {
-		eqo->state |= BE_EQ_POLL;
-	}
-	spin_unlock_bh(&eqo->lock);
-	return status;
-}
-
-static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
-{
-	spin_lock_bh(&eqo->lock);
-
-	WARN_ON(eqo->state & (BE_EQ_NAPI));
-	eqo->state = BE_EQ_IDLE;
-
-	spin_unlock_bh(&eqo->lock);
-}
-
-static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
-{
-	spin_lock_init(&eqo->lock);
-	eqo->state = BE_EQ_IDLE;
-}
-
-static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
-{
-	local_bh_disable();
-
-	/* It's enough to just acquire napi lock on the eqo to stop
-	 * be_busy_poll() from processing any queueus.
-	 */
-	while (!be_lock_napi(eqo))
-		mdelay(1);
-
-	local_bh_enable();
-}
-
-#else /* CONFIG_NET_RX_BUSY_POLL */
-
-static inline bool be_lock_napi(struct be_eq_obj *eqo)
-{
-	return true;
-}
-
-static inline void be_unlock_napi(struct be_eq_obj *eqo)
-{
-}
-
-static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
-{
-	return false;
-}
-
-static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
-{
-}
-
-static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
-{
-}
-
-static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
-{
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
 		  u16 num_popped);
 void be_link_status_update(struct be_adapter *adapter, u8 link_status);
@@ -898,16 +751,6 @@ int be_load_fw(struct be_adapter *adapter, u8 *func);
 bool be_is_wol_supported(struct be_adapter *adapter);
 bool be_pause_supported(struct be_adapter *adapter);
 u32 be_get_fw_log_level(struct be_adapter *adapter);
-
-static inline int fw_major_num(const char *fw_ver)
-{
-	int fw_major = 0;
-
-	sscanf(fw_ver, "%d.", &fw_major);
-
-	return fw_major;
-}
-
 int be_update_queues(struct be_adapter *adapter);
 int be_poll(struct napi_struct *napi, int budget);
 
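
The flag cleanup in be.h above replaces open-coded shifts with the kernel's BIT() macro from <linux/bitops.h> (BIT(nr) expands to (1UL << (nr))) and renumbers the gaps left by the dropped BE_FLAGS_VLAN_PROMISC/BE_FLAGS_MCAST_PROMISC bits. A short worked note on the equivalence; the renumbering is assumed to be safe because adapter->flags appears to be purely in-driver state:

/* BIT(2) == (1 << 2) == 0x4, so BE_FLAGS_SRIOV_ENABLED keeps its value,
 * while e.g. BE_FLAGS_NAPI_ENABLED moves from bit 9 to bit 6.  Nothing in
 * these hunks exports adapter->flags to firmware or user space, so only the
 * in-memory encoding changes.
 */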
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index fead5c65a4f0..36916cfa70f9 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -19,6 +19,22 @@
 #include "be.h"
 #include "be_cmds.h"
 
+static char *be_port_misconfig_evt_desc[] = {
+	"A valid SFP module detected",
+	"Optics faulted/ incorrectly installed/ not installed.",
+	"Optics of two types installed.",
+	"Incompatible optics.",
+	"Unknown port SFP status"
+};
+
+static char *be_port_misconfig_remedy_desc[] = {
+	"",
+	"Reseat optics. If issue not resolved, replace",
+	"Remove one optic or install matching pair of optics",
+	"Replace with compatible optics for card to function",
+	""
+};
+
 static struct be_cmd_priv_map cmd_priv_map[] = {
 	{
 		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
@@ -249,6 +265,29 @@ static void be_async_link_state_process(struct be_adapter *adapter,
 			      evt->port_link_status & LINK_STATUS_MASK);
 }
 
+static void be_async_port_misconfig_event_process(struct be_adapter *adapter,
+						  struct be_mcc_compl *compl)
+{
+	struct be_async_event_misconfig_port *evt =
+			(struct be_async_event_misconfig_port *)compl;
+	u32 sfp_mismatch_evt = le32_to_cpu(evt->event_data_word1);
+	struct device *dev = &adapter->pdev->dev;
+	u8 port_misconfig_evt;
+
+	port_misconfig_evt =
+		((sfp_mismatch_evt >> (adapter->hba_port_num * 8)) & 0xff);
+
+	/* Log an error message that would allow a user to determine
+	 * whether the SFPs have an issue
+	 */
+	dev_info(dev, "Port %c: %s %s", adapter->port_name,
+		 be_port_misconfig_evt_desc[port_misconfig_evt],
+		 be_port_misconfig_remedy_desc[port_misconfig_evt]);
+
+	if (port_misconfig_evt == INCOMPATIBLE_SFP)
+		adapter->flags |= BE_FLAGS_EVT_INCOMPATIBLE_SFP;
+}
+
 /* Grp5 CoS Priority evt */
 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
 					       struct be_mcc_compl *compl)
@@ -334,6 +373,16 @@ static void be_async_dbg_evt_process(struct be_adapter *adapter,
 	}
 }
 
+static void be_async_sliport_evt_process(struct be_adapter *adapter,
+					 struct be_mcc_compl *cmp)
+{
+	u8 event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
+			ASYNC_EVENT_TYPE_MASK;
+
+	if (event_type == ASYNC_EVENT_PORT_MISCONFIG)
+		be_async_port_misconfig_event_process(adapter, cmp);
+}
+
 static inline bool is_link_state_evt(u32 flags)
 {
 	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
@@ -352,6 +401,12 @@ static inline bool is_dbg_evt(u32 flags)
 			ASYNC_EVENT_CODE_QNQ;
 }
 
+static inline bool is_sliport_evt(u32 flags)
+{
+	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
+		ASYNC_EVENT_CODE_SLIPORT;
+}
+
 static void be_mcc_event_process(struct be_adapter *adapter,
 				 struct be_mcc_compl *compl)
 {
@@ -361,6 +416,8 @@ static void be_mcc_event_process(struct be_adapter *adapter,
 		be_async_grp5_evt_process(adapter, compl);
 	else if (is_dbg_evt(compl->flags))
 		be_async_dbg_evt_process(adapter, compl);
+	else if (is_sliport_evt(compl->flags))
+		be_async_sliport_evt_process(adapter, compl);
 }
 
 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
@@ -573,7 +630,7 @@ static int lancer_wait_ready(struct be_adapter *adapter)
 {
 #define SLIPORT_READY_TIMEOUT 30
 	u32 sliport_status;
-	int status = 0, i;
+	int i;
 
 	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
 		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
@@ -584,9 +641,9 @@ static int lancer_wait_ready(struct be_adapter *adapter)
 	}
 
 	if (i == SLIPORT_READY_TIMEOUT)
-		status = -1;
+		return sliport_status ? : -1;
 
-	return status;
+	return 0;
 }
 
 static bool lancer_provisioning_error(struct be_adapter *adapter)
@@ -624,7 +681,7 @@ int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
 		iowrite32(SLI_PORT_CONTROL_IP_MASK,
 			  adapter->db + SLIPORT_CONTROL_OFFSET);
 
-		/* check adapter has corrected the error */
+		/* check if adapter has corrected the error */
 		status = lancer_wait_ready(adapter);
 		sliport_status = ioread32(adapter->db +
 					  SLIPORT_STATUS_OFFSET);
@@ -655,7 +712,11 @@ int be_fw_wait_ready(struct be_adapter *adapter)
 
 	if (lancer_chip(adapter)) {
 		status = lancer_wait_ready(adapter);
-		return status;
+		if (status) {
+			stage = status;
+			goto err;
+		}
+		return 0;
 	}
 
 	do {
@@ -671,7 +732,8 @@ int be_fw_wait_ready(struct be_adapter *adapter)
 		timeout += 2;
 	} while (timeout < 60);
 
-	dev_err(dev, "POST timeout; stage=0x%x\n", stage);
+err:
+	dev_err(dev, "POST timeout; stage=%#x\n", stage);
 	return -1;
 }
 
@@ -1166,9 +1228,15 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
 				      ctxt, 1);
 	}
 
-	/* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
-	req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
-	req->async_event_bitmap[0] |= cpu_to_le32(1 << ASYNC_EVENT_CODE_QNQ);
+	/* Subscribe to Link State, Sliport Event and Group 5 Events
+	 * (bits 1, 5 and 17 set)
+	 */
+	req->async_event_bitmap[0] =
+			cpu_to_le32(BIT(ASYNC_EVENT_CODE_LINK_STATE) |
+				    BIT(ASYNC_EVENT_CODE_GRP_5) |
+				    BIT(ASYNC_EVENT_CODE_QNQ) |
+				    BIT(ASYNC_EVENT_CODE_SLIPORT));
+
 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
 
 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
@@ -1881,7 +1949,7 @@ err:
 	return status;
 }
 
-int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
+static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_dma_mem *mem = &adapter->rx_filter;
@@ -1901,31 +1969,13 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
 			       wrb, mem);
 
 	req->if_id = cpu_to_le32(adapter->if_handle);
-	if (flags & IFF_PROMISC) {
-		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
-						 BE_IF_FLAGS_VLAN_PROMISCUOUS |
-						 BE_IF_FLAGS_MCAST_PROMISCUOUS);
-		if (value == ON)
-			req->if_flags =
-				cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
-					    BE_IF_FLAGS_VLAN_PROMISCUOUS |
-					    BE_IF_FLAGS_MCAST_PROMISCUOUS);
-	} else if (flags & IFF_ALLMULTI) {
-		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
-		req->if_flags = cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
-	} else if (flags & BE_FLAGS_VLAN_PROMISC) {
-		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
-
-		if (value == ON)
-			req->if_flags =
-				cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
-	} else {
+	req->if_flags_mask = cpu_to_le32(flags);
+	req->if_flags = (value == ON) ? req->if_flags_mask : 0;
+
+	if (flags & BE_IF_FLAGS_MULTICAST) {
 		struct netdev_hw_addr *ha;
 		int i = 0;
 
-		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_MULTICAST);
-		req->if_flags = cpu_to_le32(BE_IF_FLAGS_MULTICAST);
-
 		/* Reset mcast promisc mode if already set by setting mask
 		 * and not setting flags field
 		 */
@@ -1937,24 +1987,26 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
 			memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
 	}
 
-	if ((req->if_flags_mask & cpu_to_le32(be_if_cap_flags(adapter))) !=
-	    req->if_flags_mask) {
-		dev_warn(&adapter->pdev->dev,
-			 "Cannot set rx filter flags 0x%x\n",
-			 req->if_flags_mask);
-		dev_warn(&adapter->pdev->dev,
-			 "Interface is capable of 0x%x flags only\n",
-			 be_if_cap_flags(adapter));
-	}
-	req->if_flags_mask &= cpu_to_le32(be_if_cap_flags(adapter));
-
 	status = be_mcc_notify_wait(adapter);
-
 err:
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
 }
 
+int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
+{
+	struct device *dev = &adapter->pdev->dev;
+
+	if ((flags & be_if_cap_flags(adapter)) != flags) {
+		dev_warn(dev, "Cannot set rx filter flags 0x%x\n", flags);
+		dev_warn(dev, "Interface is capable of 0x%x flags only\n",
+			 be_if_cap_flags(adapter));
+	}
+	flags &= be_if_cap_flags(adapter);
+
+	return __be_cmd_rx_filter(adapter, flags, value);
+}
+
 /* Uses synchrounous mcc */
 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
 {
@@ -2355,6 +2407,24 @@ int be_cmd_query_cable_type(struct be_adapter *adapter)
 	return status;
 }
 
+int be_cmd_query_sfp_info(struct be_adapter *adapter)
+{
+	u8 page_data[PAGE_DATA_LEN];
+	int status;
+
+	status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
+						   page_data);
+	if (!status) {
+		strlcpy(adapter->phy.vendor_name, page_data +
+			SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1);
+		strlcpy(adapter->phy.vendor_pn,
+			page_data + SFP_VENDOR_PN_OFFSET,
+			SFP_VENDOR_NAME_LEN - 1);
+	}
+
+	return status;
+}
+
 int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name)
 {
 	struct lancer_cmd_req_delete_object *req;
@@ -2431,7 +2501,8 @@ err_unlock:
 }
 
 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
-			  u32 flash_type, u32 flash_opcode, u32 buf_size)
+			  u32 flash_type, u32 flash_opcode, u32 img_offset,
+			  u32 buf_size)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_write_flashrom *req;
@@ -2452,6 +2523,9 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
 			   cmd);
 
 	req->params.op_type = cpu_to_le32(flash_type);
+	if (flash_type == OPTYPE_OFFSET_SPECIFIED)
+		req->params.offset = cpu_to_le32(img_offset);
+
 	req->params.op_code = cpu_to_le32(flash_opcode);
 	req->params.data_buf_size = cpu_to_le32(buf_size);
 
@@ -2472,10 +2546,10 @@ err_unlock:
 }
 
 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
-			 u16 optype, int offset)
+			 u16 img_optype, u32 img_offset, u32 crc_offset)
 {
-	struct be_mcc_wrb *wrb;
 	struct be_cmd_read_flash_crc *req;
+	struct be_mcc_wrb *wrb;
 	int status;
 
 	spin_lock_bh(&adapter->mcc_lock);
@@ -2491,9 +2565,13 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
 			       OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
 			       wrb, NULL);
 
-	req->params.op_type = cpu_to_le32(optype);
+	req->params.op_type = cpu_to_le32(img_optype);
+	if (img_optype == OPTYPE_OFFSET_SPECIFIED)
+		req->params.offset = cpu_to_le32(img_offset + crc_offset);
+	else
+		req->params.offset = cpu_to_le32(crc_offset);
+
 	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
-	req->params.offset = cpu_to_le32(offset);
 	req->params.data_buf_size = cpu_to_le32(0x4);
 
 	status = be_mcc_notify_wait(adapter);
@@ -2742,7 +2820,7 @@ err:
 	return status;
 }
 
-int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
+static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_set_qos *req;
@@ -3236,6 +3314,24 @@ err:
 	return status;
 }
 
+static bool be_is_wol_excluded(struct be_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+
+	if (!be_physfn(adapter))
+		return true;
+
+	switch (pdev->subsystem_device) {
+	case OC_SUBSYS_DEVICE_ID1:
+	case OC_SUBSYS_DEVICE_ID2:
+	case OC_SUBSYS_DEVICE_ID3:
+	case OC_SUBSYS_DEVICE_ID4:
+		return true;
+	default:
+		return false;
+	}
+}
+
 int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
 {
 	struct be_mcc_wrb *wrb;
@@ -3422,42 +3518,34 @@ err:
 	return status;
 }
 
-int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name)
+int be_cmd_query_port_name(struct be_adapter *adapter)
 {
-	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_get_port_name *req;
+	struct be_mcc_wrb *wrb;
 	int status;
 
-	if (!lancer_chip(adapter)) {
-		*port_name = adapter->hba_port_num + '0';
-		return 0;
-	}
-
-	spin_lock_bh(&adapter->mcc_lock);
-
-	wrb = wrb_from_mccq(adapter);
-	if (!wrb) {
-		status = -EBUSY;
-		goto err;
-	}
+	if (mutex_lock_interruptible(&adapter->mbox_lock))
+		return -1;
 
+	wrb = wrb_from_mbox(adapter);
 	req = embedded_payload(wrb);
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 			       OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
 			       NULL);
-	req->hdr.version = 1;
+	if (!BEx_chip(adapter))
+		req->hdr.version = 1;
 
-	status = be_mcc_notify_wait(adapter);
+	status = be_mbox_notify_wait(adapter);
 	if (!status) {
 		struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
 
-		*port_name = resp->port_name[adapter->hba_port_num];
+		adapter->port_name = resp->port_name[adapter->hba_port_num];
 	} else {
-		*port_name = adapter->hba_port_num + '0';
+		adapter->port_name = adapter->hba_port_num + '0';
 	}
-err:
-	spin_unlock_bh(&adapter->mcc_lock);
+
+	mutex_unlock(&adapter->mbox_lock);
 	return status;
 }
 
@@ -3751,6 +3839,7 @@ int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
 	be_reset_nic_desc(&nic_desc);
 	nic_desc.pf_num = adapter->pf_number;
 	nic_desc.vf_num = domain;
+	nic_desc.bw_min = 0;
 	if (lancer_chip(adapter)) {
 		nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
 		nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
@@ -4092,7 +4181,7 @@ int be_cmd_set_logical_link_config(struct be_adapter *adapter,
 	int status;
 
 	if (BEx_chip(adapter) || lancer_chip(adapter))
-		return 0;
+		return -EOPNOTSUPP;
 
 	spin_lock_bh(&adapter->mcc_lock);
 
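
After this refactor the capability clamp and the warning live in the exported be_cmd_rx_filter(), and callers pass BE_IF_FLAGS_* values instead of IFF_* bits. The actual call sites are presumably in be_main.c, which is not part of this diff; a hedged sketch of what a caller is expected to look like now:

/* Illustrative only -- not one of the hunks above.  ON is the existing
 * driver constant tested in __be_cmd_rx_filter(); any other value clears
 * the selected filter bits.
 */
static int example_enter_promisc(struct be_adapter *adapter)
{
	return be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
}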
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index eb5085d6794f..db761e8e42a3 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -44,10 +44,10 @@ struct be_mcc_wrb {
 	} payload;
 };
 
-#define CQE_FLAGS_VALID_MASK		(1 << 31)
-#define CQE_FLAGS_ASYNC_MASK		(1 << 30)
-#define CQE_FLAGS_COMPLETED_MASK	(1 << 28)
-#define CQE_FLAGS_CONSUMED_MASK		(1 << 27)
+#define CQE_FLAGS_VALID_MASK		BIT(31)
+#define CQE_FLAGS_ASYNC_MASK		BIT(30)
+#define CQE_FLAGS_COMPLETED_MASK	BIT(28)
+#define CQE_FLAGS_CONSUMED_MASK		BIT(27)
 
 /* Completion Status */
 enum mcc_base_status {
@@ -102,6 +102,8 @@ struct be_mcc_compl {
 #define ASYNC_EVENT_PVID_STATE		0x3
 #define ASYNC_EVENT_CODE_QNQ		0x6
 #define ASYNC_DEBUG_EVENT_TYPE_QNQ	1
+#define ASYNC_EVENT_CODE_SLIPORT	0x11
+#define ASYNC_EVENT_PORT_MISCONFIG	0x9
 
 enum {
 	LINK_DOWN	= 0x0,
@@ -169,6 +171,15 @@ struct be_async_event_qnq {
 	u32 flags;
 } __packed;
 
+#define INCOMPATIBLE_SFP		0x3
+/* async event indicating misconfigured port */
+struct be_async_event_misconfig_port {
+	u32 event_data_word1;
+	u32 event_data_word2;
+	u32 rsvd0;
+	u32 flags;
+} __packed;
+
 struct be_mcc_mailbox {
 	struct be_mcc_wrb wrb;
 	struct be_mcc_compl compl;
@@ -586,6 +597,10 @@ enum be_if_flags {
 			 BE_IF_FLAGS_PASS_L3L4_ERRORS | BE_IF_FLAGS_MULTICAST |\
 			 BE_IF_FLAGS_UNTAGGED)
 
+#define BE_IF_FLAGS_ALL_PROMISCUOUS	(BE_IF_FLAGS_PROMISCUOUS | \
+					 BE_IF_FLAGS_VLAN_PROMISCUOUS |\
+					 BE_IF_FLAGS_MCAST_PROMISCUOUS)
+
 /* An RX interface is an object with one or more MAC addresses and
  * filtering capabilities. */
 struct be_cmd_req_if_create {
@@ -1024,6 +1039,8 @@ enum {
 #define SFP_PLUS_SFF_8472_COMP		0x5E
 #define SFP_PLUS_CABLE_TYPE_OFFSET	0x8
 #define SFP_PLUS_COPPER_CABLE		0x4
+#define SFP_VENDOR_NAME_OFFSET		0x14
+#define SFP_VENDOR_PN_OFFSET		0x28
 
 #define PAGE_DATA_LEN	256
 struct be_cmd_resp_port_type {
@@ -1091,6 +1108,10 @@ struct be_cmd_req_query_fw_cfg {
 	u32 rsvd[31];
 };
 
+/* ASIC revisions */
+#define ASIC_REV_B0		0x10
+#define ASIC_REV_P2		0x11
+
 struct be_cmd_resp_query_fw_cfg {
 	struct be_cmd_resp_hdr hdr;
 	u32 be_config_number;
@@ -1161,7 +1182,173 @@ struct be_cmd_resp_get_beacon_state {
 	u8 rsvd0[3];
 } __packed;
 
+/* Flashrom related descriptors */
+#define MAX_FLASH_COMP			32
+
+#define OPTYPE_ISCSI_ACTIVE		0
+#define OPTYPE_REDBOOT			1
+#define OPTYPE_BIOS			2
+#define OPTYPE_PXE_BIOS			3
+#define OPTYPE_OFFSET_SPECIFIED		7
+#define OPTYPE_FCOE_BIOS		8
+#define OPTYPE_ISCSI_BACKUP		9
+#define OPTYPE_FCOE_FW_ACTIVE		10
+#define OPTYPE_FCOE_FW_BACKUP		11
+#define OPTYPE_NCSI_FW			13
+#define OPTYPE_REDBOOT_DIR		18
+#define OPTYPE_REDBOOT_CONFIG		19
+#define OPTYPE_SH_PHY_FW		21
+#define OPTYPE_FLASHISM_JUMPVECTOR	22
+#define OPTYPE_UFI_DIR			23
+#define OPTYPE_PHY_FW			99
+
+#define FLASH_BIOS_IMAGE_MAX_SIZE_g2	262144  /* Max OPTION ROM image sz */
+#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g2	262144  /* Max Redboot image sz */
+#define FLASH_IMAGE_MAX_SIZE_g2		1310720 /* Max firmware image size */
+
+#define FLASH_NCSI_IMAGE_MAX_SIZE_g3	262144
+#define FLASH_PHY_FW_IMAGE_MAX_SIZE_g3	262144
+#define FLASH_BIOS_IMAGE_MAX_SIZE_g3	524288  /* Max OPTION ROM image sz */
+#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g3	1048576 /* Max Redboot image sz */
+#define FLASH_IMAGE_MAX_SIZE_g3		2097152 /* Max firmware image size */
+
+/* Offsets for components on Flash. */
+#define FLASH_REDBOOT_START_g2			0
+#define FLASH_FCoE_BIOS_START_g2		524288
+#define FLASH_iSCSI_PRIMARY_IMAGE_START_g2	1048576
+#define FLASH_iSCSI_BACKUP_IMAGE_START_g2	2359296
+#define FLASH_FCoE_PRIMARY_IMAGE_START_g2	3670016
+#define FLASH_FCoE_BACKUP_IMAGE_START_g2	4980736
+#define FLASH_iSCSI_BIOS_START_g2		7340032
+#define FLASH_PXE_BIOS_START_g2			7864320
+
+#define FLASH_REDBOOT_START_g3			262144
+#define FLASH_PHY_FW_START_g3			1310720
+#define FLASH_iSCSI_PRIMARY_IMAGE_START_g3	2097152
+#define FLASH_iSCSI_BACKUP_IMAGE_START_g3	4194304
+#define FLASH_FCoE_PRIMARY_IMAGE_START_g3	6291456
+#define FLASH_FCoE_BACKUP_IMAGE_START_g3	8388608
+#define FLASH_iSCSI_BIOS_START_g3		12582912
+#define FLASH_PXE_BIOS_START_g3			13107200
+#define FLASH_FCoE_BIOS_START_g3		13631488
+#define FLASH_NCSI_START_g3			15990784
+
+#define IMAGE_NCSI			16
+#define IMAGE_OPTION_ROM_PXE		32
+#define IMAGE_OPTION_ROM_FCoE		33
+#define IMAGE_OPTION_ROM_ISCSI		34
+#define IMAGE_FLASHISM_JUMPVECTOR	48
+#define IMAGE_FIRMWARE_iSCSI		160
+#define IMAGE_FIRMWARE_FCoE		162
+#define IMAGE_FIRMWARE_BACKUP_iSCSI	176
+#define IMAGE_FIRMWARE_BACKUP_FCoE	178
+#define IMAGE_FIRMWARE_PHY		192
+#define IMAGE_REDBOOT_DIR		208
+#define IMAGE_REDBOOT_CONFIG		209
+#define IMAGE_UFI_DIR			210
+#define IMAGE_BOOT_CODE			224
+
+struct controller_id {
+	u32 vendor;
+	u32 device;
+	u32 subvendor;
+	u32 subdevice;
+};
+
+struct flash_comp {
+	unsigned long offset;
+	int optype;
+	int size;
+	int img_type;
+};
+
+struct image_hdr {
+	u32 imageid;
+	u32 imageoffset;
+	u32 imagelength;
+	u32 image_checksum;
+	u8 image_version[32];
+};
+
+struct flash_file_hdr_g2 {
+	u8 sign[32];
+	u32 cksum;
+	u32 antidote;
+	struct controller_id cont_id;
+	u32 file_len;
+	u32 chunk_num;
+	u32 total_chunks;
+	u32 num_imgs;
+	u8 build[24];
+};
+
+/* First letter of the build version of the image */
+#define BLD_STR_UFI_TYPE_BE2	'2'
+#define BLD_STR_UFI_TYPE_BE3	'3'
+#define BLD_STR_UFI_TYPE_SH	'4'
+
+struct flash_file_hdr_g3 {
+	u8 sign[52];
+	u8 ufi_version[4];
+	u32 file_len;
+	u32 cksum;
+	u32 antidote;
+	u32 num_imgs;
+	u8 build[24];
+	u8 asic_type_rev;
+	u8 rsvd[31];
+};
+
+struct flash_section_hdr {
+	u32 format_rev;
+	u32 cksum;
+	u32 antidote;
+	u32 num_images;
+	u8 id_string[128];
+	u32 rsvd[4];
+} __packed;
+
+struct flash_section_hdr_g2 {
+	u32 format_rev;
+	u32 cksum;
+	u32 antidote;
+	u32 build_num;
+	u8 id_string[128];
+	u32 rsvd[8];
+} __packed;
+
+struct flash_section_entry {
+	u32 type;
+	u32 offset;
+	u32 pad_size;
+	u32 image_size;
+	u32 cksum;
+	u32 entry_point;
+	u16 optype;
+	u16 rsvd0;
+	u32 rsvd1;
+	u8 ver_data[32];
+} __packed;
+
+struct flash_section_info {
+	u8 cookie[32];
+	struct flash_section_hdr fsec_hdr;
+	struct flash_section_entry fsec_entry[32];
+} __packed;
+
+struct flash_section_info_g2 {
+	u8 cookie[32];
+	struct flash_section_hdr_g2 fsec_hdr;
+	struct flash_section_entry fsec_entry[32];
+} __packed;
+
 /****************** Firmware Flash ******************/
+#define FLASHROM_OPER_FLASH		1
+#define FLASHROM_OPER_SAVE		2
+#define FLASHROM_OPER_REPORT		4
+#define FLASHROM_OPER_PHY_FLASH		9
+#define FLASHROM_OPER_PHY_SAVE		10
+
 struct flashrom_params {
 	u32 op_code;
 	u32 op_type;
@@ -1366,6 +1553,7 @@ enum {
 	PHY_TYPE_QSFP,
 	PHY_TYPE_KR4_40GB,
 	PHY_TYPE_KR2_20GB,
+	PHY_TYPE_TN_8022,
 	PHY_TYPE_DISABLED = 255
 };
 
@@ -1429,6 +1617,20 @@ struct be_cmd_req_set_qos {
 };
 
 /*********************** Controller Attributes ***********************/
+struct mgmt_hba_attribs {
+	u32 rsvd0[24];
+	u8 controller_model_number[32];
+	u32 rsvd1[79];
+	u8 rsvd2[3];
+	u8 phy_port;
+	u32 rsvd3[13];
+} __packed;
+
+struct mgmt_controller_attrib {
+	struct mgmt_hba_attribs hba_attribs;
+	u32 rsvd0[10];
+} __packed;
+
 struct be_cmd_req_cntl_attribs {
 	struct be_cmd_req_hdr hdr;
 };
@@ -2070,8 +2272,10 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num,
 int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
 				      u8 page_num, u8 *data);
 int be_cmd_query_cable_type(struct be_adapter *adapter);
+int be_cmd_query_sfp_info(struct be_adapter *adapter);
 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
-			  u32 flash_oper, u32 flash_opcode, u32 buf_size);
+			  u32 flash_oper, u32 flash_opcode, u32 img_offset,
+			  u32 buf_size);
 int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
 			    u32 data_size, u32 data_offset,
 			    const char *obj_name, u32 *data_written,
@@ -2081,7 +2285,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
 			   u32 *data_read, u32 *eof, u8 *addn_status);
 int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name);
 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
-			 u16 optype, int offset);
+			 u16 img_optype, u32 img_offset, u32 crc_offset);
 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
 			    struct be_dma_mem *nonemb_cmd);
 int be_cmd_fw_init(struct be_adapter *adapter);
@@ -2136,7 +2340,7 @@ int lancer_initiate_dump(struct be_adapter *adapter);
 int lancer_delete_dump(struct be_adapter *adapter);
 bool dump_present(struct be_adapter *adapter);
 int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
-int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
+int be_cmd_query_port_name(struct be_adapter *adapter);
 int be_cmd_get_func_config(struct be_adapter *adapter,
 			   struct be_resources *res);
 int be_cmd_get_profile_config(struct be_adapter *adapter,
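
The new ASYNC_EVENT_CODE_SLIPORT value ties back to the MCC-queue creation change in be_cmds.c above. A quick worked check of the "bits 1, 5 and 17" comment there; the LINK_STATE and GRP_5 codes are not shown in this diff and are assumed to be 0x1 and 0x5, which is what the old hard-coded 0x00000022 implies:

/* BIT(0x1)  = 0x00000002   link state
 * BIT(0x5)  = 0x00000020   group 5
 * BIT(0x6)  = 0x00000040   QnQ (ASYNC_EVENT_CODE_QNQ above)
 * BIT(0x11) = 0x00020000   sliport, i.e. bit 17
 * old bitmap 0x00000022 == bits 1 and 5 only
 */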
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 73a500ccbf69..4d2de4700769 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -193,8 +193,6 @@ static const struct be_ethtool_stat et_tx_stats[] = {
 	{DRVSTAT_TX_INFO(tx_pkts)},
 	/* Number of skbs queued for trasmission by the driver */
 	{DRVSTAT_TX_INFO(tx_reqs)},
-	/* Number of TX work request blocks DMAed to HW */
-	{DRVSTAT_TX_INFO(tx_wrbs)},
 	/* Number of times the TX queue was stopped due to lack
 	 * of spaces in the TXQ.
 	 */
@@ -707,15 +705,17 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
 
 	if (ecmd->autoneg != adapter->phy.fc_autoneg)
 		return -EINVAL;
-	adapter->tx_fc = ecmd->tx_pause;
-	adapter->rx_fc = ecmd->rx_pause;
 
-	status = be_cmd_set_flow_control(adapter,
-					 adapter->tx_fc, adapter->rx_fc);
-	if (status)
+	status = be_cmd_set_flow_control(adapter, ecmd->tx_pause,
+					 ecmd->rx_pause);
+	if (status) {
 		dev_warn(&adapter->pdev->dev, "Pause param set failed\n");
+		return be_cmd_status(status);
+	}
 
-	return be_cmd_status(status);
+	adapter->tx_fc = ecmd->tx_pause;
+	adapter->rx_fc = ecmd->rx_pause;
+	return 0;
 }
 
 static int be_set_phys_id(struct net_device *netdev,
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 295ee0835ba0..48840889db62 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -75,7 +75,7 @@
  * atomically without having to arbitrate for the PCI Interrupt Disable bit
  * with the OS.
  */
-#define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK	(1 << 29) /* bit 29 */
+#define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK	BIT(29) /* bit 29 */
 
 /********* PCI Function Capability *********/
 #define BE_FUNCTION_CAPS_RSS			0x2
@@ -171,94 +171,6 @@
 #define RETRIEVE_FAT	0
 #define QUERY_FAT	1
 
-/* Flashrom related descriptors */
-#define MAX_FLASH_COMP			32
-#define IMAGE_TYPE_FIRMWARE		160
-#define IMAGE_TYPE_BOOTCODE		224
-#define IMAGE_TYPE_OPTIONROM		32
-
-#define NUM_FLASHDIR_ENTRIES		32
-
-#define OPTYPE_ISCSI_ACTIVE		0
-#define OPTYPE_REDBOOT			1
-#define OPTYPE_BIOS			2
-#define OPTYPE_PXE_BIOS			3
-#define OPTYPE_FCOE_BIOS		8
-#define OPTYPE_ISCSI_BACKUP		9
-#define OPTYPE_FCOE_FW_ACTIVE		10
-#define OPTYPE_FCOE_FW_BACKUP		11
-#define OPTYPE_NCSI_FW			13
-#define OPTYPE_REDBOOT_DIR		18
-#define OPTYPE_REDBOOT_CONFIG		19
-#define OPTYPE_SH_PHY_FW		21
-#define OPTYPE_FLASHISM_JUMPVECTOR	22
-#define OPTYPE_UFI_DIR			23
-#define OPTYPE_PHY_FW			99
-#define TN_8022				13
-
-#define FLASHROM_OPER_PHY_FLASH		9
-#define FLASHROM_OPER_PHY_SAVE		10
-#define FLASHROM_OPER_FLASH		1
-#define FLASHROM_OPER_SAVE		2
-#define FLASHROM_OPER_REPORT		4
-
-#define FLASH_IMAGE_MAX_SIZE_g2		(1310720) /* Max firmware image size */
-#define FLASH_BIOS_IMAGE_MAX_SIZE_g2	(262144)  /* Max OPTION ROM image sz */
-#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g2	(262144)  /* Max Redboot image sz */
-#define FLASH_IMAGE_MAX_SIZE_g3		(2097152) /* Max firmware image size */
-#define FLASH_BIOS_IMAGE_MAX_SIZE_g3	(524288)  /* Max OPTION ROM image sz */
-#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g3	(1048576) /* Max Redboot image sz */
-#define FLASH_NCSI_IMAGE_MAX_SIZE_g3	(262144)
-#define FLASH_PHY_FW_IMAGE_MAX_SIZE_g3	262144
-
-#define FLASH_NCSI_MAGIC		(0x16032009)
-#define FLASH_NCSI_DISABLED		(0)
-#define FLASH_NCSI_ENABLED		(1)
-
-#define FLASH_NCSI_BITFILE_HDR_OFFSET	(0x600000)
-
-/* Offsets for components on Flash. */
-#define FLASH_iSCSI_PRIMARY_IMAGE_START_g2	(1048576)
-#define FLASH_iSCSI_BACKUP_IMAGE_START_g2	(2359296)
-#define FLASH_FCoE_PRIMARY_IMAGE_START_g2	(3670016)
-#define FLASH_FCoE_BACKUP_IMAGE_START_g2	(4980736)
-#define FLASH_iSCSI_BIOS_START_g2		(7340032)
-#define FLASH_PXE_BIOS_START_g2			(7864320)
-#define FLASH_FCoE_BIOS_START_g2		(524288)
-#define FLASH_REDBOOT_START_g2			(0)
-
-#define FLASH_NCSI_START_g3			(15990784)
-#define FLASH_iSCSI_PRIMARY_IMAGE_START_g3	(2097152)
-#define FLASH_iSCSI_BACKUP_IMAGE_START_g3	(4194304)
-#define FLASH_FCoE_PRIMARY_IMAGE_START_g3	(6291456)
-#define FLASH_FCoE_BACKUP_IMAGE_START_g3	(8388608)
-#define FLASH_iSCSI_BIOS_START_g3		(12582912)
-#define FLASH_PXE_BIOS_START_g3			(13107200)
-#define FLASH_FCoE_BIOS_START_g3		(13631488)
-#define FLASH_REDBOOT_START_g3			(262144)
-#define FLASH_PHY_FW_START_g3			1310720
-
-#define IMAGE_NCSI			16
-#define IMAGE_OPTION_ROM_PXE		32
-#define IMAGE_OPTION_ROM_FCoE		33
-#define IMAGE_OPTION_ROM_ISCSI		34
-#define IMAGE_FLASHISM_JUMPVECTOR	48
-#define IMAGE_FLASH_ISM			49
-#define IMAGE_JUMP_VECTOR		50
-#define IMAGE_FIRMWARE_iSCSI		160
-#define IMAGE_FIRMWARE_COMP_iSCSI	161
-#define IMAGE_FIRMWARE_FCoE		162
-#define IMAGE_FIRMWARE_COMP_FCoE	163
-#define IMAGE_FIRMWARE_BACKUP_iSCSI	176
-#define IMAGE_FIRMWARE_BACKUP_COMP_iSCSI 177
-#define IMAGE_FIRMWARE_BACKUP_FCoE	178
-#define IMAGE_FIRMWARE_BACKUP_COMP_FCoE	179
-#define IMAGE_FIRMWARE_PHY		192
-#define IMAGE_REDBOOT_DIR		208
-#define IMAGE_REDBOOT_CONFIG		209
-#define IMAGE_UFI_DIR			210
-#define IMAGE_BOOT_CODE			224
-
 /************* Rx Packet Type Encoding **************/
 #define BE_UNICAST_PACKET		0
 #define BE_MULTICAST_PACKET		1
@@ -281,10 +193,10 @@ struct be_eq_entry {
281/* TX Queue Descriptor */ 193/* TX Queue Descriptor */
282#define ETH_WRB_FRAG_LEN_MASK 0xFFFF 194#define ETH_WRB_FRAG_LEN_MASK 0xFFFF
283struct be_eth_wrb { 195struct be_eth_wrb {
284 u32 frag_pa_hi; /* dword 0 */ 196 __le32 frag_pa_hi; /* dword 0 */
285 u32 frag_pa_lo; /* dword 1 */ 197 __le32 frag_pa_lo; /* dword 1 */
286 u32 rsvd0; /* dword 2 */ 198 u32 rsvd0; /* dword 2 */
287 u32 frag_len; /* dword 3: bits 0 - 15 */ 199 __le32 frag_len; /* dword 3: bits 0 - 15 */
288} __packed; 200} __packed;
289 201
290/* Pseudo amap definition for eth_hdr_wrb in which each bit of the 202/* Pseudo amap definition for eth_hdr_wrb in which each bit of the
@@ -311,8 +223,13 @@ struct amap_eth_hdr_wrb {
311 u8 vlan_tag[16]; 223 u8 vlan_tag[16];
312} __packed; 224} __packed;
313 225
226#define TX_HDR_WRB_COMPL 1 /* word 2 */
227#define TX_HDR_WRB_EVT BIT(1) /* word 2 */
228#define TX_HDR_WRB_NUM_SHIFT 13 /* word 2: bits 13:17 */
229#define TX_HDR_WRB_NUM_MASK 0x1F /* word 2: bits 13:17 */
230
314struct be_eth_hdr_wrb { 231struct be_eth_hdr_wrb {
315 u32 dw[4]; 232 __le32 dw[4];
316}; 233};
317 234
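
The new TX_HDR_WRB_* constants describe word 2 of the TX header WRB: bit 0 requests a completion, bit 1 is the event bit, and bits 13:17 hold the 5-bit WRB count that be_xmit_flush() patches when it appends a dummy WRB. A small host-side sketch of that packing, using plain uint32_t and ignoring the cpu_to_le32() conversion the driver performs:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the new TX_HDR_WRB_* layout for word 2 of the TX header WRB. */
#define TX_HDR_WRB_COMPL	1u		/* bit 0: request a completion */
#define TX_HDR_WRB_EVT		(1u << 1)	/* bit 1: event bit */
#define TX_HDR_WRB_NUM_SHIFT	13		/* bits 13:17: WRB count */
#define TX_HDR_WRB_NUM_MASK	0x1Fu

static uint32_t pack_word2(unsigned int num_wrb, int evt, int want_compl)
{
	uint32_t w = (num_wrb & TX_HDR_WRB_NUM_MASK) << TX_HDR_WRB_NUM_SHIFT;

	if (evt)
		w |= TX_HDR_WRB_EVT;
	if (want_compl)
		w |= TX_HDR_WRB_COMPL;
	return w;
}

static unsigned int unpack_num_wrb(uint32_t w)
{
	return (w >> TX_HDR_WRB_NUM_SHIFT) & TX_HDR_WRB_NUM_MASK;
}

int main(void)
{
	uint32_t w = pack_word2(5, 1, 0);

	printf("word2=0x%08x num_wrb=%u\n", w, unpack_num_wrb(w));
	return 0;
}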
318/********* Tx Compl Status Encoding *********/ 235/********* Tx Compl Status Encoding *********/
@@ -435,138 +352,3 @@ struct amap_eth_rx_compl_v1 {
435struct be_eth_rx_compl { 352struct be_eth_rx_compl {
436 u32 dw[4]; 353 u32 dw[4];
437}; 354};
438
439struct mgmt_hba_attribs {
440 u8 flashrom_version_string[32];
441 u8 manufacturer_name[32];
442 u32 supported_modes;
443 u32 rsvd0[3];
444 u8 ncsi_ver_string[12];
445 u32 default_extended_timeout;
446 u8 controller_model_number[32];
447 u8 controller_description[64];
448 u8 controller_serial_number[32];
449 u8 ip_version_string[32];
450 u8 firmware_version_string[32];
451 u8 bios_version_string[32];
452 u8 redboot_version_string[32];
453 u8 driver_version_string[32];
454 u8 fw_on_flash_version_string[32];
455 u32 functionalities_supported;
456 u16 max_cdblength;
457 u8 asic_revision;
458 u8 generational_guid[16];
459 u8 hba_port_count;
460 u16 default_link_down_timeout;
461 u8 iscsi_ver_min_max;
462 u8 multifunction_device;
463 u8 cache_valid;
464 u8 hba_status;
465 u8 max_domains_supported;
466 u8 phy_port;
467 u32 firmware_post_status;
468 u32 hba_mtu[8];
469 u32 rsvd1[4];
470};
471
472struct mgmt_controller_attrib {
473 struct mgmt_hba_attribs hba_attribs;
474 u16 pci_vendor_id;
475 u16 pci_device_id;
476 u16 pci_sub_vendor_id;
477 u16 pci_sub_system_id;
478 u8 pci_bus_number;
479 u8 pci_device_number;
480 u8 pci_function_number;
481 u8 interface_type;
482 u64 unique_identifier;
483 u32 rsvd0[5];
484};
485
486struct controller_id {
487 u32 vendor;
488 u32 device;
489 u32 subvendor;
490 u32 subdevice;
491};
492
493struct flash_comp {
494 unsigned long offset;
495 int optype;
496 int size;
497 int img_type;
498};
499
500struct image_hdr {
501 u32 imageid;
502 u32 imageoffset;
503 u32 imagelength;
504 u32 image_checksum;
505 u8 image_version[32];
506};
507struct flash_file_hdr_g2 {
508 u8 sign[32];
509 u32 cksum;
510 u32 antidote;
511 struct controller_id cont_id;
512 u32 file_len;
513 u32 chunk_num;
514 u32 total_chunks;
515 u32 num_imgs;
516 u8 build[24];
517};
518
519struct flash_file_hdr_g3 {
520 u8 sign[52];
521 u8 ufi_version[4];
522 u32 file_len;
523 u32 cksum;
524 u32 antidote;
525 u32 num_imgs;
526 u8 build[24];
527 u8 asic_type_rev;
528 u8 rsvd[31];
529};
530
531struct flash_section_hdr {
532 u32 format_rev;
533 u32 cksum;
534 u32 antidote;
535 u32 num_images;
536 u8 id_string[128];
537 u32 rsvd[4];
538} __packed;
539
540struct flash_section_hdr_g2 {
541 u32 format_rev;
542 u32 cksum;
543 u32 antidote;
544 u32 build_num;
545 u8 id_string[128];
546 u32 rsvd[8];
547} __packed;
548
549struct flash_section_entry {
550 u32 type;
551 u32 offset;
552 u32 pad_size;
553 u32 image_size;
554 u32 cksum;
555 u32 entry_point;
556 u16 optype;
557 u16 rsvd0;
558 u32 rsvd1;
559 u8 ver_data[32];
560} __packed;
561
562struct flash_section_info {
563 u8 cookie[32];
564 struct flash_section_hdr fsec_hdr;
565 struct flash_section_entry fsec_entry[32];
566} __packed;
567
568struct flash_section_info_g2 {
569 u8 cookie[32];
570 struct flash_section_hdr_g2 fsec_hdr;
571 struct flash_section_entry fsec_entry[32];
572} __packed;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index d48806b5cd88..932b93a14965 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -662,48 +662,40 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
662 netif_carrier_off(netdev); 662 netif_carrier_off(netdev);
663} 663}
664 664
665static void be_tx_stats_update(struct be_tx_obj *txo, 665static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
666 u32 wrb_cnt, u32 copied, u32 gso_segs,
667 bool stopped)
668{ 666{
669 struct be_tx_stats *stats = tx_stats(txo); 667 struct be_tx_stats *stats = tx_stats(txo);
670 668
671 u64_stats_update_begin(&stats->sync); 669 u64_stats_update_begin(&stats->sync);
672 stats->tx_reqs++; 670 stats->tx_reqs++;
673 stats->tx_wrbs += wrb_cnt; 671 stats->tx_bytes += skb->len;
674 stats->tx_bytes += copied; 672 stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
675 stats->tx_pkts += (gso_segs ? gso_segs : 1);
676 if (stopped)
677 stats->tx_stops++;
678 u64_stats_update_end(&stats->sync); 673 u64_stats_update_end(&stats->sync);
679} 674}
680 675
681/* Determine number of WRB entries needed to xmit data in an skb */ 676/* Returns number of WRBs needed for the skb */
682static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb, 677static u32 skb_wrb_cnt(struct sk_buff *skb)
683 bool *dummy)
684{ 678{
685 int cnt = (skb->len > skb->data_len); 679 /* +1 for the header wrb */
686 680 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
687 cnt += skb_shinfo(skb)->nr_frags;
688
689 /* to account for hdr wrb */
690 cnt++;
691 if (lancer_chip(adapter) || !(cnt & 1)) {
692 *dummy = false;
693 } else {
694 /* add a dummy to make it an even num */
695 cnt++;
696 *dummy = true;
697 }
698 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
699 return cnt;
700} 681}
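
skb_wrb_cnt() above replaces wrb_cnt_for_skb(): one header WRB, one for the linear head if it holds data, and one per page fragment; padding an odd count with a dummy WRB is now deferred to be_xmit_flush(). A standalone sketch of the same count using a toy skb structure (toy_skb is illustrative, not the kernel's sk_buff):

#include <stdio.h>

/* Toy stand-in for the bits of sk_buff the count depends on. */
struct toy_skb {
	unsigned int head_len;	/* skb_headlen(): linear data bytes */
	unsigned int nr_frags;	/* skb_shinfo(skb)->nr_frags */
};

/* Same arithmetic as the new skb_wrb_cnt(): header WRB + optional
 * linear-head WRB + one WRB per page fragment. */
static unsigned int skb_wrb_cnt(const struct toy_skb *skb)
{
	return 1 + (skb->head_len ? 1 : 0) + skb->nr_frags;
}

int main(void)
{
	struct toy_skb linear = { .head_len = 1500, .nr_frags = 0 };
	struct toy_skb gso    = { .head_len = 66,   .nr_frags = 3 };

	printf("linear skb needs %u WRBs, fragmented skb needs %u\n",
	       skb_wrb_cnt(&linear), skb_wrb_cnt(&gso));
	return 0;
}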
701 682
702static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len) 683static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
703{ 684{
704 wrb->frag_pa_hi = upper_32_bits(addr); 685 wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
705 wrb->frag_pa_lo = addr & 0xFFFFFFFF; 686 wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
706 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK; 687 wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
688 wrb->rsvd0 = 0;
689}
690
691/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
692 * to avoid the swap and shift/mask operations in wrb_fill().
693 */
694static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
695{
696 wrb->frag_pa_hi = 0;
697 wrb->frag_pa_lo = 0;
698 wrb->frag_len = 0;
707 wrb->rsvd0 = 0; 699 wrb->rsvd0 = 0;
708} 700}
709 701
@@ -713,7 +705,7 @@ static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
713 u8 vlan_prio; 705 u8 vlan_prio;
714 u16 vlan_tag; 706 u16 vlan_tag;
715 707
716 vlan_tag = vlan_tx_tag_get(skb); 708 vlan_tag = skb_vlan_tag_get(skb);
717 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; 709 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
718 /* If vlan priority provided by OS is NOT in available bmap */ 710 /* If vlan priority provided by OS is NOT in available bmap */
719 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio))) 711 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
@@ -764,52 +756,57 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
764 SET_TX_WRB_HDR_BITS(udpcs, hdr, 1); 756 SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
765 } 757 }
766 758
767 if (vlan_tx_tag_present(skb)) { 759 if (skb_vlan_tag_present(skb)) {
768 SET_TX_WRB_HDR_BITS(vlan, hdr, 1); 760 SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
769 vlan_tag = be_get_tx_vlan_tag(adapter, skb); 761 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
770 SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag); 762 SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
771 } 763 }
772 764
773 /* To skip HW VLAN tagging: evt = 1, compl = 0 */
774 SET_TX_WRB_HDR_BITS(complete, hdr, !skip_hw_vlan);
775 SET_TX_WRB_HDR_BITS(event, hdr, 1);
776 SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt); 765 SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
777 SET_TX_WRB_HDR_BITS(len, hdr, len); 766 SET_TX_WRB_HDR_BITS(len, hdr, len);
767
768 /* Hack to skip HW VLAN tagging needs evt = 1, compl = 0
769 * When this hack is not needed, the evt bit is set while ringing DB
770 */
771 if (skip_hw_vlan)
772 SET_TX_WRB_HDR_BITS(event, hdr, 1);
778} 773}
779 774
780static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb, 775static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
781 bool unmap_single) 776 bool unmap_single)
782{ 777{
783 dma_addr_t dma; 778 dma_addr_t dma;
779 u32 frag_len = le32_to_cpu(wrb->frag_len);
784 780
785 be_dws_le_to_cpu(wrb, sizeof(*wrb));
786 781
787 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo; 782 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
788 if (wrb->frag_len) { 783 (u64)le32_to_cpu(wrb->frag_pa_lo);
784 if (frag_len) {
789 if (unmap_single) 785 if (unmap_single)
790 dma_unmap_single(dev, dma, wrb->frag_len, 786 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
791 DMA_TO_DEVICE);
792 else 787 else
793 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE); 788 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
794 } 789 }
795} 790}
796 791
797static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq, 792/* Returns the number of WRBs used up by the skb */
798 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb, 793static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
799 bool skip_hw_vlan) 794 struct sk_buff *skb, bool skip_hw_vlan)
800{ 795{
801 dma_addr_t busaddr; 796 u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
802 int i, copied = 0;
803 struct device *dev = &adapter->pdev->dev; 797 struct device *dev = &adapter->pdev->dev;
804 struct sk_buff *first_skb = skb; 798 struct be_queue_info *txq = &txo->q;
805 struct be_eth_wrb *wrb;
806 struct be_eth_hdr_wrb *hdr; 799 struct be_eth_hdr_wrb *hdr;
807 bool map_single = false; 800 bool map_single = false;
808 u16 map_head; 801 struct be_eth_wrb *wrb;
802 dma_addr_t busaddr;
803 u16 head = txq->head;
809 804
810 hdr = queue_head_node(txq); 805 hdr = queue_head_node(txq);
806 wrb_fill_hdr(adapter, hdr, skb, wrb_cnt, skb->len, skip_hw_vlan);
807 be_dws_cpu_to_le(hdr, sizeof(*hdr));
808
811 queue_head_inc(txq); 809 queue_head_inc(txq);
812 map_head = txq->head;
813 810
814 if (skb->len > skb->data_len) { 811 if (skb->len > skb->data_len) {
815 int len = skb_headlen(skb); 812 int len = skb_headlen(skb);
@@ -820,7 +817,6 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
820 map_single = true; 817 map_single = true;
821 wrb = queue_head_node(txq); 818 wrb = queue_head_node(txq);
822 wrb_fill(wrb, busaddr, len); 819 wrb_fill(wrb, busaddr, len);
823 be_dws_cpu_to_le(wrb, sizeof(*wrb));
824 queue_head_inc(txq); 820 queue_head_inc(txq);
825 copied += len; 821 copied += len;
826 } 822 }
@@ -834,35 +830,44 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
834 goto dma_err; 830 goto dma_err;
835 wrb = queue_head_node(txq); 831 wrb = queue_head_node(txq);
836 wrb_fill(wrb, busaddr, skb_frag_size(frag)); 832 wrb_fill(wrb, busaddr, skb_frag_size(frag));
837 be_dws_cpu_to_le(wrb, sizeof(*wrb));
838 queue_head_inc(txq); 833 queue_head_inc(txq);
839 copied += skb_frag_size(frag); 834 copied += skb_frag_size(frag);
840 } 835 }
841 836
842 if (dummy_wrb) { 837 BUG_ON(txo->sent_skb_list[head]);
843 wrb = queue_head_node(txq); 838 txo->sent_skb_list[head] = skb;
844 wrb_fill(wrb, 0, 0); 839 txo->last_req_hdr = head;
845 be_dws_cpu_to_le(wrb, sizeof(*wrb)); 840 atomic_add(wrb_cnt, &txq->used);
846 queue_head_inc(txq); 841 txo->last_req_wrb_cnt = wrb_cnt;
847 } 842 txo->pend_wrb_cnt += wrb_cnt;
848 843
849 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan); 844 be_tx_stats_update(txo, skb);
850 be_dws_cpu_to_le(hdr, sizeof(*hdr)); 845 return wrb_cnt;
851 846
852 return copied;
853dma_err: 847dma_err:
854 txq->head = map_head; 848 /* Bring the queue back to the state it was in before this
849 * routine was invoked.
850 */
851 txq->head = head;
852 /* skip the first wrb (hdr); it's not mapped */
853 queue_head_inc(txq);
855 while (copied) { 854 while (copied) {
856 wrb = queue_head_node(txq); 855 wrb = queue_head_node(txq);
857 unmap_tx_frag(dev, wrb, map_single); 856 unmap_tx_frag(dev, wrb, map_single);
858 map_single = false; 857 map_single = false;
859 copied -= wrb->frag_len; 858 copied -= le32_to_cpu(wrb->frag_len);
860 adapter->drv_stats.dma_map_errors++; 859 adapter->drv_stats.dma_map_errors++;
861 queue_head_inc(txq); 860 queue_head_inc(txq);
862 } 861 }
862 txq->head = head;
863 return 0; 863 return 0;
864} 864}
865 865
866static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
867{
868 return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
869}
870
866static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, 871static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
867 struct sk_buff *skb, 872 struct sk_buff *skb,
868 bool *skip_hw_vlan) 873 bool *skip_hw_vlan)
@@ -873,7 +878,7 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
873 if (unlikely(!skb)) 878 if (unlikely(!skb))
874 return skb; 879 return skb;
875 880
876 if (vlan_tx_tag_present(skb)) 881 if (skb_vlan_tag_present(skb))
877 vlan_tag = be_get_tx_vlan_tag(adapter, skb); 882 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
878 883
879 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) { 884 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
@@ -932,7 +937,7 @@ static bool be_ipv6_exthdr_check(struct sk_buff *skb)
932 937
933static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb) 938static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
934{ 939{
935 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid; 940 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
936} 941}
937 942
938static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb) 943static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
@@ -955,7 +960,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
955 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ? 960 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
956 VLAN_ETH_HLEN : ETH_HLEN; 961 VLAN_ETH_HLEN : ETH_HLEN;
957 if (skb->len <= 60 && 962 if (skb->len <= 60 &&
958 (lancer_chip(adapter) || vlan_tx_tag_present(skb)) && 963 (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
959 is_ipv4_pkt(skb)) { 964 is_ipv4_pkt(skb)) {
960 ip = (struct iphdr *)ip_hdr(skb); 965 ip = (struct iphdr *)ip_hdr(skb);
961 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len)); 966 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
@@ -973,7 +978,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
973 * Manually insert VLAN in pkt. 978 * Manually insert VLAN in pkt.
974 */ 979 */
975 if (skb->ip_summed != CHECKSUM_PARTIAL && 980 if (skb->ip_summed != CHECKSUM_PARTIAL &&
976 vlan_tx_tag_present(skb)) { 981 skb_vlan_tag_present(skb)) {
977 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); 982 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
978 if (unlikely(!skb)) 983 if (unlikely(!skb))
979 goto err; 984 goto err;
@@ -1030,52 +1035,64 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1030 return skb; 1035 return skb;
1031} 1036}
1032 1037
1038static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
1039{
1040 struct be_queue_info *txq = &txo->q;
1041 struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);
1042
1043 /* Mark the last request eventable if it hasn't been marked already */
1044 if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
1045 hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);
1046
1047 /* compose a dummy wrb if there are odd set of wrbs to notify */
1048 if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
1049 wrb_fill_dummy(queue_head_node(txq));
1050 queue_head_inc(txq);
1051 atomic_inc(&txq->used);
1052 txo->pend_wrb_cnt++;
1053 hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
1054 TX_HDR_WRB_NUM_SHIFT);
1055 hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
1056 TX_HDR_WRB_NUM_SHIFT);
1057 }
1058 be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
1059 txo->pend_wrb_cnt = 0;
1060}
1061
1033static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev) 1062static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
1034{ 1063{
1064 bool skip_hw_vlan = false, flush = !skb->xmit_more;
1035 struct be_adapter *adapter = netdev_priv(netdev); 1065 struct be_adapter *adapter = netdev_priv(netdev);
1036 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)]; 1066 u16 q_idx = skb_get_queue_mapping(skb);
1067 struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
1037 struct be_queue_info *txq = &txo->q; 1068 struct be_queue_info *txq = &txo->q;
1038 bool dummy_wrb, stopped = false; 1069 u16 wrb_cnt;
1039 u32 wrb_cnt = 0, copied = 0;
1040 bool skip_hw_vlan = false;
1041 u32 start = txq->head;
1042 1070
1043 skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan); 1071 skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
1044 if (!skb) { 1072 if (unlikely(!skb))
1045 tx_stats(txo)->tx_drv_drops++; 1073 goto drop;
1046 return NETDEV_TX_OK;
1047 }
1048
1049 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
1050 1074
1051 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb, 1075 wrb_cnt = be_xmit_enqueue(adapter, txo, skb, skip_hw_vlan);
1052 skip_hw_vlan); 1076 if (unlikely(!wrb_cnt)) {
1053 if (copied) { 1077 dev_kfree_skb_any(skb);
1054 int gso_segs = skb_shinfo(skb)->gso_segs; 1078 goto drop;
1079 }
1055 1080
1056 /* record the sent skb in the sent_skb table */ 1081 if ((atomic_read(&txq->used) + BE_MAX_TX_FRAG_COUNT) >= txq->len) {
1057 BUG_ON(txo->sent_skb_list[start]); 1082 netif_stop_subqueue(netdev, q_idx);
1058 txo->sent_skb_list[start] = skb; 1083 tx_stats(txo)->tx_stops++;
1084 }
1059 1085
1060 /* Ensure txq has space for the next skb; Else stop the queue 1086 if (flush || __netif_subqueue_stopped(netdev, q_idx))
1061 * *BEFORE* ringing the tx doorbell, so that we serialze the 1087 be_xmit_flush(adapter, txo);
1062 * tx compls of the current transmit which'll wake up the queue
1063 */
1064 atomic_add(wrb_cnt, &txq->used);
1065 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
1066 txq->len) {
1067 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
1068 stopped = true;
1069 }
1070 1088
1071 be_txq_notify(adapter, txo, wrb_cnt); 1089 return NETDEV_TX_OK;
1090drop:
1091 tx_stats(txo)->tx_drv_drops++;
1092 /* Flush the already enqueued tx requests */
1093 if (flush && txo->pend_wrb_cnt)
1094 be_xmit_flush(adapter, txo);
1072 1095
1073 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
1074 } else {
1075 txq->head = start;
1076 tx_stats(txo)->tx_drv_drops++;
1077 dev_kfree_skb_any(skb);
1078 }
1079 return NETDEV_TX_OK; 1096 return NETDEV_TX_OK;
1080} 1097}
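
The rewritten be_xmit() only rings the doorbell (be_xmit_flush() -> be_txq_notify()) when skb->xmit_more is clear or the subqueue had to be stopped, so a burst of packets reaches the HW with a single doorbell write. A userspace sketch of that deferred-doorbell batching (ring_doorbell() is a hypothetical stand-in for the real notify path):

#include <stdbool.h>
#include <stdio.h>

struct txq { unsigned int pend_wrb_cnt; };

/* Hypothetical doorbell write; the driver calls be_txq_notify() here. */
static void ring_doorbell(struct txq *q)
{
	printf("doorbell: notify HW of %u pending WRBs\n", q->pend_wrb_cnt);
	q->pend_wrb_cnt = 0;
}

/* Enqueue one packet; ring the doorbell only when the stack says this
 * is the last skb of the batch (xmit_more == false). */
static void xmit_one(struct txq *q, unsigned int wrb_cnt, bool xmit_more)
{
	q->pend_wrb_cnt += wrb_cnt;
	if (!xmit_more)
		ring_doorbell(q);
}

int main(void)
{
	struct txq q = { 0 };

	xmit_one(&q, 2, true);	/* batched: no doorbell yet */
	xmit_one(&q, 3, true);
	xmit_one(&q, 2, false);	/* end of batch: one doorbell for 7 WRBs */
	return 0;
}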
1081 1098
@@ -1096,6 +1113,43 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
1096 return 0; 1113 return 0;
1097} 1114}
1098 1115
1116static inline bool be_in_all_promisc(struct be_adapter *adapter)
1117{
1118 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1119 BE_IF_FLAGS_ALL_PROMISCUOUS;
1120}
1121
1122static int be_set_vlan_promisc(struct be_adapter *adapter)
1123{
1124 struct device *dev = &adapter->pdev->dev;
1125 int status;
1126
1127 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1128 return 0;
1129
1130 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1131 if (!status) {
1132 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1133 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1134 } else {
1135 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1136 }
1137 return status;
1138}
1139
1140static int be_clear_vlan_promisc(struct be_adapter *adapter)
1141{
1142 struct device *dev = &adapter->pdev->dev;
1143 int status;
1144
1145 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1146 if (!status) {
1147 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1148 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1149 }
1150 return status;
1151}
1152
1099/* 1153/*
1100 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE. 1154 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1101 * If the user configures more, place BE in vlan promiscuous mode. 1155 * If the user configures more, place BE in vlan promiscuous mode.
@@ -1108,11 +1162,11 @@ static int be_vid_config(struct be_adapter *adapter)
1108 int status = 0; 1162 int status = 0;
1109 1163
1110 /* No need to further configure vids if in promiscuous mode */ 1164 /* No need to further configure vids if in promiscuous mode */
1111 if (adapter->promiscuous) 1165 if (be_in_all_promisc(adapter))
1112 return 0; 1166 return 0;
1113 1167
1114 if (adapter->vlans_added > be_max_vlans(adapter)) 1168 if (adapter->vlans_added > be_max_vlans(adapter))
1115 goto set_vlan_promisc; 1169 return be_set_vlan_promisc(adapter);
1116 1170
1117 /* Construct VLAN Table to give to HW */ 1171 /* Construct VLAN Table to give to HW */
1118 for_each_set_bit(i, adapter->vids, VLAN_N_VID) 1172 for_each_set_bit(i, adapter->vids, VLAN_N_VID)
@@ -1120,36 +1174,14 @@ static int be_vid_config(struct be_adapter *adapter)
1120 1174
1121 status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num); 1175 status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
1122 if (status) { 1176 if (status) {
1177 dev_err(dev, "Setting HW VLAN filtering failed\n");
1123 /* Set to VLAN promisc mode as setting VLAN filter failed */ 1178 /* Set to VLAN promisc mode as setting VLAN filter failed */
1124 if (addl_status(status) == 1179 if (addl_status(status) ==
1125 MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES) 1180 MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
1126 goto set_vlan_promisc; 1181 return be_set_vlan_promisc(adapter);
1127 dev_err(dev, "Setting HW VLAN filtering failed\n"); 1182 } else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
1128 } else { 1183 status = be_clear_vlan_promisc(adapter);
1129 if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
1130 /* hw VLAN filtering re-enabled. */
1131 status = be_cmd_rx_filter(adapter,
1132 BE_FLAGS_VLAN_PROMISC, OFF);
1133 if (!status) {
1134 dev_info(dev,
1135 "Disabling VLAN Promiscuous mode\n");
1136 adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
1137 }
1138 }
1139 } 1184 }
1140
1141 return status;
1142
1143set_vlan_promisc:
1144 if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
1145 return 0;
1146
1147 status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
1148 if (!status) {
1149 dev_info(dev, "Enable VLAN Promiscuous mode\n");
1150 adapter->flags |= BE_FLAGS_VLAN_PROMISC;
1151 } else
1152 dev_err(dev, "Failed to enable VLAN Promiscuous mode\n");
1153 return status; 1185 return status;
1154} 1186}
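
be_vid_config() now delegates to be_set_vlan_promisc()/be_clear_vlan_promisc(): program the exact VLAN table while it fits, fall back to VLAN promiscuous when more than 64 VLANs are configured or the firmware reports insufficient resources, and return to HW filtering once the table programs cleanly again. A compact sketch of that decision (program_vlan_table() is a hypothetical stand-in for be_cmd_vlan_config() that pretends oversized tables are rejected):

#include <stdbool.h>
#include <stdio.h>

#define MAX_HW_VLANS 64	/* BE_NUM_VLANS_SUPPORTED in the driver */

static int program_vlan_table(unsigned int count)
{
	return count <= MAX_HW_VLANS ? 0 : -1;
}

static const char *vid_config(unsigned int vlans_added, bool *vlan_promisc)
{
	if (vlans_added > MAX_HW_VLANS || program_vlan_table(vlans_added)) {
		*vlan_promisc = true;
		return "VLAN promiscuous";
	}
	*vlan_promisc = false;		/* HW filtering (re-)enabled */
	return "exact VLAN filtering";
}

int main(void)
{
	bool promisc;

	printf("%s\n", vid_config(10, &promisc));
	printf("%s\n", vid_config(100, &promisc));
	return 0;
}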
1155 1187
@@ -1191,79 +1223,99 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
1191 return be_vid_config(adapter); 1223 return be_vid_config(adapter);
1192} 1224}
1193 1225
1194static void be_clear_promisc(struct be_adapter *adapter) 1226static void be_clear_all_promisc(struct be_adapter *adapter)
1195{ 1227{
1196 adapter->promiscuous = false; 1228 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
1197 adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC); 1229 adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
1230}
1198 1231
1199 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF); 1232static void be_set_all_promisc(struct be_adapter *adapter)
1233{
1234 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
1235 adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
1200} 1236}
1201 1237
1202static void be_set_rx_mode(struct net_device *netdev) 1238static void be_set_mc_promisc(struct be_adapter *adapter)
1203{ 1239{
1204 struct be_adapter *adapter = netdev_priv(netdev);
1205 int status; 1240 int status;
1206 1241
1207 if (netdev->flags & IFF_PROMISC) { 1242 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1208 be_cmd_rx_filter(adapter, IFF_PROMISC, ON); 1243 return;
1209 adapter->promiscuous = true;
1210 goto done;
1211 }
1212 1244
1213 /* BE was previously in promiscuous mode; disable it */ 1245 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1214 if (adapter->promiscuous) { 1246 if (!status)
1215 be_clear_promisc(adapter); 1247 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1216 if (adapter->vlans_added) 1248}
1217 be_vid_config(adapter); 1249
1250static void be_set_mc_list(struct be_adapter *adapter)
1251{
1252 int status;
1253
1254 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1255 if (!status)
1256 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1257 else
1258 be_set_mc_promisc(adapter);
1259}
1260
1261static void be_set_uc_list(struct be_adapter *adapter)
1262{
1263 struct netdev_hw_addr *ha;
1264 int i = 1; /* First slot is claimed by the Primary MAC */
1265
1266 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
1267 be_cmd_pmac_del(adapter, adapter->if_handle,
1268 adapter->pmac_id[i], 0);
1269
1270 if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
1271 be_set_all_promisc(adapter);
1272 return;
1218 } 1273 }
1219 1274
1220 /* Enable multicast promisc if num configured exceeds what we support */ 1275 netdev_for_each_uc_addr(ha, adapter->netdev) {
1221 if (netdev->flags & IFF_ALLMULTI || 1276 adapter->uc_macs++; /* First slot is for Primary MAC */
1222 netdev_mc_count(netdev) > be_max_mc(adapter)) 1277 be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
1223 goto set_mcast_promisc; 1278 &adapter->pmac_id[adapter->uc_macs], 0);
1279 }
1280}
1224 1281
1225 if (netdev_uc_count(netdev) != adapter->uc_macs) { 1282static void be_clear_uc_list(struct be_adapter *adapter)
1226 struct netdev_hw_addr *ha; 1283{
1227 int i = 1; /* First slot is claimed by the Primary MAC */ 1284 int i;
1228 1285
1229 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) { 1286 for (i = 1; i < (adapter->uc_macs + 1); i++)
1230 be_cmd_pmac_del(adapter, adapter->if_handle, 1287 be_cmd_pmac_del(adapter, adapter->if_handle,
1231 adapter->pmac_id[i], 0); 1288 adapter->pmac_id[i], 0);
1232 } 1289 adapter->uc_macs = 0;
1290}
1233 1291
1234 if (netdev_uc_count(netdev) > be_max_uc(adapter)) { 1292static void be_set_rx_mode(struct net_device *netdev)
1235 be_cmd_rx_filter(adapter, IFF_PROMISC, ON); 1293{
1236 adapter->promiscuous = true; 1294 struct be_adapter *adapter = netdev_priv(netdev);
1237 goto done;
1238 }
1239 1295
1240 netdev_for_each_uc_addr(ha, adapter->netdev) { 1296 if (netdev->flags & IFF_PROMISC) {
1241 adapter->uc_macs++; /* First slot is for Primary MAC */ 1297 be_set_all_promisc(adapter);
1242 be_cmd_pmac_add(adapter, (u8 *)ha->addr, 1298 return;
1243 adapter->if_handle,
1244 &adapter->pmac_id[adapter->uc_macs], 0);
1245 }
1246 } 1299 }
1247 1300
1248 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON); 1301 /* Interface was previously in promiscuous mode; disable it */
1249 if (!status) { 1302 if (be_in_all_promisc(adapter)) {
1250 if (adapter->flags & BE_FLAGS_MCAST_PROMISC) 1303 be_clear_all_promisc(adapter);
1251 adapter->flags &= ~BE_FLAGS_MCAST_PROMISC; 1304 if (adapter->vlans_added)
1252 goto done; 1305 be_vid_config(adapter);
1253 } 1306 }
1254 1307
1255set_mcast_promisc: 1308 /* Enable multicast promisc if num configured exceeds what we support */
1256 if (adapter->flags & BE_FLAGS_MCAST_PROMISC) 1309 if (netdev->flags & IFF_ALLMULTI ||
1310 netdev_mc_count(netdev) > be_max_mc(adapter)) {
1311 be_set_mc_promisc(adapter);
1257 return; 1312 return;
1313 }
1258 1314
1259 /* Set to MCAST promisc mode if setting MULTICAST address fails 1315 if (netdev_uc_count(netdev) != adapter->uc_macs)
1260 * or if num configured exceeds what we support 1316 be_set_uc_list(adapter);
1261 */ 1317
1262 status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON); 1318 be_set_mc_list(adapter);
1263 if (!status)
1264 adapter->flags |= BE_FLAGS_MCAST_PROMISC;
1265done:
1266 return;
1267} 1319}
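
be_set_rx_mode() is now a short decision ladder: all-promiscuous if IFF_PROMISC, multicast-promiscuous if IFF_ALLMULTI is set or the multicast list exceeds what the interface supports, otherwise exact unicast and multicast lists. A toy sketch of that ordering (field names and the limit below are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

/* Toy view of the rx-mode inputs the rewritten be_set_rx_mode() keys on. */
struct rx_mode {
	bool promisc;		/* IFF_PROMISC */
	bool allmulti;		/* IFF_ALLMULTI */
	unsigned int mc_cnt;	/* configured multicast addresses */
	unsigned int mc_max;	/* what the interface supports */
};

static const char *pick_rx_filter(const struct rx_mode *m)
{
	if (m->promisc)
		return "all promiscuous";
	if (m->allmulti || m->mc_cnt > m->mc_max)
		return "multicast promiscuous";
	return "exact unicast + multicast lists";
}

int main(void)
{
	struct rx_mode m = { .allmulti = false, .mc_cnt = 70, .mc_max = 64 };

	printf("filter mode: %s\n", pick_rx_filter(&m));
	return 0;
}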
1268 1320
1269static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) 1321static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
@@ -1959,32 +2011,34 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1959static u16 be_tx_compl_process(struct be_adapter *adapter, 2011static u16 be_tx_compl_process(struct be_adapter *adapter,
1960 struct be_tx_obj *txo, u16 last_index) 2012 struct be_tx_obj *txo, u16 last_index)
1961{ 2013{
2014 struct sk_buff **sent_skbs = txo->sent_skb_list;
1962 struct be_queue_info *txq = &txo->q; 2015 struct be_queue_info *txq = &txo->q;
2016 u16 frag_index, num_wrbs = 0;
2017 struct sk_buff *skb = NULL;
2018 bool unmap_skb_hdr = false;
1963 struct be_eth_wrb *wrb; 2019 struct be_eth_wrb *wrb;
1964 struct sk_buff **sent_skbs = txo->sent_skb_list;
1965 struct sk_buff *sent_skb;
1966 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1967 bool unmap_skb_hdr = true;
1968
1969 sent_skb = sent_skbs[txq->tail];
1970 BUG_ON(!sent_skb);
1971 sent_skbs[txq->tail] = NULL;
1972
1973 /* skip header wrb */
1974 queue_tail_inc(txq);
1975 2020
1976 do { 2021 do {
1977 cur_index = txq->tail; 2022 if (sent_skbs[txq->tail]) {
2023 /* Free skb from prev req */
2024 if (skb)
2025 dev_consume_skb_any(skb);
2026 skb = sent_skbs[txq->tail];
2027 sent_skbs[txq->tail] = NULL;
2028 queue_tail_inc(txq); /* skip hdr wrb */
2029 num_wrbs++;
2030 unmap_skb_hdr = true;
2031 }
1978 wrb = queue_tail_node(txq); 2032 wrb = queue_tail_node(txq);
2033 frag_index = txq->tail;
1979 unmap_tx_frag(&adapter->pdev->dev, wrb, 2034 unmap_tx_frag(&adapter->pdev->dev, wrb,
1980 (unmap_skb_hdr && skb_headlen(sent_skb))); 2035 (unmap_skb_hdr && skb_headlen(skb)));
1981 unmap_skb_hdr = false; 2036 unmap_skb_hdr = false;
1982
1983 num_wrbs++;
1984 queue_tail_inc(txq); 2037 queue_tail_inc(txq);
1985 } while (cur_index != last_index); 2038 num_wrbs++;
2039 } while (frag_index != last_index);
2040 dev_consume_skb_any(skb);
1986 2041
1987 dev_consume_skb_any(sent_skb);
1988 return num_wrbs; 2042 return num_wrbs;
1989} 2043}
1990 2044
@@ -2068,12 +2122,11 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
2068 2122
2069static void be_tx_compl_clean(struct be_adapter *adapter) 2123static void be_tx_compl_clean(struct be_adapter *adapter)
2070{ 2124{
2125 u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
2126 struct device *dev = &adapter->pdev->dev;
2071 struct be_tx_obj *txo; 2127 struct be_tx_obj *txo;
2072 struct be_queue_info *txq; 2128 struct be_queue_info *txq;
2073 struct be_eth_tx_compl *txcp; 2129 struct be_eth_tx_compl *txcp;
2074 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
2075 struct sk_buff *sent_skb;
2076 bool dummy_wrb;
2077 int i, pending_txqs; 2130 int i, pending_txqs;
2078 2131
2079 /* Stop polling for compls when HW has been silent for 10ms */ 2132 /* Stop polling for compls when HW has been silent for 10ms */
@@ -2095,7 +2148,7 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
2095 atomic_sub(num_wrbs, &txq->used); 2148 atomic_sub(num_wrbs, &txq->used);
2096 timeo = 0; 2149 timeo = 0;
2097 } 2150 }
2098 if (atomic_read(&txq->used) == 0) 2151 if (atomic_read(&txq->used) == txo->pend_wrb_cnt)
2099 pending_txqs--; 2152 pending_txqs--;
2100 } 2153 }
2101 2154
@@ -2105,21 +2158,29 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
2105 mdelay(1); 2158 mdelay(1);
2106 } while (true); 2159 } while (true);
2107 2160
2161 /* Free enqueued TX that was never notified to HW */
2108 for_all_tx_queues(adapter, txo, i) { 2162 for_all_tx_queues(adapter, txo, i) {
2109 txq = &txo->q; 2163 txq = &txo->q;
2110 if (atomic_read(&txq->used))
2111 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
2112 atomic_read(&txq->used));
2113 2164
2114 /* free posted tx for which compls will never arrive */ 2165 if (atomic_read(&txq->used)) {
2115 while (atomic_read(&txq->used)) { 2166 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2116 sent_skb = txo->sent_skb_list[txq->tail]; 2167 i, atomic_read(&txq->used));
2168 notified_idx = txq->tail;
2117 end_idx = txq->tail; 2169 end_idx = txq->tail;
2118 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb, 2170 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2119 &dummy_wrb); 2171 txq->len);
2120 index_adv(&end_idx, num_wrbs - 1, txq->len); 2172 /* Use the tx-compl process logic to handle requests
2173 * that were not sent to the HW.
2174 */
2121 num_wrbs = be_tx_compl_process(adapter, txo, end_idx); 2175 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2122 atomic_sub(num_wrbs, &txq->used); 2176 atomic_sub(num_wrbs, &txq->used);
2177 BUG_ON(atomic_read(&txq->used));
2178 txo->pend_wrb_cnt = 0;
2179 /* Since hw was never notified of these requests,
2180 * reset TXQ indices
2181 */
2182 txq->head = notified_idx;
2183 txq->tail = notified_idx;
2123 } 2184 }
2124 } 2185 }
2125} 2186}
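
For WRBs that were enqueued but never notified to the HW, the new cleanup advances an index from the ring tail by (used - 1) entries, reuses the normal completion path to unmap them, then resets head and tail to the last notified position. The wrap-around arithmetic is the interesting bit; a sketch equivalent to the driver's index_adv() helper, assuming a simple modulo wrap:

#include <stdio.h>

/* Advance a ring index by count entries, wrapping at the ring length. */
static unsigned int index_adv(unsigned int idx, unsigned int count,
			      unsigned int ring_len)
{
	return (idx + count) % ring_len;
}

int main(void)
{
	unsigned int ring_len = 256, tail = 250, used = 10;
	unsigned int end_idx = index_adv(tail, used - 1, ring_len);

	printf("last pending WRB sits at index %u\n", end_idx); /* wraps to 3 */
	return 0;
}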
@@ -2514,6 +2575,106 @@ static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2514 } 2575 }
2515} 2576}
2516 2577
2578#ifdef CONFIG_NET_RX_BUSY_POLL
2579static inline bool be_lock_napi(struct be_eq_obj *eqo)
2580{
2581 bool status = true;
2582
2583 spin_lock(&eqo->lock); /* BH is already disabled */
2584 if (eqo->state & BE_EQ_LOCKED) {
2585 WARN_ON(eqo->state & BE_EQ_NAPI);
2586 eqo->state |= BE_EQ_NAPI_YIELD;
2587 status = false;
2588 } else {
2589 eqo->state = BE_EQ_NAPI;
2590 }
2591 spin_unlock(&eqo->lock);
2592 return status;
2593}
2594
2595static inline void be_unlock_napi(struct be_eq_obj *eqo)
2596{
2597 spin_lock(&eqo->lock); /* BH is already disabled */
2598
2599 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
2600 eqo->state = BE_EQ_IDLE;
2601
2602 spin_unlock(&eqo->lock);
2603}
2604
2605static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2606{
2607 bool status = true;
2608
2609 spin_lock_bh(&eqo->lock);
2610 if (eqo->state & BE_EQ_LOCKED) {
2611 eqo->state |= BE_EQ_POLL_YIELD;
2612 status = false;
2613 } else {
2614 eqo->state |= BE_EQ_POLL;
2615 }
2616 spin_unlock_bh(&eqo->lock);
2617 return status;
2618}
2619
2620static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2621{
2622 spin_lock_bh(&eqo->lock);
2623
2624 WARN_ON(eqo->state & (BE_EQ_NAPI));
2625 eqo->state = BE_EQ_IDLE;
2626
2627 spin_unlock_bh(&eqo->lock);
2628}
2629
2630static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2631{
2632 spin_lock_init(&eqo->lock);
2633 eqo->state = BE_EQ_IDLE;
2634}
2635
2636static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2637{
2638 local_bh_disable();
2639
2640 /* It's enough to just acquire napi lock on the eqo to stop
 2641 * be_busy_poll() from processing any queues.
2642 */
2643 while (!be_lock_napi(eqo))
2644 mdelay(1);
2645
2646 local_bh_enable();
2647}
2648
2649#else /* CONFIG_NET_RX_BUSY_POLL */
2650
2651static inline bool be_lock_napi(struct be_eq_obj *eqo)
2652{
2653 return true;
2654}
2655
2656static inline void be_unlock_napi(struct be_eq_obj *eqo)
2657{
2658}
2659
2660static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2661{
2662 return false;
2663}
2664
2665static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2666{
2667}
2668
2669static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2670{
2671}
2672
2673static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2674{
2675}
2676#endif /* CONFIG_NET_RX_BUSY_POLL */
2677
2517int be_poll(struct napi_struct *napi, int budget) 2678int be_poll(struct napi_struct *napi, int budget)
2518{ 2679{
2519 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi); 2680 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
@@ -2833,11 +2994,7 @@ static int be_close(struct net_device *netdev)
2833 be_tx_compl_clean(adapter); 2994 be_tx_compl_clean(adapter);
2834 2995
2835 be_rx_qs_destroy(adapter); 2996 be_rx_qs_destroy(adapter);
2836 2997 be_clear_uc_list(adapter);
2837 for (i = 1; i < (adapter->uc_macs + 1); i++)
2838 be_cmd_pmac_del(adapter, adapter->if_handle,
2839 adapter->pmac_id[i], 0);
2840 adapter->uc_macs = 0;
2841 2998
2842 for_all_evt_queues(adapter, eqo, i) { 2999 for_all_evt_queues(adapter, eqo, i) {
2843 if (msix_enabled(adapter)) 3000 if (msix_enabled(adapter))
@@ -3008,6 +3165,19 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
3008 return status; 3165 return status;
3009} 3166}
3010 3167
3168static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3169{
3170 u32 addr;
3171
3172 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3173
3174 mac[5] = (u8)(addr & 0xFF);
3175 mac[4] = (u8)((addr >> 8) & 0xFF);
3176 mac[3] = (u8)((addr >> 16) & 0xFF);
3177 /* Use the OUI from the current MAC address */
3178 memcpy(mac, adapter->netdev->dev_addr, 3);
3179}
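
be_vf_eth_addr_generate() hashes the PF MAC into 32 bits, uses the low three bytes as the NIC-specific half of the seed VF MAC, and copies the PF's OUI over the top. A userspace sketch of the same derivation (toy_hash() stands in for the kernel's jhash(), and the PF MAC below is made up):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for jhash(); any 32-bit mix works for the sketch. */
static uint32_t toy_hash(const uint8_t *buf, size_t len)
{
	uint32_t h = 0x9747b28c;
	size_t i;

	for (i = 0; i < len; i++)
		h = h * 31 + buf[i];
	return h;
}

/* Derive a seed VF MAC: keep the PF's OUI (first 3 bytes), fill the
 * NIC-specific half from a hash of the PF MAC. */
static void vf_mac_generate(const uint8_t pf_mac[6], uint8_t mac[6])
{
	uint32_t addr = toy_hash(pf_mac, 6);

	mac[5] = (uint8_t)(addr & 0xFF);
	mac[4] = (uint8_t)((addr >> 8) & 0xFF);
	mac[3] = (uint8_t)((addr >> 16) & 0xFF);
	memcpy(mac, pf_mac, 3);		/* preserve the OUI */
}

int main(void)
{
	uint8_t pf[6] = { 0x00, 0x00, 0xc9, 0x12, 0x34, 0x56 };
	uint8_t vf[6];

	vf_mac_generate(pf, vf);
	printf("seed VF MAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
	       vf[0], vf[1], vf[2], vf[3], vf[4], vf[5]);
	return 0;
}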
3180
3011/* 3181/*
3012 * Generate a seed MAC address from the PF MAC Address using jhash. 3182 * Generate a seed MAC address from the PF MAC Address using jhash.
3013 * MAC Address for VFs are assigned incrementally starting from the seed. 3183 * MAC Address for VFs are assigned incrementally starting from the seed.
@@ -3108,14 +3278,9 @@ static void be_cancel_worker(struct be_adapter *adapter)
3108 3278
3109static void be_mac_clear(struct be_adapter *adapter) 3279static void be_mac_clear(struct be_adapter *adapter)
3110{ 3280{
3111 int i;
3112
3113 if (adapter->pmac_id) { 3281 if (adapter->pmac_id) {
3114 for (i = 0; i < (adapter->uc_macs + 1); i++) 3282 be_cmd_pmac_del(adapter, adapter->if_handle,
3115 be_cmd_pmac_del(adapter, adapter->if_handle, 3283 adapter->pmac_id[0], 0);
3116 adapter->pmac_id[i], 0);
3117 adapter->uc_macs = 0;
3118
3119 kfree(adapter->pmac_id); 3284 kfree(adapter->pmac_id);
3120 adapter->pmac_id = NULL; 3285 adapter->pmac_id = NULL;
3121 } 3286 }
@@ -3171,13 +3336,32 @@ static int be_clear(struct be_adapter *adapter)
3171 return 0; 3336 return 0;
3172} 3337}
3173 3338
3339static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3340 u32 cap_flags, u32 vf)
3341{
3342 u32 en_flags;
3343 int status;
3344
3345 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3346 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
3347 BE_IF_FLAGS_RSS;
3348
3349 en_flags &= cap_flags;
3350
3351 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3352 if_handle, vf);
3353
3354 return status;
3355}
3356
3174static int be_vfs_if_create(struct be_adapter *adapter) 3357static int be_vfs_if_create(struct be_adapter *adapter)
3175{ 3358{
3176 struct be_resources res = {0}; 3359 struct be_resources res = {0};
3177 struct be_vf_cfg *vf_cfg; 3360 struct be_vf_cfg *vf_cfg;
3178 u32 cap_flags, en_flags, vf; 3361 u32 cap_flags, vf;
3179 int status = 0; 3362 int status;
3180 3363
3364 /* If a FW profile exists, then cap_flags are updated */
3181 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | 3365 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3182 BE_IF_FLAGS_MULTICAST; 3366 BE_IF_FLAGS_MULTICAST;
3183 3367
@@ -3189,18 +3373,13 @@ static int be_vfs_if_create(struct be_adapter *adapter)
3189 cap_flags = res.if_cap_flags; 3373 cap_flags = res.if_cap_flags;
3190 } 3374 }
3191 3375
3192 /* If a FW profile exists, then cap_flags are updated */ 3376 status = be_if_create(adapter, &vf_cfg->if_handle,
3193 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED | 3377 cap_flags, vf + 1);
3194 BE_IF_FLAGS_BROADCAST |
3195 BE_IF_FLAGS_MULTICAST);
3196 status =
3197 be_cmd_if_create(adapter, cap_flags, en_flags,
3198 &vf_cfg->if_handle, vf + 1);
3199 if (status) 3378 if (status)
3200 goto err; 3379 return status;
3201 } 3380 }
3202err: 3381
3203 return status; 3382 return 0;
3204} 3383}
3205 3384
3206static int be_vf_setup_init(struct be_adapter *adapter) 3385static int be_vf_setup_init(struct be_adapter *adapter)
@@ -3385,7 +3564,7 @@ static void be_setup_init(struct be_adapter *adapter)
3385 adapter->phy.link_speed = -1; 3564 adapter->phy.link_speed = -1;
3386 adapter->if_handle = -1; 3565 adapter->if_handle = -1;
3387 adapter->be3_native = false; 3566 adapter->be3_native = false;
3388 adapter->promiscuous = false; 3567 adapter->if_flags = 0;
3389 if (be_physfn(adapter)) 3568 if (be_physfn(adapter))
3390 adapter->cmd_privileges = MAX_PRIVILEGES; 3569 adapter->cmd_privileges = MAX_PRIVILEGES;
3391 else 3570 else
@@ -3512,7 +3691,9 @@ static int be_get_config(struct be_adapter *adapter)
3512 if (status) 3691 if (status)
3513 return status; 3692 return status;
3514 3693
3515 if (be_physfn(adapter)) { 3694 be_cmd_query_port_name(adapter);
3695
3696 if (be_physfn(adapter)) {
3516 status = be_cmd_get_active_profile(adapter, &profile_id); 3697 status = be_cmd_get_active_profile(adapter, &profile_id);
3517 if (!status) 3698 if (!status)
3518 dev_info(&adapter->pdev->dev, 3699 dev_info(&adapter->pdev->dev,
@@ -3638,10 +3819,20 @@ int be_update_queues(struct be_adapter *adapter)
3638 return status; 3819 return status;
3639} 3820}
3640 3821
3822static inline int fw_major_num(const char *fw_ver)
3823{
3824 int fw_major = 0, i;
3825
3826 i = sscanf(fw_ver, "%d.", &fw_major);
3827 if (i != 1)
3828 return 0;
3829
3830 return fw_major;
3831}
3832
3641static int be_setup(struct be_adapter *adapter) 3833static int be_setup(struct be_adapter *adapter)
3642{ 3834{
3643 struct device *dev = &adapter->pdev->dev; 3835 struct device *dev = &adapter->pdev->dev;
3644 u32 tx_fc, rx_fc, en_flags;
3645 int status; 3836 int status;
3646 3837
3647 be_setup_init(adapter); 3838 be_setup_init(adapter);
@@ -3657,13 +3848,8 @@ static int be_setup(struct be_adapter *adapter)
3657 if (status) 3848 if (status)
3658 goto err; 3849 goto err;
3659 3850
3660 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | 3851 status = be_if_create(adapter, &adapter->if_handle,
3661 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS; 3852 be_if_cap_flags(adapter), 0);
3662 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3663 en_flags |= BE_IF_FLAGS_RSS;
3664 en_flags = en_flags & be_if_cap_flags(adapter);
3665 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3666 &adapter->if_handle, 0);
3667 if (status) 3853 if (status)
3668 goto err; 3854 goto err;
3669 3855
@@ -3696,11 +3882,14 @@ static int be_setup(struct be_adapter *adapter)
3696 3882
3697 be_cmd_get_acpi_wol_cap(adapter); 3883 be_cmd_get_acpi_wol_cap(adapter);
3698 3884
3699 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc); 3885 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
3886 adapter->rx_fc);
3887 if (status)
3888 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
3889 &adapter->rx_fc);
3700 3890
3701 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) 3891 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
3702 be_cmd_set_flow_control(adapter, adapter->tx_fc, 3892 adapter->tx_fc, adapter->rx_fc);
3703 adapter->rx_fc);
3704 3893
3705 if (be_physfn(adapter)) 3894 if (be_physfn(adapter))
3706 be_cmd_set_logical_link_config(adapter, 3895 be_cmd_set_logical_link_config(adapter,
@@ -3739,7 +3928,7 @@ static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3739 3928
3740static bool phy_flashing_required(struct be_adapter *adapter) 3929static bool phy_flashing_required(struct be_adapter *adapter)
3741{ 3930{
3742 return (adapter->phy.phy_type == TN_8022 && 3931 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
3743 adapter->phy.interface_type == PHY_TYPE_BASET_10GB); 3932 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3744} 3933}
3745 3934
@@ -3790,7 +3979,8 @@ static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3790 int status; 3979 int status;
3791 u8 crc[4]; 3980 u8 crc[4];
3792 3981
3793 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4); 3982 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
3983 img_size - 4);
3794 if (status) 3984 if (status)
3795 return status; 3985 return status;
3796 3986
@@ -3806,13 +3996,13 @@ static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3806} 3996}
3807 3997
3808static int be_flash(struct be_adapter *adapter, const u8 *img, 3998static int be_flash(struct be_adapter *adapter, const u8 *img,
3809 struct be_dma_mem *flash_cmd, int optype, int img_size) 3999 struct be_dma_mem *flash_cmd, int optype, int img_size,
4000 u32 img_offset)
3810{ 4001{
4002 u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
3811 struct be_cmd_write_flashrom *req = flash_cmd->va; 4003 struct be_cmd_write_flashrom *req = flash_cmd->va;
3812 u32 total_bytes, flash_op, num_bytes;
3813 int status; 4004 int status;
3814 4005
3815 total_bytes = img_size;
3816 while (total_bytes) { 4006 while (total_bytes) {
3817 num_bytes = min_t(u32, 32*1024, total_bytes); 4007 num_bytes = min_t(u32, 32*1024, total_bytes);
3818 4008
@@ -3833,12 +4023,15 @@ static int be_flash(struct be_adapter *adapter, const u8 *img,
3833 memcpy(req->data_buf, img, num_bytes); 4023 memcpy(req->data_buf, img, num_bytes);
3834 img += num_bytes; 4024 img += num_bytes;
3835 status = be_cmd_write_flashrom(adapter, flash_cmd, optype, 4025 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3836 flash_op, num_bytes); 4026 flash_op, img_offset +
4027 bytes_sent, num_bytes);
3837 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST && 4028 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
3838 optype == OPTYPE_PHY_FW) 4029 optype == OPTYPE_PHY_FW)
3839 break; 4030 break;
3840 else if (status) 4031 else if (status)
3841 return status; 4032 return status;
4033
4034 bytes_sent += num_bytes;
3842 } 4035 }
3843 return 0; 4036 return 0;
3844} 4037}
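
be_flash() now carries a running byte offset so each 32 KiB chunk is written at img_offset + bytes_sent, which is what OFFSET-based flashing on newer firmware requires. A sketch of that chunked write loop (flash_write() is a hypothetical stand-in for be_cmd_write_flashrom()):

#include <stdio.h>

#define CHUNK (32 * 1024)

/* Hypothetical device write; the driver issues one firmware command
 * per chunk here. */
static int flash_write(unsigned int offset, const unsigned char *buf,
		       unsigned int len)
{
	(void)buf;
	printf("write %u bytes at flash offset %u\n", len, offset);
	return 0;
}

/* Stream the image in 32 KiB pieces, carrying the running offset. */
static int flash_image(const unsigned char *img, unsigned int img_size,
		       unsigned int img_offset)
{
	unsigned int total = img_size, sent = 0, n;
	int err;

	while (total) {
		n = total < CHUNK ? total : CHUNK;
		err = flash_write(img_offset + sent, img + sent, n);
		if (err)
			return err;
		sent += n;
		total -= n;
	}
	return 0;
}

int main(void)
{
	static unsigned char img[100000];

	return flash_image(img, sizeof(img), 262144);
}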
@@ -3906,6 +4099,7 @@ static int be_flash_BEx(struct be_adapter *adapter,
3906 pflashcomp = gen2_flash_types; 4099 pflashcomp = gen2_flash_types;
3907 filehdr_size = sizeof(struct flash_file_hdr_g2); 4100 filehdr_size = sizeof(struct flash_file_hdr_g2);
3908 num_comp = ARRAY_SIZE(gen2_flash_types); 4101 num_comp = ARRAY_SIZE(gen2_flash_types);
4102 img_hdrs_size = 0;
3909 } 4103 }
3910 4104
3911 /* Get flash section info*/ 4105 /* Get flash section info*/
@@ -3950,7 +4144,7 @@ static int be_flash_BEx(struct be_adapter *adapter,
3950 return -1; 4144 return -1;
3951 4145
3952 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype, 4146 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3953 pflashcomp[i].size); 4147 pflashcomp[i].size, 0);
3954 if (status) { 4148 if (status) {
3955 dev_err(dev, "Flashing section type 0x%x failed\n", 4149 dev_err(dev, "Flashing section type 0x%x failed\n",
3956 pflashcomp[i].img_type); 4150 pflashcomp[i].img_type);
@@ -4017,12 +4211,12 @@ static int be_flash_skyhawk(struct be_adapter *adapter,
4017 struct be_dma_mem *flash_cmd, int num_of_images) 4211 struct be_dma_mem *flash_cmd, int num_of_images)
4018{ 4212{
4019 int img_hdrs_size = num_of_images * sizeof(struct image_hdr); 4213 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
4214 bool crc_match, old_fw_img, flash_offset_support = true;
4020 struct device *dev = &adapter->pdev->dev; 4215 struct device *dev = &adapter->pdev->dev;
4021 struct flash_section_info *fsec = NULL; 4216 struct flash_section_info *fsec = NULL;
4022 u32 img_offset, img_size, img_type; 4217 u32 img_offset, img_size, img_type;
4218 u16 img_optype, flash_optype;
4023 int status, i, filehdr_size; 4219 int status, i, filehdr_size;
4024 bool crc_match, old_fw_img;
4025 u16 img_optype;
4026 const u8 *p; 4220 const u8 *p;
4027 4221
4028 filehdr_size = sizeof(struct flash_file_hdr_g3); 4222 filehdr_size = sizeof(struct flash_file_hdr_g3);
@@ -4032,6 +4226,7 @@ static int be_flash_skyhawk(struct be_adapter *adapter,
4032 return -EINVAL; 4226 return -EINVAL;
4033 } 4227 }
4034 4228
4229retry_flash:
4035 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) { 4230 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4036 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset); 4231 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4037 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size); 4232 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
@@ -4041,6 +4236,12 @@ static int be_flash_skyhawk(struct be_adapter *adapter,
4041 4236
4042 if (img_optype == 0xFFFF) 4237 if (img_optype == 0xFFFF)
4043 continue; 4238 continue;
4239
4240 if (flash_offset_support)
4241 flash_optype = OPTYPE_OFFSET_SPECIFIED;
4242 else
4243 flash_optype = img_optype;
4244
4044 /* Don't bother verifying CRC if an old FW image is being 4245 /* Don't bother verifying CRC if an old FW image is being
4045 * flashed 4246 * flashed
4046 */ 4247 */
@@ -4049,16 +4250,26 @@ static int be_flash_skyhawk(struct be_adapter *adapter,
4049 4250
4050 status = be_check_flash_crc(adapter, fw->data, img_offset, 4251 status = be_check_flash_crc(adapter, fw->data, img_offset,
4051 img_size, filehdr_size + 4252 img_size, filehdr_size +
4052 img_hdrs_size, img_optype, 4253 img_hdrs_size, flash_optype,
4053 &crc_match); 4254 &crc_match);
4054 /* The current FW image on the card does not recognize the new
4055 * FLASH op_type. The FW download is partially complete.
4056 * Reboot the server now to enable FW image to recognize the
4057 * new FLASH op_type. To complete the remaining process,
4058 * download the same FW again after the reboot.
4059 */
4060 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST || 4255 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4061 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) { 4256 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
4257 /* The current FW image on the card does not support
4258 * OFFSET based flashing. Retry using older mechanism
4259 * of OPTYPE based flashing
4260 */
4261 if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4262 flash_offset_support = false;
4263 goto retry_flash;
4264 }
4265
4266 /* The current FW image on the card does not recognize
4267 * the new FLASH op_type. The FW download is partially
4268 * complete. Reboot the server now to enable FW image
4269 * to recognize the new FLASH op_type. To complete the
4270 * remaining process, download the same FW again after
4271 * the reboot.
4272 */
4062 dev_err(dev, "Flash incomplete. Reset the server\n"); 4273 dev_err(dev, "Flash incomplete. Reset the server\n");
4063 dev_err(dev, "Download FW image again after reset\n"); 4274 dev_err(dev, "Download FW image again after reset\n");
4064 return -EAGAIN; 4275 return -EAGAIN;
@@ -4076,7 +4287,19 @@ flash:
4076 if (p + img_size > fw->data + fw->size) 4287 if (p + img_size > fw->data + fw->size)
4077 return -1; 4288 return -1;
4078 4289
4079 status = be_flash(adapter, p, flash_cmd, img_optype, img_size); 4290 status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
4291 img_offset);
4292
4293 /* The current FW image on the card does not support OFFSET
4294 * based flashing. Retry using older mechanism of OPTYPE based
4295 * flashing
4296 */
4297 if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
4298 flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4299 flash_offset_support = false;
4300 goto retry_flash;
4301 }
4302
4080 /* For old FW images ignore ILLEGAL_FIELD error or errors on 4303 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4081 * UFI_DIR region 4304 * UFI_DIR region
4082 */ 4305 */
@@ -4179,98 +4402,105 @@ static int lancer_fw_download(struct be_adapter *adapter,
4179 return 0; 4402 return 0;
4180} 4403}
4181 4404
4182#define UFI_TYPE2 2 4405#define BE2_UFI 2
4183#define UFI_TYPE3 3 4406#define BE3_UFI 3
4184#define UFI_TYPE3R 10 4407#define BE3R_UFI 10
4185#define UFI_TYPE4 4 4408#define SH_UFI 4
4409#define SH_P2_UFI 11
4410
4186static int be_get_ufi_type(struct be_adapter *adapter, 4411static int be_get_ufi_type(struct be_adapter *adapter,
4187 struct flash_file_hdr_g3 *fhdr) 4412 struct flash_file_hdr_g3 *fhdr)
4188{ 4413{
4189 if (!fhdr) 4414 if (!fhdr) {
4190 goto be_get_ufi_exit; 4415 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
4416 return -1;
4417 }
4191 4418
4192 if (skyhawk_chip(adapter) && fhdr->build[0] == '4') 4419 /* First letter of the build version is used to identify
4193 return UFI_TYPE4; 4420 * which chip this image file is meant for.
4194 else if (BE3_chip(adapter) && fhdr->build[0] == '3') { 4421 */
4195 if (fhdr->asic_type_rev == 0x10) 4422 switch (fhdr->build[0]) {
4196 return UFI_TYPE3R; 4423 case BLD_STR_UFI_TYPE_SH:
4197 else 4424 return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI :
4198 return UFI_TYPE3; 4425 SH_UFI;
4199 } else if (BE2_chip(adapter) && fhdr->build[0] == '2') 4426 case BLD_STR_UFI_TYPE_BE3:
4200 return UFI_TYPE2; 4427 return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
4428 BE3_UFI;
4429 case BLD_STR_UFI_TYPE_BE2:
4430 return BE2_UFI;
4431 default:
4432 return -1;
4433 }
4434}
4201 4435
4202be_get_ufi_exit: 4436/* Check if the flash image file is compatible with the adapter that
4203 dev_err(&adapter->pdev->dev, 4437 * is being flashed.
4204 "UFI and Interface are not compatible for flashing\n"); 4438 * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
4205 return -1; 4439 * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type.
4440 */
4441static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4442 struct flash_file_hdr_g3 *fhdr)
4443{
4444 int ufi_type = be_get_ufi_type(adapter, fhdr);
4445
4446 switch (ufi_type) {
4447 case SH_P2_UFI:
4448 return skyhawk_chip(adapter);
4449 case SH_UFI:
4450 return (skyhawk_chip(adapter) &&
4451 adapter->asic_rev < ASIC_REV_P2);
4452 case BE3R_UFI:
4453 return BE3_chip(adapter);
4454 case BE3_UFI:
4455 return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
4456 case BE2_UFI:
4457 return BE2_chip(adapter);
4458 default:
4459 return false;
4460 }
4206} 4461}
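
be_get_ufi_type()/be_check_ufi_compatibility() map the first character of the UFI build string plus the ASIC revision to an image type, then require the stricter image on newer silicon: BE3 rev-B0 parts accept only BE3R_UFI and Skyhawk P2 parts only SH_P2_UFI. A sketch of that compatibility matrix (the revision numbers passed in are illustrative parameters; the driver reads them from its ASIC_REV_* constants):

#include <stdbool.h>
#include <stdio.h>

enum chip { CHIP_BE2, CHIP_BE3, CHIP_SKYHAWK };
enum ufi  { BE2_UFI, BE3_UFI, BE3R_UFI, SH_UFI, SH_P2_UFI };

static bool ufi_compatible(enum ufi type, enum chip chip,
			   int asic_rev, int b0_rev, int p2_rev)
{
	switch (type) {
	case SH_P2_UFI:	return chip == CHIP_SKYHAWK;
	case SH_UFI:	return chip == CHIP_SKYHAWK && asic_rev < p2_rev;
	case BE3R_UFI:	return chip == CHIP_BE3;
	case BE3_UFI:	return chip == CHIP_BE3 && asic_rev < b0_rev;
	case BE2_UFI:	return chip == CHIP_BE2;
	default:	return false;
	}
}

int main(void)
{
	/* A plain BE3 image must not be flashed on a BE3-R (rev >= B0) part. */
	printf("%s\n", ufi_compatible(BE3_UFI, CHIP_BE3, 0x10, 0x10, 0x11)
		       ? "ok" : "rejected");
	return 0;
}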
4207 4462
4208static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw) 4463static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
4209{ 4464{
4465 struct device *dev = &adapter->pdev->dev;
4210 struct flash_file_hdr_g3 *fhdr3; 4466 struct flash_file_hdr_g3 *fhdr3;
4211 struct image_hdr *img_hdr_ptr = NULL; 4467 struct image_hdr *img_hdr_ptr;
4468 int status = 0, i, num_imgs;
4212 struct be_dma_mem flash_cmd; 4469 struct be_dma_mem flash_cmd;
4213 const u8 *p;
4214 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
4215 4470
4216 flash_cmd.size = sizeof(struct be_cmd_write_flashrom); 4471 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
4217 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size, 4472 if (!be_check_ufi_compatibility(adapter, fhdr3)) {
4218 &flash_cmd.dma, GFP_KERNEL); 4473 dev_err(dev, "Flash image is not compatible with adapter\n");
4219 if (!flash_cmd.va) { 4474 return -EINVAL;
4220 status = -ENOMEM;
4221 goto be_fw_exit;
4222 } 4475 }
4223 4476
4224 p = fw->data; 4477 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
4225 fhdr3 = (struct flash_file_hdr_g3 *)p; 4478 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
4226 4479 GFP_KERNEL);
4227 ufi_type = be_get_ufi_type(adapter, fhdr3); 4480 if (!flash_cmd.va)
4481 return -ENOMEM;
4228 4482
4229 num_imgs = le32_to_cpu(fhdr3->num_imgs); 4483 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4230 for (i = 0; i < num_imgs; i++) { 4484 for (i = 0; i < num_imgs; i++) {
4231 img_hdr_ptr = (struct image_hdr *)(fw->data + 4485 img_hdr_ptr = (struct image_hdr *)(fw->data +
4232 (sizeof(struct flash_file_hdr_g3) + 4486 (sizeof(struct flash_file_hdr_g3) +
4233 i * sizeof(struct image_hdr))); 4487 i * sizeof(struct image_hdr)));
4234 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) { 4488 if (!BE2_chip(adapter) &&
4235 switch (ufi_type) { 4489 le32_to_cpu(img_hdr_ptr->imageid) != 1)
4236 case UFI_TYPE4: 4490 continue;
4237 status = be_flash_skyhawk(adapter, fw,
4238 &flash_cmd, num_imgs);
4239 break;
4240 case UFI_TYPE3R:
4241 status = be_flash_BEx(adapter, fw, &flash_cmd,
4242 num_imgs);
4243 break;
4244 case UFI_TYPE3:
4245 /* Do not flash this ufi on BE3-R cards */
4246 if (adapter->asic_rev < 0x10)
4247 status = be_flash_BEx(adapter, fw,
4248 &flash_cmd,
4249 num_imgs);
4250 else {
4251 status = -EINVAL;
4252 dev_err(&adapter->pdev->dev,
4253 "Can't load BE3 UFI on BE3R\n");
4254 }
4255 }
4256 }
4257 }
4258
4259 if (ufi_type == UFI_TYPE2)
4260 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
4261 else if (ufi_type == -1)
4262 status = -EINVAL;
4263 4491
4264 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va, 4492 if (skyhawk_chip(adapter))
4265 flash_cmd.dma); 4493 status = be_flash_skyhawk(adapter, fw, &flash_cmd,
4266 if (status) { 4494 num_imgs);
4267 dev_err(&adapter->pdev->dev, "Firmware load error\n"); 4495 else
4268 goto be_fw_exit; 4496 status = be_flash_BEx(adapter, fw, &flash_cmd,
4497 num_imgs);
4269 } 4498 }
4270 4499
4271 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n"); 4500 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
4501 if (!status)
4502 dev_info(dev, "Firmware flashed successfully\n");
4272 4503
4273be_fw_exit:
4274 return status; 4504 return status;
4275} 4505}
4276 4506
@@ -4304,7 +4534,8 @@ fw_exit:
4304 return status; 4534 return status;
4305} 4535}
4306 4536
4307static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh) 4537static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4538 u16 flags)
4308{ 4539{
4309 struct be_adapter *adapter = netdev_priv(dev); 4540 struct be_adapter *adapter = netdev_priv(dev);
4310 struct nlattr *attr, *br_spec; 4541 struct nlattr *attr, *br_spec;
@@ -4832,6 +5063,20 @@ static void be_func_recovery_task(struct work_struct *work)
4832 msecs_to_jiffies(1000)); 5063 msecs_to_jiffies(1000));
4833} 5064}
4834 5065
5066static void be_log_sfp_info(struct be_adapter *adapter)
5067{
5068 int status;
5069
5070 status = be_cmd_query_sfp_info(adapter);
5071 if (!status) {
5072 dev_err(&adapter->pdev->dev,
5073 "Unqualified SFP+ detected on %c from %s part no: %s",
5074 adapter->port_name, adapter->phy.vendor_name,
5075 adapter->phy.vendor_pn);
5076 }
5077 adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
5078}
5079
4835static void be_worker(struct work_struct *work) 5080static void be_worker(struct work_struct *work)
4836{ 5081{
4837 struct be_adapter *adapter = 5082 struct be_adapter *adapter =
@@ -4870,6 +5115,9 @@ static void be_worker(struct work_struct *work)
4870 5115
4871 be_eqd_update(adapter); 5116 be_eqd_update(adapter);
4872 5117
5118 if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
5119 be_log_sfp_info(adapter);
5120
4873reschedule: 5121reschedule:
4874 adapter->work_counter++; 5122 adapter->work_counter++;
4875 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); 5123 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
@@ -4916,12 +5164,31 @@ static inline char *func_name(struct be_adapter *adapter)
4916 return be_physfn(adapter) ? "PF" : "VF"; 5164 return be_physfn(adapter) ? "PF" : "VF";
4917} 5165}
4918 5166
5167static inline char *nic_name(struct pci_dev *pdev)
5168{
5169 switch (pdev->device) {
5170 case OC_DEVICE_ID1:
5171 return OC_NAME;
5172 case OC_DEVICE_ID2:
5173 return OC_NAME_BE;
5174 case OC_DEVICE_ID3:
5175 case OC_DEVICE_ID4:
5176 return OC_NAME_LANCER;
5177 case BE_DEVICE_ID2:
5178 return BE3_NAME;
5179 case OC_DEVICE_ID5:
5180 case OC_DEVICE_ID6:
5181 return OC_NAME_SH;
5182 default:
5183 return BE_NAME;
5184 }
5185}
5186
4919static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) 5187static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4920{ 5188{
4921 int status = 0;
4922 struct be_adapter *adapter; 5189 struct be_adapter *adapter;
4923 struct net_device *netdev; 5190 struct net_device *netdev;
4924 char port_name; 5191 int status = 0;
4925 5192
4926 dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER); 5193 dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);
4927 5194
@@ -5015,10 +5282,8 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
5015 schedule_delayed_work(&adapter->func_recovery_work, 5282 schedule_delayed_work(&adapter->func_recovery_work,
5016 msecs_to_jiffies(1000)); 5283 msecs_to_jiffies(1000));
5017 5284
5018 be_cmd_query_port_name(adapter, &port_name);
5019
5020 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev), 5285 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
5021 func_name(adapter), mc_name(adapter), port_name); 5286 func_name(adapter), mc_name(adapter), adapter->port_name);
5022 5287
5023 return 0; 5288 return 0;
5024 5289
@@ -5083,6 +5348,10 @@ static int be_resume(struct pci_dev *pdev)
5083 if (status) 5348 if (status)
5084 return status; 5349 return status;
5085 5350
5351 status = be_cmd_reset_function(adapter);
5352 if (status)
5353 return status;
5354
5086 be_intr_set(adapter, true); 5355 be_intr_set(adapter, true);
5087 /* tell fw we're ready to fire cmds */ 5356 /* tell fw we're ready to fire cmds */
5088 status = be_cmd_fw_init(adapter); 5357 status = be_cmd_fw_init(adapter);
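The be2net flashing hunks above first try the new OFFSET-based flashing (OPTYPE_OFFSET_SPECIFIED) and fall back to the older per-section OPTYPE mechanism when the running firmware rejects the command with MCC_STATUS_ILLEGAL_FIELD. The following is a minimal, standalone sketch of that retry pattern only; the status codes, the try_flash() helper and its argument are illustrative stand-ins and not the driver's real API.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins; the real codes live in the be2net headers. */
enum { STATUS_OK = 0, STATUS_ILLEGAL_FIELD = 3 };
enum { OPTYPE_OFFSET_SPECIFIED = 100, OPTYPE_SECTION_SPECIFIC = 1 };

/* Hypothetical firmware that only understands per-section optypes and
 * rejects offset-based requests, mimicking an old FW image on the card. */
static int try_flash(int optype)
{
	return (optype == OPTYPE_OFFSET_SPECIFIED) ? STATUS_ILLEGAL_FIELD
						   : STATUS_OK;
}

int main(void)
{
	bool offset_support = true;	/* assume newer firmware first */
	int optype, status;

retry_flash:
	optype = offset_support ? OPTYPE_OFFSET_SPECIFIED
				: OPTYPE_SECTION_SPECIFIC;
	status = try_flash(optype);

	/* Firmware too old for offset-based flashing: drop the capability
	 * flag once and retry with the per-section optype. */
	if (status == STATUS_ILLEGAL_FIELD &&
	    optype == OPTYPE_OFFSET_SPECIFIED) {
		offset_support = false;
		goto retry_flash;
	}

	printf("flashed with optype %d, status %d\n", optype, status);
	return status;
}
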
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 270308315d43..ba84c4a9ce32 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -69,7 +69,8 @@ config FSL_XGMAC_MDIO
69 select PHYLIB 69 select PHYLIB
70 select OF_MDIO 70 select OF_MDIO
71 ---help--- 71 ---help---
72 This driver supports the MDIO bus on the Fman 10G Ethernet MACs. 72 This driver supports the MDIO bus on the Fman 10G Ethernet MACs, and
73 on the FMan mEMAC (which supports both Clauses 22 and 45)
73 74
74config UCC_GETH 75config UCC_GETH
75 tristate "Freescale QE Gigabit Ethernet" 76 tristate "Freescale QE Gigabit Ethernet"
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 40132929daf7..a86af8a7485d 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -16,6 +16,7 @@
16#include <linux/clocksource.h> 16#include <linux/clocksource.h>
17#include <linux/net_tstamp.h> 17#include <linux/net_tstamp.h>
18#include <linux/ptp_clock_kernel.h> 18#include <linux/ptp_clock_kernel.h>
19#include <linux/timecounter.h>
19 20
20#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 21#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
21 defined(CONFIG_M520x) || defined(CONFIG_M532x) || \ 22 defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
@@ -356,6 +357,7 @@ struct bufdesc_ex {
356#define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */ 357#define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */
357#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */ 358#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */
358#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */ 359#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */
360#define FEC_ENET_WAKEUP ((uint)0x00020000) /* Wakeup request */
359#define FEC_ENET_TXF (FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2) 361#define FEC_ENET_TXF (FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2)
360#define FEC_ENET_RXF (FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2) 362#define FEC_ENET_RXF (FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2)
361#define FEC_ENET_TS_AVAIL ((uint)0x00010000) 363#define FEC_ENET_TS_AVAIL ((uint)0x00010000)
@@ -513,6 +515,7 @@ struct fec_enet_private {
513 int irq[FEC_IRQ_NUM]; 515 int irq[FEC_IRQ_NUM];
514 bool bufdesc_ex; 516 bool bufdesc_ex;
515 int pause_flag; 517 int pause_flag;
518 int wol_flag;
516 u32 quirks; 519 u32 quirks;
517 520
518 struct napi_struct napi; 521 struct napi_struct napi;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index bba87775419d..9bb6220663b2 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -188,6 +188,9 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
188#define FEC_MMFR_RA(v) ((v & 0x1f) << 18) 188#define FEC_MMFR_RA(v) ((v & 0x1f) << 18)
189#define FEC_MMFR_TA (2 << 16) 189#define FEC_MMFR_TA (2 << 16)
190#define FEC_MMFR_DATA(v) (v & 0xffff) 190#define FEC_MMFR_DATA(v) (v & 0xffff)
191/* FEC ECR bits definition */
192#define FEC_ECR_MAGICEN (1 << 2)
193#define FEC_ECR_SLEEP (1 << 3)
191 194
192#define FEC_MII_TIMEOUT 30000 /* us */ 195#define FEC_MII_TIMEOUT 30000 /* us */
193 196
@@ -196,6 +199,9 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
196 199
197#define FEC_PAUSE_FLAG_AUTONEG 0x1 200#define FEC_PAUSE_FLAG_AUTONEG 0x1
198#define FEC_PAUSE_FLAG_ENABLE 0x2 201#define FEC_PAUSE_FLAG_ENABLE 0x2
202#define FEC_WOL_HAS_MAGIC_PACKET (0x1 << 0)
203#define FEC_WOL_FLAG_ENABLE (0x1 << 1)
204#define FEC_WOL_FLAG_SLEEP_ON (0x1 << 2)
199 205
200#define COPYBREAK_DEFAULT 256 206#define COPYBREAK_DEFAULT 256
201 207
@@ -1090,7 +1096,9 @@ static void
1090fec_stop(struct net_device *ndev) 1096fec_stop(struct net_device *ndev)
1091{ 1097{
1092 struct fec_enet_private *fep = netdev_priv(ndev); 1098 struct fec_enet_private *fep = netdev_priv(ndev);
1099 struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
1093 u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8); 1100 u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
1101 u32 val;
1094 1102
1095 /* We cannot expect a graceful transmit stop without link !!! */ 1103 /* We cannot expect a graceful transmit stop without link !!! */
1096 if (fep->link) { 1104 if (fep->link) {
@@ -1104,17 +1112,28 @@ fec_stop(struct net_device *ndev)
1104 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC 1112 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
1105 * instead of reset MAC itself. 1113 * instead of reset MAC itself.
1106 */ 1114 */
1107 if (fep->quirks & FEC_QUIRK_HAS_AVB) { 1115 if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
1108 writel(0, fep->hwp + FEC_ECNTRL); 1116 if (fep->quirks & FEC_QUIRK_HAS_AVB) {
1117 writel(0, fep->hwp + FEC_ECNTRL);
1118 } else {
1119 writel(1, fep->hwp + FEC_ECNTRL);
1120 udelay(10);
1121 }
1122 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1109 } else { 1123 } else {
1110 writel(1, fep->hwp + FEC_ECNTRL); 1124 writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
1111 udelay(10); 1125 val = readl(fep->hwp + FEC_ECNTRL);
1126 val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
1127 writel(val, fep->hwp + FEC_ECNTRL);
1128
1129 if (pdata && pdata->sleep_mode_enable)
1130 pdata->sleep_mode_enable(true);
1112 } 1131 }
1113 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 1132 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1114 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1115 1133
1116 /* We have to keep ENET enabled to have MII interrupt stay working */ 1134 /* We have to keep ENET enabled to have MII interrupt stay working */
1117 if (fep->quirks & FEC_QUIRK_ENET_MAC) { 1135 if (fep->quirks & FEC_QUIRK_ENET_MAC &&
1136 !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
1118 writel(2, fep->hwp + FEC_ECNTRL); 1137 writel(2, fep->hwp + FEC_ECNTRL);
1119 writel(rmii_mode, fep->hwp + FEC_R_CNTRL); 1138 writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
1120 } 1139 }
@@ -1170,12 +1189,13 @@ static void
1170fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) 1189fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
1171{ 1190{
1172 struct fec_enet_private *fep; 1191 struct fec_enet_private *fep;
1173 struct bufdesc *bdp; 1192 struct bufdesc *bdp, *bdp_t;
1174 unsigned short status; 1193 unsigned short status;
1175 struct sk_buff *skb; 1194 struct sk_buff *skb;
1176 struct fec_enet_priv_tx_q *txq; 1195 struct fec_enet_priv_tx_q *txq;
1177 struct netdev_queue *nq; 1196 struct netdev_queue *nq;
1178 int index = 0; 1197 int index = 0;
1198 int i, bdnum;
1179 int entries_free; 1199 int entries_free;
1180 1200
1181 fep = netdev_priv(ndev); 1201 fep = netdev_priv(ndev);
@@ -1196,18 +1216,29 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
1196 if (bdp == txq->cur_tx) 1216 if (bdp == txq->cur_tx)
1197 break; 1217 break;
1198 1218
1199 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); 1219 bdp_t = bdp;
1200 1220 bdnum = 1;
1221 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
1201 skb = txq->tx_skbuff[index]; 1222 skb = txq->tx_skbuff[index];
1202 txq->tx_skbuff[index] = NULL; 1223 while (!skb) {
1203 if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr)) 1224 bdp_t = fec_enet_get_nextdesc(bdp_t, fep, queue_id);
1204 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, 1225 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
1205 bdp->cbd_datlen, DMA_TO_DEVICE); 1226 skb = txq->tx_skbuff[index];
1206 bdp->cbd_bufaddr = 0; 1227 bdnum++;
1207 if (!skb) {
1208 bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
1209 continue;
1210 } 1228 }
1229 if (skb_shinfo(skb)->nr_frags &&
1230 (status = bdp_t->cbd_sc) & BD_ENET_TX_READY)
1231 break;
1232
1233 for (i = 0; i < bdnum; i++) {
1234 if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
1235 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
1236 bdp->cbd_datlen, DMA_TO_DEVICE);
1237 bdp->cbd_bufaddr = 0;
1238 if (i < bdnum - 1)
1239 bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
1240 }
1241 txq->tx_skbuff[index] = NULL;
1211 1242
1212 /* Check for errors. */ 1243 /* Check for errors. */
1213 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | 1244 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -2428,6 +2459,44 @@ static int fec_enet_set_tunable(struct net_device *netdev,
2428 return ret; 2459 return ret;
2429} 2460}
2430 2461
2462static void
2463fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2464{
2465 struct fec_enet_private *fep = netdev_priv(ndev);
2466
2467 if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) {
2468 wol->supported = WAKE_MAGIC;
2469 wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0;
2470 } else {
2471 wol->supported = wol->wolopts = 0;
2472 }
2473}
2474
2475static int
2476fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2477{
2478 struct fec_enet_private *fep = netdev_priv(ndev);
2479
2480 if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET))
2481 return -EINVAL;
2482
2483 if (wol->wolopts & ~WAKE_MAGIC)
2484 return -EINVAL;
2485
2486 device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
2487 if (device_may_wakeup(&ndev->dev)) {
2488 fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
2489 if (fep->irq[0] > 0)
2490 enable_irq_wake(fep->irq[0]);
2491 } else {
2492 fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
2493 if (fep->irq[0] > 0)
2494 disable_irq_wake(fep->irq[0]);
2495 }
2496
2497 return 0;
2498}
2499
2431static const struct ethtool_ops fec_enet_ethtool_ops = { 2500static const struct ethtool_ops fec_enet_ethtool_ops = {
2432 .get_settings = fec_enet_get_settings, 2501 .get_settings = fec_enet_get_settings,
2433 .set_settings = fec_enet_set_settings, 2502 .set_settings = fec_enet_set_settings,
@@ -2446,6 +2515,8 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
2446 .get_ts_info = fec_enet_get_ts_info, 2515 .get_ts_info = fec_enet_get_ts_info,
2447 .get_tunable = fec_enet_get_tunable, 2516 .get_tunable = fec_enet_get_tunable,
2448 .set_tunable = fec_enet_set_tunable, 2517 .set_tunable = fec_enet_set_tunable,
2518 .get_wol = fec_enet_get_wol,
2519 .set_wol = fec_enet_set_wol,
2449}; 2520};
2450 2521
2451static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) 2522static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
@@ -2525,12 +2596,9 @@ static void fec_enet_free_queue(struct net_device *ndev)
2525 } 2596 }
2526 2597
2527 for (i = 0; i < fep->num_rx_queues; i++) 2598 for (i = 0; i < fep->num_rx_queues; i++)
2528 if (fep->rx_queue[i]) 2599 kfree(fep->rx_queue[i]);
2529 kfree(fep->rx_queue[i]);
2530
2531 for (i = 0; i < fep->num_tx_queues; i++) 2600 for (i = 0; i < fep->num_tx_queues; i++)
2532 if (fep->tx_queue[i]) 2601 kfree(fep->tx_queue[i]);
2533 kfree(fep->tx_queue[i]);
2534} 2602}
2535 2603
2536static int fec_enet_alloc_queue(struct net_device *ndev) 2604static int fec_enet_alloc_queue(struct net_device *ndev)
@@ -2706,6 +2774,9 @@ fec_enet_open(struct net_device *ndev)
2706 phy_start(fep->phy_dev); 2774 phy_start(fep->phy_dev);
2707 netif_tx_start_all_queues(ndev); 2775 netif_tx_start_all_queues(ndev);
2708 2776
2777 device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
2778 FEC_WOL_FLAG_ENABLE);
2779
2709 return 0; 2780 return 0;
2710 2781
2711err_enet_mii_probe: 2782err_enet_mii_probe:
@@ -3155,6 +3226,9 @@ fec_probe(struct platform_device *pdev)
3155 3226
3156 platform_set_drvdata(pdev, ndev); 3227 platform_set_drvdata(pdev, ndev);
3157 3228
3229 if (of_get_property(np, "fsl,magic-packet", NULL))
3230 fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
3231
3158 phy_node = of_parse_phandle(np, "phy-handle", 0); 3232 phy_node = of_parse_phandle(np, "phy-handle", 0);
3159 if (!phy_node && of_phy_is_fixed_link(np)) { 3233 if (!phy_node && of_phy_is_fixed_link(np)) {
3160 ret = of_phy_register_fixed_link(np); 3234 ret = of_phy_register_fixed_link(np);
@@ -3249,6 +3323,8 @@ fec_probe(struct platform_device *pdev)
3249 0, pdev->name, ndev); 3323 0, pdev->name, ndev);
3250 if (ret) 3324 if (ret)
3251 goto failed_irq; 3325 goto failed_irq;
3326
3327 fep->irq[i] = irq;
3252 } 3328 }
3253 3329
3254 init_completion(&fep->mdio_done); 3330 init_completion(&fep->mdio_done);
@@ -3265,6 +3341,9 @@ fec_probe(struct platform_device *pdev)
3265 if (ret) 3341 if (ret)
3266 goto failed_register; 3342 goto failed_register;
3267 3343
3344 device_init_wakeup(&ndev->dev, fep->wol_flag &
3345 FEC_WOL_HAS_MAGIC_PACKET);
3346
3268 if (fep->bufdesc_ex && fep->ptp_clock) 3347 if (fep->bufdesc_ex && fep->ptp_clock)
3269 netdev_info(ndev, "registered PHC device %d\n", fep->dev_id); 3348 netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
3270 3349
@@ -3318,6 +3397,8 @@ static int __maybe_unused fec_suspend(struct device *dev)
3318 3397
3319 rtnl_lock(); 3398 rtnl_lock();
3320 if (netif_running(ndev)) { 3399 if (netif_running(ndev)) {
3400 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
3401 fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
3321 phy_stop(fep->phy_dev); 3402 phy_stop(fep->phy_dev);
3322 napi_disable(&fep->napi); 3403 napi_disable(&fep->napi);
3323 netif_tx_lock_bh(ndev); 3404 netif_tx_lock_bh(ndev);
@@ -3325,11 +3406,12 @@ static int __maybe_unused fec_suspend(struct device *dev)
3325 netif_tx_unlock_bh(ndev); 3406 netif_tx_unlock_bh(ndev);
3326 fec_stop(ndev); 3407 fec_stop(ndev);
3327 fec_enet_clk_enable(ndev, false); 3408 fec_enet_clk_enable(ndev, false);
3328 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 3409 if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
3410 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
3329 } 3411 }
3330 rtnl_unlock(); 3412 rtnl_unlock();
3331 3413
3332 if (fep->reg_phy) 3414 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
3333 regulator_disable(fep->reg_phy); 3415 regulator_disable(fep->reg_phy);
3334 3416
3335 /* SOC supply clock to phy, when clock is disabled, phy link down 3417 /* SOC supply clock to phy, when clock is disabled, phy link down
@@ -3345,9 +3427,11 @@ static int __maybe_unused fec_resume(struct device *dev)
3345{ 3427{
3346 struct net_device *ndev = dev_get_drvdata(dev); 3428 struct net_device *ndev = dev_get_drvdata(dev);
3347 struct fec_enet_private *fep = netdev_priv(ndev); 3429 struct fec_enet_private *fep = netdev_priv(ndev);
3430 struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
3348 int ret; 3431 int ret;
3432 int val;
3349 3433
3350 if (fep->reg_phy) { 3434 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
3351 ret = regulator_enable(fep->reg_phy); 3435 ret = regulator_enable(fep->reg_phy);
3352 if (ret) 3436 if (ret)
3353 return ret; 3437 return ret;
@@ -3355,12 +3439,21 @@ static int __maybe_unused fec_resume(struct device *dev)
3355 3439
3356 rtnl_lock(); 3440 rtnl_lock();
3357 if (netif_running(ndev)) { 3441 if (netif_running(ndev)) {
3358 pinctrl_pm_select_default_state(&fep->pdev->dev);
3359 ret = fec_enet_clk_enable(ndev, true); 3442 ret = fec_enet_clk_enable(ndev, true);
3360 if (ret) { 3443 if (ret) {
3361 rtnl_unlock(); 3444 rtnl_unlock();
3362 goto failed_clk; 3445 goto failed_clk;
3363 } 3446 }
3447 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
3448 if (pdata && pdata->sleep_mode_enable)
3449 pdata->sleep_mode_enable(false);
3450 val = readl(fep->hwp + FEC_ECNTRL);
3451 val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
3452 writel(val, fep->hwp + FEC_ECNTRL);
3453 fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON;
3454 } else {
3455 pinctrl_pm_select_default_state(&fep->pdev->dev);
3456 }
3364 fec_restart(ndev); 3457 fec_restart(ndev);
3365 netif_tx_lock_bh(ndev); 3458 netif_tx_lock_bh(ndev);
3366 netif_device_attach(ndev); 3459 netif_device_attach(ndev);
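The FEC changes above wire magic-packet Wake-on-LAN through three places: probe sets FEC_WOL_HAS_MAGIC_PACKET when the device tree node carries "fsl,magic-packet", the ethtool set_wol handler latches FEC_WOL_FLAG_ENABLE and arms IRQ wake, and fec_stop()/suspend keep the MAC powered with FEC_ECR_MAGICEN | FEC_ECR_SLEEP instead of resetting it. Below is a compact user-space sketch of the flag bookkeeping only, assuming the same bit layout as the driver; the register writes, device_set_wakeup_enable() and enable_irq_wake() side effects are not modelled.

#include <stdio.h>

#define WAKE_MAGIC               (1 << 5)   /* as in linux/ethtool.h */
#define FEC_WOL_HAS_MAGIC_PACKET (1 << 0)
#define FEC_WOL_FLAG_ENABLE      (1 << 1)
#define FEC_WOL_FLAG_SLEEP_ON    (1 << 2)

/* Mirror of the driver's ethtool set_wol policy, minus hardware access. */
static int set_wol(unsigned int *wol_flag, unsigned int wolopts)
{
	if (!(*wol_flag & FEC_WOL_HAS_MAGIC_PACKET))
		return -1;              /* no "fsl,magic-packet" in the DT */
	if (wolopts & ~WAKE_MAGIC)
		return -1;              /* only magic-packet wake supported */

	if (wolopts & WAKE_MAGIC)
		*wol_flag |= FEC_WOL_FLAG_ENABLE;   /* + enable_irq_wake() */
	else
		*wol_flag &= ~FEC_WOL_FLAG_ENABLE;  /* + disable_irq_wake() */
	return 0;
}

int main(void)
{
	unsigned int flags = FEC_WOL_HAS_MAGIC_PACKET;

	set_wol(&flags, WAKE_MAGIC);
	/* On suspend the driver would now also set FEC_WOL_FLAG_SLEEP_ON and
	 * program FEC_ECR_MAGICEN | FEC_ECR_SLEEP instead of resetting the MAC. */
	printf("wol flags: 0x%x\n", flags);
	return 0;
}
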
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 992c8c3db553..1f9cf2345266 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -374,23 +374,9 @@ static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
374 struct fec_enet_private *fep = 374 struct fec_enet_private *fep =
375 container_of(ptp, struct fec_enet_private, ptp_caps); 375 container_of(ptp, struct fec_enet_private, ptp_caps);
376 unsigned long flags; 376 unsigned long flags;
377 u64 now;
378 u32 counter;
379 377
380 spin_lock_irqsave(&fep->tmreg_lock, flags); 378 spin_lock_irqsave(&fep->tmreg_lock, flags);
381 379 timecounter_adjtime(&fep->tc, delta);
382 now = timecounter_read(&fep->tc);
383 now += delta;
384
385 /* Get the timer value based on adjusted timestamp.
386 * Update the counter with the masked value.
387 */
388 counter = now & fep->cc.mask;
389 writel(counter, fep->hwp + FEC_ATIME);
390
391 /* reset the timecounter */
392 timecounter_init(&fep->tc, &fep->cc, now);
393
394 spin_unlock_irqrestore(&fep->tmreg_lock, flags); 380 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
395 381
396 return 0; 382 return 0;
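The open-coded clock adjustment removed above is replaced by timecounter_adjtime() from the new <linux/timecounter.h>. To a first approximation the helper just offsets the software time counter by delta nanoseconds; the sketch below shows that assumed effect with a trimmed-down structure, and is not the actual header implementation.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct timecounter: only the accumulated time. */
struct timecounter { uint64_t nsec; };

/* Assumed behaviour of timecounter_adjtime(): shift the software clock. */
static void timecounter_adjtime(struct timecounter *tc, int64_t delta)
{
	tc->nsec += delta;
}

int main(void)
{
	struct timecounter tc = { .nsec = 1000000000ULL };  /* 1 s */

	timecounter_adjtime(&tc, -250000000);               /* step back 250 ms */
	printf("%llu ns\n", (unsigned long long)tc.nsec);   /* 750000000 */
	return 0;
}
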
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 9e2bcb807923..a17628769a1f 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -278,14 +278,20 @@ static int fs_enet_tx_napi(struct napi_struct *napi, int budget)
278 fep->stats.collisions++; 278 fep->stats.collisions++;
279 279
280 /* unmap */ 280 /* unmap */
281 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), 281 if (fep->mapped_as_page[dirtyidx])
282 skb->len, DMA_TO_DEVICE); 282 dma_unmap_page(fep->dev, CBDR_BUFADDR(bdp),
283 CBDR_DATLEN(bdp), DMA_TO_DEVICE);
284 else
285 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
286 CBDR_DATLEN(bdp), DMA_TO_DEVICE);
283 287
284 /* 288 /*
285 * Free the sk buffer associated with this last transmit. 289 * Free the sk buffer associated with this last transmit.
286 */ 290 */
287 dev_kfree_skb(skb); 291 if (skb) {
288 fep->tx_skbuff[dirtyidx] = NULL; 292 dev_kfree_skb(skb);
293 fep->tx_skbuff[dirtyidx] = NULL;
294 }
289 295
290 /* 296 /*
291 * Update pointer to next buffer descriptor to be transmitted. 297 * Update pointer to next buffer descriptor to be transmitted.
@@ -299,7 +305,7 @@ static int fs_enet_tx_napi(struct napi_struct *napi, int budget)
299 * Since we have freed up a buffer, the ring is no longer 305 * Since we have freed up a buffer, the ring is no longer
300 * full. 306 * full.
301 */ 307 */
302 if (!fep->tx_free++) 308 if (++fep->tx_free >= MAX_SKB_FRAGS)
303 do_wake = 1; 309 do_wake = 1;
304 has_tx_work = 1; 310 has_tx_work = 1;
305 } 311 }
@@ -509,6 +515,9 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
509 cbd_t __iomem *bdp; 515 cbd_t __iomem *bdp;
510 int curidx; 516 int curidx;
511 u16 sc; 517 u16 sc;
518 int nr_frags = skb_shinfo(skb)->nr_frags;
519 skb_frag_t *frag;
520 int len;
512 521
513#ifdef CONFIG_FS_ENET_MPC5121_FEC 522#ifdef CONFIG_FS_ENET_MPC5121_FEC
514 if (((unsigned long)skb->data) & 0x3) { 523 if (((unsigned long)skb->data) & 0x3) {
@@ -530,7 +539,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
530 */ 539 */
531 bdp = fep->cur_tx; 540 bdp = fep->cur_tx;
532 541
533 if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) { 542 if (fep->tx_free <= nr_frags || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
534 netif_stop_queue(dev); 543 netif_stop_queue(dev);
535 spin_unlock(&fep->tx_lock); 544 spin_unlock(&fep->tx_lock);
536 545
@@ -543,35 +552,42 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
543 } 552 }
544 553
545 curidx = bdp - fep->tx_bd_base; 554 curidx = bdp - fep->tx_bd_base;
546 /*
547 * Clear all of the status flags.
548 */
549 CBDC_SC(bdp, BD_ENET_TX_STATS);
550
551 /*
552 * Save skb pointer.
553 */
554 fep->tx_skbuff[curidx] = skb;
555
556 fep->stats.tx_bytes += skb->len;
557 555
556 len = skb->len;
557 fep->stats.tx_bytes += len;
558 if (nr_frags)
559 len -= skb->data_len;
560 fep->tx_free -= nr_frags + 1;
558 /* 561 /*
559 * Push the data cache so the CPM does not get stale memory data. 562 * Push the data cache so the CPM does not get stale memory data.
560 */ 563 */
561 CBDW_BUFADDR(bdp, dma_map_single(fep->dev, 564 CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
562 skb->data, skb->len, DMA_TO_DEVICE)); 565 skb->data, len, DMA_TO_DEVICE));
563 CBDW_DATLEN(bdp, skb->len); 566 CBDW_DATLEN(bdp, len);
567
568 fep->mapped_as_page[curidx] = 0;
569 frag = skb_shinfo(skb)->frags;
570 while (nr_frags) {
571 CBDC_SC(bdp,
572 BD_ENET_TX_STATS | BD_ENET_TX_LAST | BD_ENET_TX_TC);
573 CBDS_SC(bdp, BD_ENET_TX_READY);
574
575 if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
576 bdp++, curidx++;
577 else
578 bdp = fep->tx_bd_base, curidx = 0;
564 579
565 /* 580 len = skb_frag_size(frag);
566 * If this was the last BD in the ring, start at the beginning again. 581 CBDW_BUFADDR(bdp, skb_frag_dma_map(fep->dev, frag, 0, len,
567 */ 582 DMA_TO_DEVICE));
568 if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) 583 CBDW_DATLEN(bdp, len);
569 fep->cur_tx++;
570 else
571 fep->cur_tx = fep->tx_bd_base;
572 584
573 if (!--fep->tx_free) 585 fep->tx_skbuff[curidx] = NULL;
574 netif_stop_queue(dev); 586 fep->mapped_as_page[curidx] = 1;
587
588 frag++;
589 nr_frags--;
590 }
575 591
576 /* Trigger transmission start */ 592 /* Trigger transmission start */
577 sc = BD_ENET_TX_READY | BD_ENET_TX_INTR | 593 sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
@@ -582,8 +598,22 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
582 * yay for hw reuse :) */ 598 * yay for hw reuse :) */
583 if (skb->len <= 60) 599 if (skb->len <= 60)
584 sc |= BD_ENET_TX_PAD; 600 sc |= BD_ENET_TX_PAD;
601 CBDC_SC(bdp, BD_ENET_TX_STATS);
585 CBDS_SC(bdp, sc); 602 CBDS_SC(bdp, sc);
586 603
604 /* Save skb pointer. */
605 fep->tx_skbuff[curidx] = skb;
606
607 /* If this was the last BD in the ring, start at the beginning again. */
608 if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
609 bdp++;
610 else
611 bdp = fep->tx_bd_base;
612 fep->cur_tx = bdp;
613
614 if (fep->tx_free < MAX_SKB_FRAGS)
615 netif_stop_queue(dev);
616
587 skb_tx_timestamp(skb); 617 skb_tx_timestamp(skb);
588 618
589 (*fep->ops->tx_kickstart)(dev); 619 (*fep->ops->tx_kickstart)(dev);
@@ -917,7 +947,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
917 } 947 }
918 948
919 fpi->rx_ring = 32; 949 fpi->rx_ring = 32;
920 fpi->tx_ring = 32; 950 fpi->tx_ring = 64;
921 fpi->rx_copybreak = 240; 951 fpi->rx_copybreak = 240;
922 fpi->napi_weight = 17; 952 fpi->napi_weight = 17;
923 fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0); 953 fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
@@ -955,7 +985,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
955 985
956 privsize = sizeof(*fep) + 986 privsize = sizeof(*fep) +
957 sizeof(struct sk_buff **) * 987 sizeof(struct sk_buff **) *
958 (fpi->rx_ring + fpi->tx_ring); 988 (fpi->rx_ring + fpi->tx_ring) +
989 sizeof(char) * fpi->tx_ring;
959 990
960 ndev = alloc_etherdev(privsize); 991 ndev = alloc_etherdev(privsize);
961 if (!ndev) { 992 if (!ndev) {
@@ -978,6 +1009,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
978 1009
979 fep->rx_skbuff = (struct sk_buff **)&fep[1]; 1010 fep->rx_skbuff = (struct sk_buff **)&fep[1];
980 fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring; 1011 fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;
1012 fep->mapped_as_page = (char *)(fep->rx_skbuff + fpi->rx_ring +
1013 fpi->tx_ring);
981 1014
982 spin_lock_init(&fep->lock); 1015 spin_lock_init(&fep->lock);
983 spin_lock_init(&fep->tx_lock); 1016 spin_lock_init(&fep->tx_lock);
@@ -1007,6 +1040,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
1007 1040
1008 netif_carrier_off(ndev); 1041 netif_carrier_off(ndev);
1009 1042
1043 ndev->features |= NETIF_F_SG;
1044
1010 ret = register_netdev(ndev); 1045 ret = register_netdev(ndev);
1011 if (ret) 1046 if (ret)
1012 goto out_free_bd; 1047 goto out_free_bd;
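With scatter-gather enabled (NETIF_F_SG), fs_enet_start_xmit() above consumes one descriptor for the linear part plus one per fragment, so tx_free drops by nr_frags + 1, the queue is stopped once fewer than MAX_SKB_FRAGS descriptors remain, and the NAPI cleanup only wakes it again when tx_free climbs back to MAX_SKB_FRAGS. The stand-alone model below shows that flow-control rule only, with the ring size and MAX_SKB_FRAGS scaled down to keep the example short.

#include <stdbool.h>
#include <stdio.h>

#define TX_RING       8     /* scaled-down ring; the driver now uses 64 */
#define MAX_SKB_FRAGS 3     /* scaled down from the kernel's value */

static int tx_free = TX_RING;
static bool queue_stopped;

/* Enqueue a packet needing 1 + nr_frags descriptors, as in start_xmit. */
static bool xmit(int nr_frags)
{
	if (tx_free <= nr_frags) {          /* not enough descriptors left */
		queue_stopped = true;
		return false;
	}
	tx_free -= nr_frags + 1;
	if (tx_free < MAX_SKB_FRAGS)        /* next worst-case skb may not fit */
		queue_stopped = true;
	return true;
}

/* Reclaim one descriptor, as the NAPI tx cleanup does per completed BD. */
static void reclaim_one(void)
{
	if (++tx_free >= MAX_SKB_FRAGS)     /* room for a full skb again */
		queue_stopped = false;
}

int main(void)
{
	xmit(2);  /* 3 descriptors used */
	xmit(2);  /* 3 more: tx_free drops to 2, queue stops */
	printf("free=%d stopped=%d\n", tx_free, queue_stopped);
	reclaim_one();
	printf("free=%d stopped=%d\n", tx_free, queue_stopped);
	return 0;
}
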
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
index 3a4b49e0e717..f184d8f952e2 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -134,6 +134,7 @@ struct fs_enet_private {
134 void __iomem *ring_base; 134 void __iomem *ring_base;
135 struct sk_buff **rx_skbuff; 135 struct sk_buff **rx_skbuff;
136 struct sk_buff **tx_skbuff; 136 struct sk_buff **tx_skbuff;
137 char *mapped_as_page;
137 cbd_t __iomem *rx_bd_base; /* Address of Rx and Tx buffers. */ 138 cbd_t __iomem *rx_bd_base; /* Address of Rx and Tx buffers. */
138 cbd_t __iomem *tx_bd_base; 139 cbd_t __iomem *tx_bd_base;
139 cbd_t __iomem *dirty_tx; /* ring entries to be free()ed. */ 140 cbd_t __iomem *dirty_tx; /* ring entries to be free()ed. */
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 5645342f5b28..43df78882e48 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -116,7 +116,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
116static void gfar_reset_task(struct work_struct *work); 116static void gfar_reset_task(struct work_struct *work);
117static void gfar_timeout(struct net_device *dev); 117static void gfar_timeout(struct net_device *dev);
118static int gfar_close(struct net_device *dev); 118static int gfar_close(struct net_device *dev);
119struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr); 119static struct sk_buff *gfar_new_skb(struct net_device *dev,
120 dma_addr_t *bufaddr);
120static int gfar_set_mac_address(struct net_device *dev); 121static int gfar_set_mac_address(struct net_device *dev);
121static int gfar_change_mtu(struct net_device *dev, int new_mtu); 122static int gfar_change_mtu(struct net_device *dev, int new_mtu);
122static irqreturn_t gfar_error(int irq, void *dev_id); 123static irqreturn_t gfar_error(int irq, void *dev_id);
@@ -176,7 +177,7 @@ static int gfar_init_bds(struct net_device *ndev)
176 struct gfar_priv_rx_q *rx_queue = NULL; 177 struct gfar_priv_rx_q *rx_queue = NULL;
177 struct txbd8 *txbdp; 178 struct txbd8 *txbdp;
178 struct rxbd8 *rxbdp; 179 struct rxbd8 *rxbdp;
179 u32 *rfbptr; 180 u32 __iomem *rfbptr;
180 int i, j; 181 int i, j;
181 dma_addr_t bufaddr; 182 dma_addr_t bufaddr;
182 183
@@ -554,7 +555,7 @@ static void gfar_ints_enable(struct gfar_private *priv)
554 } 555 }
555} 556}
556 557
557void lock_tx_qs(struct gfar_private *priv) 558static void lock_tx_qs(struct gfar_private *priv)
558{ 559{
559 int i; 560 int i;
560 561
@@ -562,7 +563,7 @@ void lock_tx_qs(struct gfar_private *priv)
562 spin_lock(&priv->tx_queue[i]->txlock); 563 spin_lock(&priv->tx_queue[i]->txlock);
563} 564}
564 565
565void unlock_tx_qs(struct gfar_private *priv) 566static void unlock_tx_qs(struct gfar_private *priv)
566{ 567{
567 int i; 568 int i;
568 569
@@ -763,7 +764,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
763 u32 *tx_queues, *rx_queues; 764 u32 *tx_queues, *rx_queues;
764 unsigned short mode, poll_mode; 765 unsigned short mode, poll_mode;
765 766
766 if (!np || !of_device_is_available(np)) 767 if (!np)
767 return -ENODEV; 768 return -ENODEV;
768 769
769 if (of_device_is_compatible(np, "fsl,etsec2")) { 770 if (of_device_is_compatible(np, "fsl,etsec2")) {
@@ -2169,7 +2170,7 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
2169void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb) 2170void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
2170{ 2171{
2171 fcb->flags |= TXFCB_VLN; 2172 fcb->flags |= TXFCB_VLN;
2172 fcb->vlctl = vlan_tx_tag_get(skb); 2173 fcb->vlctl = skb_vlan_tag_get(skb);
2173} 2174}
2174 2175
2175static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride, 2176static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
@@ -2229,7 +2230,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2229 regs = tx_queue->grp->regs; 2230 regs = tx_queue->grp->regs;
2230 2231
2231 do_csum = (CHECKSUM_PARTIAL == skb->ip_summed); 2232 do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
2232 do_vlan = vlan_tx_tag_present(skb); 2233 do_vlan = skb_vlan_tag_present(skb);
2233 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 2234 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2234 priv->hwts_tx_en; 2235 priv->hwts_tx_en;
2235 2236
@@ -2671,7 +2672,7 @@ static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
2671 return skb; 2672 return skb;
2672} 2673}
2673 2674
2674struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr) 2675static struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr)
2675{ 2676{
2676 struct gfar_private *priv = netdev_priv(dev); 2677 struct gfar_private *priv = netdev_priv(dev);
2677 struct sk_buff *skb; 2678 struct sk_buff *skb;
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index b581b8823a2a..9e1802400c23 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1039,7 +1039,7 @@ struct gfar_priv_rx_q {
1039 /* RX Coalescing values */ 1039 /* RX Coalescing values */
1040 unsigned char rxcoalescing; 1040 unsigned char rxcoalescing;
1041 unsigned long rxic; 1041 unsigned long rxic;
1042 u32 *rfbptr; 1042 u32 __iomem *rfbptr;
1043}; 1043};
1044 1044
1045enum gfar_irqinfo_id { 1045enum gfar_irqinfo_id {
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index 6e7db66069aa..3a83bc2c613c 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -32,18 +32,19 @@ struct tgec_mdio_controller {
32 __be32 mdio_addr; /* MDIO address */ 32 __be32 mdio_addr; /* MDIO address */
33} __packed; 33} __packed;
34 34
35#define MDIO_STAT_ENC BIT(6)
35#define MDIO_STAT_CLKDIV(x) (((x>>1) & 0xff) << 8) 36#define MDIO_STAT_CLKDIV(x) (((x>>1) & 0xff) << 8)
36#define MDIO_STAT_BSY (1 << 0) 37#define MDIO_STAT_BSY BIT(0)
37#define MDIO_STAT_RD_ER (1 << 1) 38#define MDIO_STAT_RD_ER BIT(1)
38#define MDIO_CTL_DEV_ADDR(x) (x & 0x1f) 39#define MDIO_CTL_DEV_ADDR(x) (x & 0x1f)
39#define MDIO_CTL_PORT_ADDR(x) ((x & 0x1f) << 5) 40#define MDIO_CTL_PORT_ADDR(x) ((x & 0x1f) << 5)
40#define MDIO_CTL_PRE_DIS (1 << 10) 41#define MDIO_CTL_PRE_DIS BIT(10)
41#define MDIO_CTL_SCAN_EN (1 << 11) 42#define MDIO_CTL_SCAN_EN BIT(11)
42#define MDIO_CTL_POST_INC (1 << 14) 43#define MDIO_CTL_POST_INC BIT(14)
43#define MDIO_CTL_READ (1 << 15) 44#define MDIO_CTL_READ BIT(15)
44 45
45#define MDIO_DATA(x) (x & 0xffff) 46#define MDIO_DATA(x) (x & 0xffff)
46#define MDIO_DATA_BSY (1 << 31) 47#define MDIO_DATA_BSY BIT(31)
47 48
48/* 49/*
49 * Wait until the MDIO bus is free 50 * Wait until the MDIO bus is free
@@ -51,12 +52,16 @@ struct tgec_mdio_controller {
51static int xgmac_wait_until_free(struct device *dev, 52static int xgmac_wait_until_free(struct device *dev,
52 struct tgec_mdio_controller __iomem *regs) 53 struct tgec_mdio_controller __iomem *regs)
53{ 54{
54 uint32_t status; 55 unsigned int timeout;
55 56
56 /* Wait till the bus is free */ 57 /* Wait till the bus is free */
57 status = spin_event_timeout( 58 timeout = TIMEOUT;
58 !((in_be32(&regs->mdio_stat)) & MDIO_STAT_BSY), TIMEOUT, 0); 59 while ((ioread32be(&regs->mdio_stat) & MDIO_STAT_BSY) && timeout) {
59 if (!status) { 60 cpu_relax();
61 timeout--;
62 }
63
64 if (!timeout) {
60 dev_err(dev, "timeout waiting for bus to be free\n"); 65 dev_err(dev, "timeout waiting for bus to be free\n");
61 return -ETIMEDOUT; 66 return -ETIMEDOUT;
62 } 67 }
@@ -70,12 +75,16 @@ static int xgmac_wait_until_free(struct device *dev,
70static int xgmac_wait_until_done(struct device *dev, 75static int xgmac_wait_until_done(struct device *dev,
71 struct tgec_mdio_controller __iomem *regs) 76 struct tgec_mdio_controller __iomem *regs)
72{ 77{
73 uint32_t status; 78 unsigned int timeout;
74 79
75 /* Wait till the MDIO write is complete */ 80 /* Wait till the MDIO write is complete */
76 status = spin_event_timeout( 81 timeout = TIMEOUT;
77 !((in_be32(&regs->mdio_data)) & MDIO_DATA_BSY), TIMEOUT, 0); 82 while ((ioread32be(&regs->mdio_data) & MDIO_DATA_BSY) && timeout) {
78 if (!status) { 83 cpu_relax();
84 timeout--;
85 }
86
87 if (!timeout) {
79 dev_err(dev, "timeout waiting for operation to complete\n"); 88 dev_err(dev, "timeout waiting for operation to complete\n");
80 return -ETIMEDOUT; 89 return -ETIMEDOUT;
81 } 90 }
@@ -91,29 +100,42 @@ static int xgmac_wait_until_done(struct device *dev,
91static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value) 100static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
92{ 101{
93 struct tgec_mdio_controller __iomem *regs = bus->priv; 102 struct tgec_mdio_controller __iomem *regs = bus->priv;
94 uint16_t dev_addr = regnum >> 16; 103 uint16_t dev_addr;
104 u32 mdio_ctl, mdio_stat;
95 int ret; 105 int ret;
96 106
97 /* Setup the MII Mgmt clock speed */ 107 mdio_stat = ioread32be(&regs->mdio_stat);
98 out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100)); 108 if (regnum & MII_ADDR_C45) {
109 /* Clause 45 (ie 10G) */
110 dev_addr = (regnum >> 16) & 0x1f;
111 mdio_stat |= MDIO_STAT_ENC;
112 } else {
113 /* Clause 22 (ie 1G) */
114 dev_addr = regnum & 0x1f;
115 mdio_stat &= ~MDIO_STAT_ENC;
116 }
117
118 iowrite32be(mdio_stat, &regs->mdio_stat);
99 119
100 ret = xgmac_wait_until_free(&bus->dev, regs); 120 ret = xgmac_wait_until_free(&bus->dev, regs);
101 if (ret) 121 if (ret)
102 return ret; 122 return ret;
103 123
104 /* Set the port and dev addr */ 124 /* Set the port and dev addr */
105 out_be32(&regs->mdio_ctl, 125 mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
106 MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr)); 126 iowrite32be(mdio_ctl, &regs->mdio_ctl);
107 127
108 /* Set the register address */ 128 /* Set the register address */
109 out_be32(&regs->mdio_addr, regnum & 0xffff); 129 if (regnum & MII_ADDR_C45) {
130 iowrite32be(regnum & 0xffff, &regs->mdio_addr);
110 131
111 ret = xgmac_wait_until_free(&bus->dev, regs); 132 ret = xgmac_wait_until_free(&bus->dev, regs);
112 if (ret) 133 if (ret)
113 return ret; 134 return ret;
135 }
114 136
115 /* Write the value to the register */ 137 /* Write the value to the register */
116 out_be32(&regs->mdio_data, MDIO_DATA(value)); 138 iowrite32be(MDIO_DATA(value), &regs->mdio_data);
117 139
118 ret = xgmac_wait_until_done(&bus->dev, regs); 140 ret = xgmac_wait_until_done(&bus->dev, regs);
119 if (ret) 141 if (ret)
@@ -130,13 +152,22 @@ static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 val
130static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum) 152static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
131{ 153{
132 struct tgec_mdio_controller __iomem *regs = bus->priv; 154 struct tgec_mdio_controller __iomem *regs = bus->priv;
133 uint16_t dev_addr = regnum >> 16; 155 uint16_t dev_addr;
156 uint32_t mdio_stat;
134 uint32_t mdio_ctl; 157 uint32_t mdio_ctl;
135 uint16_t value; 158 uint16_t value;
136 int ret; 159 int ret;
137 160
138 /* Setup the MII Mgmt clock speed */ 161 mdio_stat = ioread32be(&regs->mdio_stat);
139 out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100)); 162 if (regnum & MII_ADDR_C45) {
163 dev_addr = (regnum >> 16) & 0x1f;
164 mdio_stat |= MDIO_STAT_ENC;
165 } else {
166 dev_addr = regnum & 0x1f;
167 mdio_stat &= ~MDIO_STAT_ENC;
168 }
169
170 iowrite32be(mdio_stat, &regs->mdio_stat);
140 171
141 ret = xgmac_wait_until_free(&bus->dev, regs); 172 ret = xgmac_wait_until_free(&bus->dev, regs);
142 if (ret) 173 if (ret)
@@ -144,54 +175,38 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
144 175
145 /* Set the Port and Device Addrs */ 176 /* Set the Port and Device Addrs */
146 mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); 177 mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
147 out_be32(&regs->mdio_ctl, mdio_ctl); 178 iowrite32be(mdio_ctl, &regs->mdio_ctl);
148 179
149 /* Set the register address */ 180 /* Set the register address */
150 out_be32(&regs->mdio_addr, regnum & 0xffff); 181 if (regnum & MII_ADDR_C45) {
182 iowrite32be(regnum & 0xffff, &regs->mdio_addr);
151 183
152 ret = xgmac_wait_until_free(&bus->dev, regs); 184 ret = xgmac_wait_until_free(&bus->dev, regs);
153 if (ret) 185 if (ret)
154 return ret; 186 return ret;
187 }
155 188
156 /* Initiate the read */ 189 /* Initiate the read */
157 out_be32(&regs->mdio_ctl, mdio_ctl | MDIO_CTL_READ); 190 iowrite32be(mdio_ctl | MDIO_CTL_READ, &regs->mdio_ctl);
158 191
159 ret = xgmac_wait_until_done(&bus->dev, regs); 192 ret = xgmac_wait_until_done(&bus->dev, regs);
160 if (ret) 193 if (ret)
161 return ret; 194 return ret;
162 195
163 /* Return all Fs if nothing was there */ 196 /* Return all Fs if nothing was there */
164 if (in_be32(&regs->mdio_stat) & MDIO_STAT_RD_ER) { 197 if (ioread32be(&regs->mdio_stat) & MDIO_STAT_RD_ER) {
165 dev_err(&bus->dev, 198 dev_err(&bus->dev,
166 "Error while reading PHY%d reg at %d.%hhu\n", 199 "Error while reading PHY%d reg at %d.%hhu\n",
167 phy_id, dev_addr, regnum); 200 phy_id, dev_addr, regnum);
168 return 0xffff; 201 return 0xffff;
169 } 202 }
170 203
171 value = in_be32(&regs->mdio_data) & 0xffff; 204 value = ioread32be(&regs->mdio_data) & 0xffff;
172 dev_dbg(&bus->dev, "read %04x\n", value); 205 dev_dbg(&bus->dev, "read %04x\n", value);
173 206
174 return value; 207 return value;
175} 208}
176 209
177/* Reset the MIIM registers, and wait for the bus to free */
178static int xgmac_mdio_reset(struct mii_bus *bus)
179{
180 struct tgec_mdio_controller __iomem *regs = bus->priv;
181 int ret;
182
183 mutex_lock(&bus->mdio_lock);
184
185 /* Setup the MII Mgmt clock speed */
186 out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
187
188 ret = xgmac_wait_until_free(&bus->dev, regs);
189
190 mutex_unlock(&bus->mdio_lock);
191
192 return ret;
193}
194
195static int xgmac_mdio_probe(struct platform_device *pdev) 210static int xgmac_mdio_probe(struct platform_device *pdev)
196{ 211{
197 struct device_node *np = pdev->dev.of_node; 212 struct device_node *np = pdev->dev.of_node;
@@ -205,15 +220,13 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
205 return ret; 220 return ret;
206 } 221 }
207 222
208 bus = mdiobus_alloc_size(PHY_MAX_ADDR * sizeof(int)); 223 bus = mdiobus_alloc();
209 if (!bus) 224 if (!bus)
210 return -ENOMEM; 225 return -ENOMEM;
211 226
212 bus->name = "Freescale XGMAC MDIO Bus"; 227 bus->name = "Freescale XGMAC MDIO Bus";
213 bus->read = xgmac_mdio_read; 228 bus->read = xgmac_mdio_read;
214 bus->write = xgmac_mdio_write; 229 bus->write = xgmac_mdio_write;
215 bus->reset = xgmac_mdio_reset;
216 bus->irq = bus->priv;
217 bus->parent = &pdev->dev; 230 bus->parent = &pdev->dev;
218 snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start); 231 snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start);
219 232
@@ -258,6 +271,9 @@ static struct of_device_id xgmac_mdio_match[] = {
258 { 271 {
259 .compatible = "fsl,fman-xmdio", 272 .compatible = "fsl,fman-xmdio",
260 }, 273 },
274 {
275 .compatible = "fsl,fman-memac-mdio",
276 },
261 {}, 277 {},
262}; 278};
263MODULE_DEVICE_TABLE(of, xgmac_mdio_match); 279MODULE_DEVICE_TABLE(of, xgmac_mdio_match);
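The xgmac_mdio read/write paths above now handle both Clause 22 and Clause 45 accesses on the same bus: a Clause 45 register number carries the MII_ADDR_C45 flag, the MMD device address in bits 20:16 and the 16-bit register in the low bits, while a Clause 22 regnum is just the 5-bit register itself. The sketch below packs and then splits such a regnum the way the driver does (MII_ADDR_C45 is bit 30 in <linux/mdio.h>); the phylib plumbing around it is omitted.

#include <stdint.h>
#include <stdio.h>

#define MII_ADDR_C45 (1 << 30)              /* as in linux/mdio.h */

/* Pack a Clause 45 access: MMD device address + 16-bit register. */
static uint32_t c45_regnum(unsigned int devad, unsigned int reg)
{
	return MII_ADDR_C45 | ((devad & 0x1f) << 16) | (reg & 0xffff);
}

/* Split a regnum the way xgmac_mdio_read()/write() now do. */
static void decode(uint32_t regnum)
{
	if (regnum & MII_ADDR_C45)
		printf("C45: devad=%u reg=0x%04x\n",
		       (regnum >> 16) & 0x1f, regnum & 0xffff);
	else
		printf("C22: reg=%u\n", regnum & 0x1f);
}

int main(void)
{
	decode(c45_regnum(1, 0x0000));   /* MMD 1, register 0 (PMA/PMD control) */
	decode(3);                       /* plain Clause 22 register 3 */
	return 0;
}
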
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index e9421731b05e..a54d89791311 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -24,4 +24,13 @@ config HIX5HD2_GMAC
24 help 24 help
25 This selects the hix5hd2 mac family network device. 25 This selects the hix5hd2 mac family network device.
26 26
27config HIP04_ETH
28 tristate "HISILICON P04 Ethernet support"
29 select PHYLIB
30 select MARVELL_PHY
31 select MFD_SYSCON
32 ---help---
 33	  If you wish to compile a kernel for hardware with a HiSilicon P04 SoC and
 34	  want to use the internal Ethernet, then you should answer Y to this.
35
27endif # NET_VENDOR_HISILICON 36endif # NET_VENDOR_HISILICON
diff --git a/drivers/net/ethernet/hisilicon/Makefile b/drivers/net/ethernet/hisilicon/Makefile
index 9175e84622d4..6c14540a4dc5 100644
--- a/drivers/net/ethernet/hisilicon/Makefile
+++ b/drivers/net/ethernet/hisilicon/Makefile
@@ -3,3 +3,4 @@
3# 3#
4 4
5obj-$(CONFIG_HIX5HD2_GMAC) += hix5hd2_gmac.o 5obj-$(CONFIG_HIX5HD2_GMAC) += hix5hd2_gmac.o
6obj-$(CONFIG_HIP04_ETH) += hip04_mdio.o hip04_eth.o
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
new file mode 100644
index 000000000000..b72d238695d7
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -0,0 +1,971 @@
1
2/* Copyright (c) 2014 Linaro Ltd.
3 * Copyright (c) 2014 Hisilicon Limited.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 */
10
11#include <linux/module.h>
12#include <linux/etherdevice.h>
13#include <linux/platform_device.h>
14#include <linux/interrupt.h>
15#include <linux/ktime.h>
16#include <linux/of_address.h>
17#include <linux/phy.h>
18#include <linux/of_mdio.h>
19#include <linux/of_net.h>
20#include <linux/mfd/syscon.h>
21#include <linux/regmap.h>
22
23#define PPE_CFG_RX_ADDR 0x100
24#define PPE_CFG_POOL_GRP 0x300
25#define PPE_CFG_RX_BUF_SIZE 0x400
26#define PPE_CFG_RX_FIFO_SIZE 0x500
27#define PPE_CURR_BUF_CNT 0xa200
28
29#define GE_DUPLEX_TYPE 0x08
30#define GE_MAX_FRM_SIZE_REG 0x3c
31#define GE_PORT_MODE 0x40
32#define GE_PORT_EN 0x44
33#define GE_SHORT_RUNTS_THR_REG 0x50
34#define GE_TX_LOCAL_PAGE_REG 0x5c
35#define GE_TRANSMIT_CONTROL_REG 0x60
36#define GE_CF_CRC_STRIP_REG 0x1b0
37#define GE_MODE_CHANGE_REG 0x1b4
38#define GE_RECV_CONTROL_REG 0x1e0
39#define GE_STATION_MAC_ADDRESS 0x210
40#define PPE_CFG_CPU_ADD_ADDR 0x580
41#define PPE_CFG_MAX_FRAME_LEN_REG 0x408
42#define PPE_CFG_BUS_CTRL_REG 0x424
43#define PPE_CFG_RX_CTRL_REG 0x428
44#define PPE_CFG_RX_PKT_MODE_REG 0x438
45#define PPE_CFG_QOS_VMID_GEN 0x500
46#define PPE_CFG_RX_PKT_INT 0x538
47#define PPE_INTEN 0x600
48#define PPE_INTSTS 0x608
49#define PPE_RINT 0x604
50#define PPE_CFG_STS_MODE 0x700
51#define PPE_HIS_RX_PKT_CNT 0x804
52
53/* REG_INTERRUPT */
54#define RCV_INT BIT(10)
55#define RCV_NOBUF BIT(8)
56#define RCV_DROP BIT(7)
57#define TX_DROP BIT(6)
58#define DEF_INT_ERR (RCV_NOBUF | RCV_DROP | TX_DROP)
59#define DEF_INT_MASK (RCV_INT | DEF_INT_ERR)
60
61/* TX descriptor config */
62#define TX_FREE_MEM BIT(0)
63#define TX_READ_ALLOC_L3 BIT(1)
64#define TX_FINISH_CACHE_INV BIT(2)
65#define TX_CLEAR_WB BIT(4)
66#define TX_L3_CHECKSUM BIT(5)
67#define TX_LOOP_BACK BIT(11)
68
69/* RX error */
70#define RX_PKT_DROP BIT(0)
71#define RX_L2_ERR BIT(1)
72#define RX_PKT_ERR (RX_PKT_DROP | RX_L2_ERR)
73
74#define SGMII_SPEED_1000 0x08
75#define SGMII_SPEED_100 0x07
76#define SGMII_SPEED_10 0x06
77#define MII_SPEED_100 0x01
78#define MII_SPEED_10 0x00
79
80#define GE_DUPLEX_FULL BIT(0)
81#define GE_DUPLEX_HALF 0x00
82#define GE_MODE_CHANGE_EN BIT(0)
83
84#define GE_TX_AUTO_NEG BIT(5)
85#define GE_TX_ADD_CRC BIT(6)
86#define GE_TX_SHORT_PAD_THROUGH BIT(7)
87
88#define GE_RX_STRIP_CRC BIT(0)
89#define GE_RX_STRIP_PAD BIT(3)
90#define GE_RX_PAD_EN BIT(4)
91
92#define GE_AUTO_NEG_CTL BIT(0)
93
94#define GE_RX_INT_THRESHOLD BIT(6)
95#define GE_RX_TIMEOUT 0x04
96
97#define GE_RX_PORT_EN BIT(1)
98#define GE_TX_PORT_EN BIT(2)
99
100#define PPE_CFG_STS_RX_PKT_CNT_RC BIT(12)
101
102#define PPE_CFG_RX_PKT_ALIGN BIT(18)
103#define PPE_CFG_QOS_VMID_MODE BIT(14)
104#define PPE_CFG_QOS_VMID_GRP_SHIFT 8
105
106#define PPE_CFG_RX_FIFO_FSFU BIT(11)
107#define PPE_CFG_RX_DEPTH_SHIFT 16
108#define PPE_CFG_RX_START_SHIFT 0
109#define PPE_CFG_RX_CTRL_ALIGN_SHIFT 11
110
111#define PPE_CFG_BUS_LOCAL_REL BIT(14)
112#define PPE_CFG_BUS_BIG_ENDIEN BIT(0)
113
114#define RX_DESC_NUM 128
115#define TX_DESC_NUM 256
116#define TX_NEXT(N) (((N) + 1) & (TX_DESC_NUM-1))
117#define RX_NEXT(N) (((N) + 1) & (RX_DESC_NUM-1))
118
119#define GMAC_PPE_RX_PKT_MAX_LEN 379
120#define GMAC_MAX_PKT_LEN 1516
121#define GMAC_MIN_PKT_LEN 31
122#define RX_BUF_SIZE 1600
123#define RESET_TIMEOUT 1000
124#define TX_TIMEOUT (6 * HZ)
125
126#define DRV_NAME "hip04-ether"
127#define DRV_VERSION "v1.0"
128
129#define HIP04_MAX_TX_COALESCE_USECS 200
130#define HIP04_MIN_TX_COALESCE_USECS 100
131#define HIP04_MAX_TX_COALESCE_FRAMES 200
132#define HIP04_MIN_TX_COALESCE_FRAMES 100
133
134struct tx_desc {
135 u32 send_addr;
136 u32 send_size;
137 u32 next_addr;
138 u32 cfg;
139 u32 wb_addr;
140} __aligned(64);
141
142struct rx_desc {
143 u16 reserved_16;
144 u16 pkt_len;
145 u32 reserve1[3];
146 u32 pkt_err;
147 u32 reserve2[4];
148};
149
150struct hip04_priv {
151 void __iomem *base;
152 int phy_mode;
153 int chan;
154 unsigned int port;
155 unsigned int speed;
156 unsigned int duplex;
157 unsigned int reg_inten;
158
159 struct napi_struct napi;
160 struct net_device *ndev;
161
162 struct tx_desc *tx_desc;
163 dma_addr_t tx_desc_dma;
164 struct sk_buff *tx_skb[TX_DESC_NUM];
165 dma_addr_t tx_phys[TX_DESC_NUM];
166 unsigned int tx_head;
167
168 int tx_coalesce_frames;
169 int tx_coalesce_usecs;
170 struct hrtimer tx_coalesce_timer;
171
172 unsigned char *rx_buf[RX_DESC_NUM];
173 dma_addr_t rx_phys[RX_DESC_NUM];
174 unsigned int rx_head;
175 unsigned int rx_buf_size;
176
177 struct device_node *phy_node;
178 struct phy_device *phy;
179 struct regmap *map;
180 struct work_struct tx_timeout_task;
181
182 /* written only by tx cleanup */
183 unsigned int tx_tail ____cacheline_aligned_in_smp;
184};
185
186static inline unsigned int tx_count(unsigned int head, unsigned int tail)
187{
188 return (head - tail) % (TX_DESC_NUM - 1);
189}
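In the new hip04 driver, tx_head (producer, advanced in start_xmit) and tx_tail (consumer, advanced in hip04_tx_reclaim) both step through the ring with TX_NEXT(), and tx_count() above reports how many descriptors are in flight; the transmit path stops the queue once that count reaches TX_DESC_NUM - 1 so one slot always stays free. A user-space sketch of that index arithmetic, reusing only the two macros and the occupancy formula, nothing else of the driver:

#include <stdio.h>

#define TX_DESC_NUM 256
#define TX_NEXT(N)  (((N) + 1) & (TX_DESC_NUM - 1))

/* Same occupancy formula as the driver's tx_count(). */
static unsigned int tx_count(unsigned int head, unsigned int tail)
{
	return (head - tail) % (TX_DESC_NUM - 1);
}

int main(void)
{
	unsigned int head = 0, tail = 0, i;

	for (i = 0; i < 200; i++)        /* producer side: 200 packets queued */
		head = TX_NEXT(head);
	for (i = 0; i < 160; i++)        /* consumer side: 160 reclaimed */
		tail = TX_NEXT(tail);

	/* 40 descriptors still in flight; the queue would only be stopped
	 * when this count reached TX_DESC_NUM - 1. */
	printf("in flight: %u\n", tx_count(head, tail));
	return 0;
}
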
190
191static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
192{
193 struct hip04_priv *priv = netdev_priv(ndev);
194 u32 val;
195
196 priv->speed = speed;
197 priv->duplex = duplex;
198
199 switch (priv->phy_mode) {
200 case PHY_INTERFACE_MODE_SGMII:
201 if (speed == SPEED_1000)
202 val = SGMII_SPEED_1000;
203 else if (speed == SPEED_100)
204 val = SGMII_SPEED_100;
205 else
206 val = SGMII_SPEED_10;
207 break;
208 case PHY_INTERFACE_MODE_MII:
209 if (speed == SPEED_100)
210 val = MII_SPEED_100;
211 else
212 val = MII_SPEED_10;
213 break;
214 default:
215 netdev_warn(ndev, "not supported mode\n");
216 val = MII_SPEED_10;
217 break;
218 }
219 writel_relaxed(val, priv->base + GE_PORT_MODE);
220
221 val = duplex ? GE_DUPLEX_FULL : GE_DUPLEX_HALF;
222 writel_relaxed(val, priv->base + GE_DUPLEX_TYPE);
223
224 val = GE_MODE_CHANGE_EN;
225 writel_relaxed(val, priv->base + GE_MODE_CHANGE_REG);
226}
227
228static void hip04_reset_ppe(struct hip04_priv *priv)
229{
230 u32 val, tmp, timeout = 0;
231
232 do {
233 regmap_read(priv->map, priv->port * 4 + PPE_CURR_BUF_CNT, &val);
234 regmap_read(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, &tmp);
235 if (timeout++ > RESET_TIMEOUT)
236 break;
237 } while (val & 0xfff);
238}
239
240static void hip04_config_fifo(struct hip04_priv *priv)
241{
242 u32 val;
243
244 val = readl_relaxed(priv->base + PPE_CFG_STS_MODE);
245 val |= PPE_CFG_STS_RX_PKT_CNT_RC;
246 writel_relaxed(val, priv->base + PPE_CFG_STS_MODE);
247
248 val = BIT(priv->port);
249 regmap_write(priv->map, priv->port * 4 + PPE_CFG_POOL_GRP, val);
250
251 val = priv->port << PPE_CFG_QOS_VMID_GRP_SHIFT;
252 val |= PPE_CFG_QOS_VMID_MODE;
253 writel_relaxed(val, priv->base + PPE_CFG_QOS_VMID_GEN);
254
255 val = RX_BUF_SIZE;
256 regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_BUF_SIZE, val);
257
258 val = RX_DESC_NUM << PPE_CFG_RX_DEPTH_SHIFT;
259 val |= PPE_CFG_RX_FIFO_FSFU;
260 val |= priv->chan << PPE_CFG_RX_START_SHIFT;
261 regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_FIFO_SIZE, val);
262
263 val = NET_IP_ALIGN << PPE_CFG_RX_CTRL_ALIGN_SHIFT;
264 writel_relaxed(val, priv->base + PPE_CFG_RX_CTRL_REG);
265
266 val = PPE_CFG_RX_PKT_ALIGN;
267 writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_MODE_REG);
268
269 val = PPE_CFG_BUS_LOCAL_REL | PPE_CFG_BUS_BIG_ENDIEN;
270 writel_relaxed(val, priv->base + PPE_CFG_BUS_CTRL_REG);
271
272 val = GMAC_PPE_RX_PKT_MAX_LEN;
273 writel_relaxed(val, priv->base + PPE_CFG_MAX_FRAME_LEN_REG);
274
275 val = GMAC_MAX_PKT_LEN;
276 writel_relaxed(val, priv->base + GE_MAX_FRM_SIZE_REG);
277
278 val = GMAC_MIN_PKT_LEN;
279 writel_relaxed(val, priv->base + GE_SHORT_RUNTS_THR_REG);
280
281 val = readl_relaxed(priv->base + GE_TRANSMIT_CONTROL_REG);
282 val |= GE_TX_AUTO_NEG | GE_TX_ADD_CRC | GE_TX_SHORT_PAD_THROUGH;
283 writel_relaxed(val, priv->base + GE_TRANSMIT_CONTROL_REG);
284
285 val = GE_RX_STRIP_CRC;
286 writel_relaxed(val, priv->base + GE_CF_CRC_STRIP_REG);
287
288 val = readl_relaxed(priv->base + GE_RECV_CONTROL_REG);
289 val |= GE_RX_STRIP_PAD | GE_RX_PAD_EN;
290 writel_relaxed(val, priv->base + GE_RECV_CONTROL_REG);
291
292 val = GE_AUTO_NEG_CTL;
293 writel_relaxed(val, priv->base + GE_TX_LOCAL_PAGE_REG);
294}
295
296static void hip04_mac_enable(struct net_device *ndev)
297{
298 struct hip04_priv *priv = netdev_priv(ndev);
299 u32 val;
300
301 /* enable tx & rx */
302 val = readl_relaxed(priv->base + GE_PORT_EN);
303 val |= GE_RX_PORT_EN | GE_TX_PORT_EN;
304 writel_relaxed(val, priv->base + GE_PORT_EN);
305
306 /* clear rx int */
307 val = RCV_INT;
308 writel_relaxed(val, priv->base + PPE_RINT);
309
310 /* config recv int */
311 val = GE_RX_INT_THRESHOLD | GE_RX_TIMEOUT;
312 writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_INT);
313
314 /* enable interrupt */
315 priv->reg_inten = DEF_INT_MASK;
316 writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
317}
318
319static void hip04_mac_disable(struct net_device *ndev)
320{
321 struct hip04_priv *priv = netdev_priv(ndev);
322 u32 val;
323
324 /* disable int */
325 priv->reg_inten &= ~(DEF_INT_MASK);
326 writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
327
328 /* disable tx & rx */
329 val = readl_relaxed(priv->base + GE_PORT_EN);
330 val &= ~(GE_RX_PORT_EN | GE_TX_PORT_EN);
331 writel_relaxed(val, priv->base + GE_PORT_EN);
332}
333
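/* TX doorbell: plain writel() (not the _relaxed variant used for the
 * configuration registers) so that the descriptor filled in by the caller
 * is visible to the device before the device is told to fetch it.
 */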
334static void hip04_set_xmit_desc(struct hip04_priv *priv, dma_addr_t phys)
335{
336 writel(phys, priv->base + PPE_CFG_CPU_ADD_ADDR);
337}
338
339static void hip04_set_recv_desc(struct hip04_priv *priv, dma_addr_t phys)
340{
341 regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, phys);
342}
343
344static u32 hip04_recv_cnt(struct hip04_priv *priv)
345{
346 return readl(priv->base + PPE_HIS_RX_PKT_CNT);
347}
348
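/* The station MAC address is split across two registers: the first two
 * bytes live at GE_STATION_MAC_ADDRESS, the remaining four at offset +4.
 */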
349static void hip04_update_mac_address(struct net_device *ndev)
350{
351 struct hip04_priv *priv = netdev_priv(ndev);
352
353 writel_relaxed(((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1])),
354 priv->base + GE_STATION_MAC_ADDRESS);
355 writel_relaxed(((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
356 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5])),
357 priv->base + GE_STATION_MAC_ADDRESS + 4);
358}
359
360static int hip04_set_mac_address(struct net_device *ndev, void *addr)
361{
362 eth_mac_addr(ndev, addr);
363 hip04_update_mac_address(ndev);
364 return 0;
365}
366
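/* Reclaim completed TX descriptors: a descriptor whose send_addr has been
 * cleared (TX_CLEAR_WB write-back) is treated as done; stop at the first
 * one still owned by the hardware unless @force is set (ifdown path).
 * Returns the number of descriptors still outstanding.
 */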
367static int hip04_tx_reclaim(struct net_device *ndev, bool force)
368{
369 struct hip04_priv *priv = netdev_priv(ndev);
370 unsigned tx_tail = priv->tx_tail;
371 struct tx_desc *desc;
372 unsigned int bytes_compl = 0, pkts_compl = 0;
373 unsigned int count;
374
375 smp_rmb();
376 count = tx_count(ACCESS_ONCE(priv->tx_head), tx_tail);
377 if (count == 0)
378 goto out;
379
380 while (count) {
381 desc = &priv->tx_desc[tx_tail];
382 if (desc->send_addr != 0) {
383 if (force)
384 desc->send_addr = 0;
385 else
386 break;
387 }
388
389 if (priv->tx_phys[tx_tail]) {
390 dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail],
391 priv->tx_skb[tx_tail]->len,
392 DMA_TO_DEVICE);
393 priv->tx_phys[tx_tail] = 0;
394 }
395 pkts_compl++;
396 bytes_compl += priv->tx_skb[tx_tail]->len;
397 dev_kfree_skb(priv->tx_skb[tx_tail]);
398 priv->tx_skb[tx_tail] = NULL;
399 tx_tail = TX_NEXT(tx_tail);
400 count--;
401 }
402
403 priv->tx_tail = tx_tail;
404 smp_wmb(); /* Ensure tx_tail visible to xmit */
405
406out:
407 if (pkts_compl || bytes_compl)
408 netdev_completed_queue(ndev, pkts_compl, bytes_compl);
409
410 if (unlikely(netif_queue_stopped(ndev)) && (count < (TX_DESC_NUM - 1)))
411 netif_wake_queue(ndev);
412
413 return count;
414}
415
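/* Transmit one skb: map it, fill the next TX descriptor (fields are
 * big-endian), hand its bus address to the PPE via hip04_set_xmit_desc(),
 * then either kick NAPI for reclaim once tx_coalesce_frames are pending or
 * arm the coalesce timer.
 */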
416static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
417{
418 struct hip04_priv *priv = netdev_priv(ndev);
419 struct net_device_stats *stats = &ndev->stats;
420 unsigned int tx_head = priv->tx_head, count;
421 struct tx_desc *desc = &priv->tx_desc[tx_head];
422 dma_addr_t phys;
423
424 smp_rmb();
425 count = tx_count(tx_head, ACCESS_ONCE(priv->tx_tail));
426 if (count == (TX_DESC_NUM - 1)) {
427 netif_stop_queue(ndev);
428 return NETDEV_TX_BUSY;
429 }
430
431 phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
432 if (dma_mapping_error(&ndev->dev, phys)) {
433 dev_kfree_skb(skb);
434 return NETDEV_TX_OK;
435 }
436
437 priv->tx_skb[tx_head] = skb;
438 priv->tx_phys[tx_head] = phys;
439 desc->send_addr = cpu_to_be32(phys);
440 desc->send_size = cpu_to_be32(skb->len);
441 desc->cfg = cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
442 phys = priv->tx_desc_dma + tx_head * sizeof(struct tx_desc);
443 desc->wb_addr = cpu_to_be32(phys);
444 skb_tx_timestamp(skb);
445
446 hip04_set_xmit_desc(priv, phys);
447 priv->tx_head = TX_NEXT(tx_head);
448 count++;
449 netdev_sent_queue(ndev, skb->len);
450
451 stats->tx_bytes += skb->len;
452 stats->tx_packets++;
453
454 /* Ensure tx_head update visible to tx reclaim */
455 smp_wmb();
456
457 /* queue is getting full, better start cleaning up now */
458 if (count >= priv->tx_coalesce_frames) {
459 if (napi_schedule_prep(&priv->napi)) {
460 /* disable rx interrupt and timer */
461 priv->reg_inten &= ~(RCV_INT);
462 writel_relaxed(DEF_INT_MASK & ~RCV_INT,
463 priv->base + PPE_INTEN);
464 hrtimer_cancel(&priv->tx_coalesce_timer);
465 __napi_schedule(&priv->napi);
466 }
467 } else if (!hrtimer_is_queued(&priv->tx_coalesce_timer)) {
468 /* cleanup not pending yet, start a new timer */
469 hrtimer_start_expires(&priv->tx_coalesce_timer,
470 HRTIMER_MODE_REL);
471 }
472
473 return NETDEV_TX_OK;
474}
475
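/* NAPI poll: consume up to @budget received buffers, replace each consumed
 * buffer with a freshly mapped fragment, and re-enable the RX interrupt
 * once the hardware packet counter drains. TX reclaim is piggy-backed at
 * the end of every poll.
 */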
476static int hip04_rx_poll(struct napi_struct *napi, int budget)
477{
478 struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi);
479 struct net_device *ndev = priv->ndev;
480 struct net_device_stats *stats = &ndev->stats;
481 unsigned int cnt = hip04_recv_cnt(priv);
482 struct rx_desc *desc;
483 struct sk_buff *skb;
484 unsigned char *buf;
485 bool last = false;
486 dma_addr_t phys;
487 int rx = 0;
488 int tx_remaining;
489 u16 len;
490 u32 err;
491
492 while (cnt && !last) {
493 buf = priv->rx_buf[priv->rx_head];
494 skb = build_skb(buf, priv->rx_buf_size);
495		if (unlikely(!skb)) {
496			net_dbg_ratelimited("build_skb failed\n");
			goto done;
		}
497
498 dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
499 RX_BUF_SIZE, DMA_FROM_DEVICE);
500 priv->rx_phys[priv->rx_head] = 0;
501
502 desc = (struct rx_desc *)skb->data;
503 len = be16_to_cpu(desc->pkt_len);
504 err = be32_to_cpu(desc->pkt_err);
505
506		if (len == 0) {
507 dev_kfree_skb_any(skb);
508 last = true;
509 } else if ((err & RX_PKT_ERR) || (len >= GMAC_MAX_PKT_LEN)) {
510 dev_kfree_skb_any(skb);
511 stats->rx_dropped++;
512 stats->rx_errors++;
513 } else {
514 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
515 skb_put(skb, len);
516 skb->protocol = eth_type_trans(skb, ndev);
517 napi_gro_receive(&priv->napi, skb);
518 stats->rx_packets++;
519 stats->rx_bytes += len;
520 rx++;
521 }
522
523 buf = netdev_alloc_frag(priv->rx_buf_size);
524 if (!buf)
525 goto done;
526 phys = dma_map_single(&ndev->dev, buf,
527 RX_BUF_SIZE, DMA_FROM_DEVICE);
528 if (dma_mapping_error(&ndev->dev, phys))
529 goto done;
530 priv->rx_buf[priv->rx_head] = buf;
531 priv->rx_phys[priv->rx_head] = phys;
532 hip04_set_recv_desc(priv, phys);
533
534 priv->rx_head = RX_NEXT(priv->rx_head);
535 if (rx >= budget)
536 goto done;
537
538 if (--cnt == 0)
539 cnt = hip04_recv_cnt(priv);
540 }
541
542 if (!(priv->reg_inten & RCV_INT)) {
543 /* enable rx interrupt */
544 priv->reg_inten |= RCV_INT;
545 writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
546 }
547 napi_complete(napi);
548done:
549 /* clean up tx descriptors and start a new timer if necessary */
550 tx_remaining = hip04_tx_reclaim(ndev, false);
551 if (rx < budget && tx_remaining)
552 hrtimer_start_expires(&priv->tx_coalesce_timer, HRTIMER_MODE_REL);
553
554 return rx;
555}
556
557static irqreturn_t hip04_mac_interrupt(int irq, void *dev_id)
558{
559 struct net_device *ndev = (struct net_device *)dev_id;
560 struct hip04_priv *priv = netdev_priv(ndev);
561 struct net_device_stats *stats = &ndev->stats;
562 u32 ists = readl_relaxed(priv->base + PPE_INTSTS);
563
564 if (!ists)
565 return IRQ_NONE;
566
567 writel_relaxed(DEF_INT_MASK, priv->base + PPE_RINT);
568
569 if (unlikely(ists & DEF_INT_ERR)) {
570 if (ists & (RCV_NOBUF | RCV_DROP)) {
571 stats->rx_errors++;
572 stats->rx_dropped++;
573 netdev_err(ndev, "rx drop\n");
574 }
575 if (ists & TX_DROP) {
576 stats->tx_dropped++;
577 netdev_err(ndev, "tx drop\n");
578 }
579 }
580
581 if (ists & RCV_INT && napi_schedule_prep(&priv->napi)) {
582 /* disable rx interrupt */
583 priv->reg_inten &= ~(RCV_INT);
584 writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
585 hrtimer_cancel(&priv->tx_coalesce_timer);
586 __napi_schedule(&priv->napi);
587 }
588
589 return IRQ_HANDLED;
590}
591
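/* TX coalesce timer callback: fires when fewer than tx_coalesce_frames
 * packets were queued, and schedules NAPI so hip04_rx_poll() reclaims the
 * outstanding descriptors.
 */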
592static enum hrtimer_restart tx_done(struct hrtimer *hrtimer)
593{
594 struct hip04_priv *priv;
595
596 priv = container_of(hrtimer, struct hip04_priv, tx_coalesce_timer);
597
598 if (napi_schedule_prep(&priv->napi)) {
599 /* disable rx interrupt */
600 priv->reg_inten &= ~(RCV_INT);
601 writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
602 __napi_schedule(&priv->napi);
603 }
604
605 return HRTIMER_NORESTART;
606}
607
608static void hip04_adjust_link(struct net_device *ndev)
609{
610 struct hip04_priv *priv = netdev_priv(ndev);
611 struct phy_device *phy = priv->phy;
612
613 if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
614 hip04_config_port(ndev, phy->speed, phy->duplex);
615 phy_print_status(phy);
616 }
617}
618
619static int hip04_mac_open(struct net_device *ndev)
620{
621 struct hip04_priv *priv = netdev_priv(ndev);
622 int i;
623
624 priv->rx_head = 0;
625 priv->tx_head = 0;
626 priv->tx_tail = 0;
627 hip04_reset_ppe(priv);
628
629 for (i = 0; i < RX_DESC_NUM; i++) {
630 dma_addr_t phys;
631
632 phys = dma_map_single(&ndev->dev, priv->rx_buf[i],
633 RX_BUF_SIZE, DMA_FROM_DEVICE);
634 if (dma_mapping_error(&ndev->dev, phys))
635 return -EIO;
636
637 priv->rx_phys[i] = phys;
638 hip04_set_recv_desc(priv, phys);
639 }
640
641 if (priv->phy)
642 phy_start(priv->phy);
643
644 netdev_reset_queue(ndev);
645 netif_start_queue(ndev);
646 hip04_mac_enable(ndev);
647 napi_enable(&priv->napi);
648
649 return 0;
650}
651
652static int hip04_mac_stop(struct net_device *ndev)
653{
654 struct hip04_priv *priv = netdev_priv(ndev);
655 int i;
656
657 napi_disable(&priv->napi);
658 netif_stop_queue(ndev);
659 hip04_mac_disable(ndev);
660 hip04_tx_reclaim(ndev, true);
661 hip04_reset_ppe(priv);
662
663 if (priv->phy)
664 phy_stop(priv->phy);
665
666 for (i = 0; i < RX_DESC_NUM; i++) {
667 if (priv->rx_phys[i]) {
668 dma_unmap_single(&ndev->dev, priv->rx_phys[i],
669 RX_BUF_SIZE, DMA_FROM_DEVICE);
670 priv->rx_phys[i] = 0;
671 }
672 }
673
674 return 0;
675}
676
677static void hip04_timeout(struct net_device *ndev)
678{
679 struct hip04_priv *priv = netdev_priv(ndev);
680
681 schedule_work(&priv->tx_timeout_task);
682}
683
684static void hip04_tx_timeout_task(struct work_struct *work)
685{
686 struct hip04_priv *priv;
687
688 priv = container_of(work, struct hip04_priv, tx_timeout_task);
689 hip04_mac_stop(priv->ndev);
690 hip04_mac_open(priv->ndev);
691}
692
693static struct net_device_stats *hip04_get_stats(struct net_device *ndev)
694{
695 return &ndev->stats;
696}
697
698static int hip04_get_coalesce(struct net_device *netdev,
699 struct ethtool_coalesce *ec)
700{
701 struct hip04_priv *priv = netdev_priv(netdev);
702
703 ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
704 ec->tx_max_coalesced_frames = priv->tx_coalesce_frames;
705
706 return 0;
707}
708
709static int hip04_set_coalesce(struct net_device *netdev,
710 struct ethtool_coalesce *ec)
711{
712 struct hip04_priv *priv = netdev_priv(netdev);
713
714 /* Check not supported parameters */
715 if ((ec->rx_max_coalesced_frames) || (ec->rx_coalesce_usecs_irq) ||
716 (ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
717 (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
718 (ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) ||
719 (ec->rx_max_coalesced_frames_low) || (ec->tx_coalesce_usecs_high) ||
720 (ec->tx_max_coalesced_frames_low) || (ec->pkt_rate_high) ||
721 (ec->tx_coalesce_usecs_low) || (ec->rx_coalesce_usecs_high) ||
722 (ec->rx_max_coalesced_frames_high) || (ec->rx_coalesce_usecs) ||
723 (ec->tx_max_coalesced_frames_irq) ||
724 (ec->stats_block_coalesce_usecs) ||
725 (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval))
726 return -EOPNOTSUPP;
727
728 if ((ec->tx_coalesce_usecs > HIP04_MAX_TX_COALESCE_USECS ||
729 ec->tx_coalesce_usecs < HIP04_MIN_TX_COALESCE_USECS) ||
730 (ec->tx_max_coalesced_frames > HIP04_MAX_TX_COALESCE_FRAMES ||
731 ec->tx_max_coalesced_frames < HIP04_MIN_TX_COALESCE_FRAMES))
732 return -EINVAL;
733
734 priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
735 priv->tx_coalesce_frames = ec->tx_max_coalesced_frames;
736
737 return 0;
738}
739
740static void hip04_get_drvinfo(struct net_device *netdev,
741 struct ethtool_drvinfo *drvinfo)
742{
743 strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
744 strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
745}
746
747static const struct ethtool_ops hip04_ethtool_ops = {
748 .get_coalesce = hip04_get_coalesce,
749 .set_coalesce = hip04_set_coalesce,
750 .get_drvinfo = hip04_get_drvinfo,
751};
752
753static const struct net_device_ops hip04_netdev_ops = {
754 .ndo_open = hip04_mac_open,
755 .ndo_stop = hip04_mac_stop,
756 .ndo_get_stats = hip04_get_stats,
757 .ndo_start_xmit = hip04_mac_start_xmit,
758 .ndo_set_mac_address = hip04_set_mac_address,
759 .ndo_tx_timeout = hip04_timeout,
760 .ndo_validate_addr = eth_validate_addr,
761 .ndo_change_mtu = eth_change_mtu,
762};
763
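/* Allocate the coherent TX descriptor ring and one page fragment per RX
 * descriptor; the RX buffers are DMA-mapped later, in hip04_mac_open().
 */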
764static int hip04_alloc_ring(struct net_device *ndev, struct device *d)
765{
766 struct hip04_priv *priv = netdev_priv(ndev);
767 int i;
768
769 priv->tx_desc = dma_alloc_coherent(d,
770 TX_DESC_NUM * sizeof(struct tx_desc),
771 &priv->tx_desc_dma, GFP_KERNEL);
772 if (!priv->tx_desc)
773 return -ENOMEM;
774
775 priv->rx_buf_size = RX_BUF_SIZE +
776 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
777 for (i = 0; i < RX_DESC_NUM; i++) {
778 priv->rx_buf[i] = netdev_alloc_frag(priv->rx_buf_size);
779 if (!priv->rx_buf[i])
780 return -ENOMEM;
781 }
782
783 return 0;
784}
785
786static void hip04_free_ring(struct net_device *ndev, struct device *d)
787{
788 struct hip04_priv *priv = netdev_priv(ndev);
789 int i;
790
791 for (i = 0; i < RX_DESC_NUM; i++)
792 if (priv->rx_buf[i])
793 put_page(virt_to_head_page(priv->rx_buf[i]));
794
795 for (i = 0; i < TX_DESC_NUM; i++)
796 if (priv->tx_skb[i])
797 dev_kfree_skb_any(priv->tx_skb[i]);
798
799 dma_free_coherent(d, TX_DESC_NUM * sizeof(struct tx_desc),
800 priv->tx_desc, priv->tx_desc_dma);
801}
802
803static int hip04_mac_probe(struct platform_device *pdev)
804{
805 struct device *d = &pdev->dev;
806 struct device_node *node = d->of_node;
807 struct of_phandle_args arg;
808 struct net_device *ndev;
809 struct hip04_priv *priv;
810 struct resource *res;
811	int irq;
812 ktime_t txtime;
813 int ret;
814
815 ndev = alloc_etherdev(sizeof(struct hip04_priv));
816 if (!ndev)
817 return -ENOMEM;
818
819 priv = netdev_priv(ndev);
820 priv->ndev = ndev;
821 platform_set_drvdata(pdev, ndev);
822
823 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
824 priv->base = devm_ioremap_resource(d, res);
825 if (IS_ERR(priv->base)) {
826 ret = PTR_ERR(priv->base);
827 goto init_fail;
828 }
829
830 ret = of_parse_phandle_with_fixed_args(node, "port-handle", 2, 0, &arg);
831 if (ret < 0) {
832 dev_warn(d, "no port-handle\n");
833 goto init_fail;
834 }
835
836 priv->port = arg.args[0];
837 priv->chan = arg.args[1] * RX_DESC_NUM;
838
839 hrtimer_init(&priv->tx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
840
841 /* BQL will try to keep the TX queue as short as possible, but it can't
842	 * be faster than tx_coalesce_usecs, so the timeout here must be short,
843	 * yet long enough to gather up enough frames that we don't take more
844	 * interrupts than necessary.
845	 * 200us is enough for 16 frames of 1500 bytes at Gigabit Ethernet rate.
846 */
847 priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4;
848 priv->tx_coalesce_usecs = 200;
849 /* allow timer to fire after half the time at the earliest */
850 txtime = ktime_set(0, priv->tx_coalesce_usecs * NSEC_PER_USEC / 2);
851 hrtimer_set_expires_range(&priv->tx_coalesce_timer, txtime, txtime);
852 priv->tx_coalesce_timer.function = tx_done;
853
854 priv->map = syscon_node_to_regmap(arg.np);
855 if (IS_ERR(priv->map)) {
856 dev_warn(d, "no syscon hisilicon,hip04-ppe\n");
857 ret = PTR_ERR(priv->map);
858 goto init_fail;
859 }
860
861 priv->phy_mode = of_get_phy_mode(node);
862 if (priv->phy_mode < 0) {
863		dev_warn(d, "phy-mode not found\n");
864 ret = -EINVAL;
865 goto init_fail;
866 }
867
868 irq = platform_get_irq(pdev, 0);
869 if (irq <= 0) {
870 ret = -EINVAL;
871 goto init_fail;
872 }
873
874 ret = devm_request_irq(d, irq, hip04_mac_interrupt,
875 0, pdev->name, ndev);
876 if (ret) {
877 netdev_err(ndev, "devm_request_irq failed\n");
878 goto init_fail;
879 }
880
881 priv->phy_node = of_parse_phandle(node, "phy-handle", 0);
882 if (priv->phy_node) {
883 priv->phy = of_phy_connect(ndev, priv->phy_node,
884 &hip04_adjust_link,
885 0, priv->phy_mode);
886 if (!priv->phy) {
887 ret = -EPROBE_DEFER;
888 goto init_fail;
889 }
890 }
891
892 INIT_WORK(&priv->tx_timeout_task, hip04_tx_timeout_task);
893
894 ether_setup(ndev);
895 ndev->netdev_ops = &hip04_netdev_ops;
896 ndev->ethtool_ops = &hip04_ethtool_ops;
897 ndev->watchdog_timeo = TX_TIMEOUT;
898 ndev->priv_flags |= IFF_UNICAST_FLT;
899 ndev->irq = irq;
900 netif_napi_add(ndev, &priv->napi, hip04_rx_poll, NAPI_POLL_WEIGHT);
901 SET_NETDEV_DEV(ndev, &pdev->dev);
902
903 hip04_reset_ppe(priv);
904 if (priv->phy_mode == PHY_INTERFACE_MODE_MII)
905 hip04_config_port(ndev, SPEED_100, DUPLEX_FULL);
906
907 hip04_config_fifo(priv);
908 random_ether_addr(ndev->dev_addr);
909 hip04_update_mac_address(ndev);
910
911 ret = hip04_alloc_ring(ndev, d);
912 if (ret) {
913		netdev_err(ndev, "failed to allocate rings\n");
914 goto alloc_fail;
915 }
916
917 ret = register_netdev(ndev);
918	if (ret)
919		goto alloc_fail;
922
923 return 0;
924
925alloc_fail:
926 hip04_free_ring(ndev, d);
927init_fail:
928 of_node_put(priv->phy_node);
929 free_netdev(ndev);
930 return ret;
931}
932
933static int hip04_remove(struct platform_device *pdev)
934{
935 struct net_device *ndev = platform_get_drvdata(pdev);
936 struct hip04_priv *priv = netdev_priv(ndev);
937 struct device *d = &pdev->dev;
938
939 if (priv->phy)
940 phy_disconnect(priv->phy);
941
942 hip04_free_ring(ndev, d);
943 unregister_netdev(ndev);
944 free_irq(ndev->irq, ndev);
945 of_node_put(priv->phy_node);
946 cancel_work_sync(&priv->tx_timeout_task);
947 free_netdev(ndev);
948
949 return 0;
950}
951
952static const struct of_device_id hip04_mac_match[] = {
953 { .compatible = "hisilicon,hip04-mac" },
954 { }
955};
956
957MODULE_DEVICE_TABLE(of, hip04_mac_match);
958
959static struct platform_driver hip04_mac_driver = {
960 .probe = hip04_mac_probe,
961 .remove = hip04_remove,
962 .driver = {
963 .name = DRV_NAME,
964 .owner = THIS_MODULE,
965 .of_match_table = hip04_mac_match,
966 },
967};
968module_platform_driver(hip04_mac_driver);
969
970MODULE_DESCRIPTION("HiSilicon HIP04 Ethernet driver");
971MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/hisilicon/hip04_mdio.c b/drivers/net/ethernet/hisilicon/hip04_mdio.c
new file mode 100644
index 000000000000..b3bac25db99c
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hip04_mdio.c
@@ -0,0 +1,186 @@
1/* Copyright (c) 2014 Linaro Ltd.
2 * Copyright (c) 2014 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <linux/module.h>
11#include <linux/platform_device.h>
12#include <linux/io.h>
13#include <linux/of_mdio.h>
14#include <linux/delay.h>
15
16#define MDIO_CMD_REG 0x0
17#define MDIO_ADDR_REG 0x4
18#define MDIO_WDATA_REG 0x8
19#define MDIO_RDATA_REG 0xc
20#define MDIO_STA_REG 0x10
21
22#define MDIO_START BIT(14)
23#define MDIO_R_VALID BIT(1)
24#define MDIO_READ (BIT(12) | BIT(11) | MDIO_START)
25#define MDIO_WRITE (BIT(12) | BIT(10) | MDIO_START)
26
27struct hip04_mdio_priv {
28 void __iomem *base;
29};
30
31#define WAIT_TIMEOUT 10
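/* Poll (roughly WAIT_TIMEOUT * 20 ms) for the controller to clear
 * MDIO_START in MDIO_CMD_REG, i.e. for the previous MDIO command to finish.
 */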
32static int hip04_mdio_wait_ready(struct mii_bus *bus)
33{
34 struct hip04_mdio_priv *priv = bus->priv;
35 int i;
36
37 for (i = 0; readl_relaxed(priv->base + MDIO_CMD_REG) & MDIO_START; i++) {
38 if (i == WAIT_TIMEOUT)
39 return -ETIMEDOUT;
40 msleep(20);
41 }
42
43 return 0;
44}
45
46static int hip04_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
47{
48 struct hip04_mdio_priv *priv = bus->priv;
49 u32 val;
50 int ret;
51
52 ret = hip04_mdio_wait_ready(bus);
53 if (ret < 0)
54 goto out;
55
56 val = regnum | (mii_id << 5) | MDIO_READ;
57 writel_relaxed(val, priv->base + MDIO_CMD_REG);
58
59 ret = hip04_mdio_wait_ready(bus);
60 if (ret < 0)
61 goto out;
62
63 val = readl_relaxed(priv->base + MDIO_STA_REG);
64 if (val & MDIO_R_VALID) {
65 dev_err(bus->parent, "SMI bus read not valid\n");
66 ret = -ENODEV;
67 goto out;
68 }
69
70 val = readl_relaxed(priv->base + MDIO_RDATA_REG);
71 ret = val & 0xFFFF;
72out:
73 return ret;
74}
75
76static int hip04_mdio_write(struct mii_bus *bus, int mii_id,
77 int regnum, u16 value)
78{
79 struct hip04_mdio_priv *priv = bus->priv;
80 u32 val;
81 int ret;
82
83 ret = hip04_mdio_wait_ready(bus);
84 if (ret < 0)
85 goto out;
86
87 writel_relaxed(value, priv->base + MDIO_WDATA_REG);
88 val = regnum | (mii_id << 5) | MDIO_WRITE;
89 writel_relaxed(val, priv->base + MDIO_CMD_REG);
90out:
91 return ret;
92}
93
94static int hip04_mdio_reset(struct mii_bus *bus)
95{
96 int temp, i;
97
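	/* For every possible PHY address: write 0 to register 22 (presumably a
	 * Marvell-style page-select register) and then set BMCR_RESET.
	 */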
98 for (i = 0; i < PHY_MAX_ADDR; i++) {
99 hip04_mdio_write(bus, i, 22, 0);
100 temp = hip04_mdio_read(bus, i, MII_BMCR);
101 if (temp < 0)
102 continue;
103
104 temp |= BMCR_RESET;
105 if (hip04_mdio_write(bus, i, MII_BMCR, temp) < 0)
106 continue;
107 }
108
109 mdelay(500);
110 return 0;
111}
112
113static int hip04_mdio_probe(struct platform_device *pdev)
114{
115 struct resource *r;
116 struct mii_bus *bus;
117 struct hip04_mdio_priv *priv;
118 int ret;
119
120 bus = mdiobus_alloc_size(sizeof(struct hip04_mdio_priv));
121 if (!bus) {
122 dev_err(&pdev->dev, "Cannot allocate MDIO bus\n");
123 return -ENOMEM;
124 }
125
126 bus->name = "hip04_mdio_bus";
127 bus->read = hip04_mdio_read;
128 bus->write = hip04_mdio_write;
129 bus->reset = hip04_mdio_reset;
130 snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev));
131 bus->parent = &pdev->dev;
132 priv = bus->priv;
133
134 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
135 priv->base = devm_ioremap_resource(&pdev->dev, r);
136 if (IS_ERR(priv->base)) {
137 ret = PTR_ERR(priv->base);
138 goto out_mdio;
139 }
140
141 ret = of_mdiobus_register(bus, pdev->dev.of_node);
142 if (ret < 0) {
143 dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
144 goto out_mdio;
145 }
146
147 platform_set_drvdata(pdev, bus);
148
149 return 0;
150
151out_mdio:
152 mdiobus_free(bus);
153 return ret;
154}
155
156static int hip04_mdio_remove(struct platform_device *pdev)
157{
158 struct mii_bus *bus = platform_get_drvdata(pdev);
159
160 mdiobus_unregister(bus);
161 mdiobus_free(bus);
162
163 return 0;
164}
165
166static const struct of_device_id hip04_mdio_match[] = {
167 { .compatible = "hisilicon,hip04-mdio" },
168 { }
169};
170MODULE_DEVICE_TABLE(of, hip04_mdio_match);
171
172static struct platform_driver hip04_mdio_driver = {
173 .probe = hip04_mdio_probe,
174 .remove = hip04_mdio_remove,
175 .driver = {
176 .name = "hip04-mdio",
177 .owner = THIS_MODULE,
178 .of_match_table = hip04_mdio_match,
179 },
180};
181
182module_platform_driver(hip04_mdio_driver);
183
184MODULE_DESCRIPTION("HiSilicon HIP04 MDIO interface driver");
185MODULE_LICENSE("GPL v2");
186MODULE_ALIAS("platform:hip04-mdio");
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 566b17db135a..e8a1adb7a962 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -2064,9 +2064,9 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
2064 memset(swqe, 0, SWQE_HEADER_SIZE); 2064 memset(swqe, 0, SWQE_HEADER_SIZE);
2065 atomic_dec(&pr->swqe_avail); 2065 atomic_dec(&pr->swqe_avail);
2066 2066
2067 if (vlan_tx_tag_present(skb)) { 2067 if (skb_vlan_tag_present(skb)) {
2068 swqe->tx_control |= EHEA_SWQE_VLAN_INSERT; 2068 swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
2069 swqe->vlan_tag = vlan_tx_tag_get(skb); 2069 swqe->vlan_tag = skb_vlan_tag_get(skb);
2070 } 2070 }
2071 2071
2072 pr->tx_packets++; 2072 pr->tx_packets++;
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 9388a83818f2..162762d1a12c 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2367,7 +2367,7 @@ static int emac_wait_deps(struct emac_instance *dev)
2367 err = emac_check_deps(dev, deps) ? 0 : -ENODEV; 2367 err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
2368 for (i = 0; i < EMAC_DEP_COUNT; i++) { 2368 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2369 of_node_put(deps[i].node); 2369 of_node_put(deps[i].node);
2370 if (err && deps[i].ofdev) 2370 if (err)
2371 of_dev_put(deps[i].ofdev); 2371 of_dev_put(deps[i].ofdev);
2372 } 2372 }
2373 if (err == 0) { 2373 if (err == 0) {
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 4d61ef50b465..f4ff465584a0 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -192,6 +192,17 @@ config IXGBE
192 To compile this driver as a module, choose M here. The module 192 To compile this driver as a module, choose M here. The module
193 will be called ixgbe. 193 will be called ixgbe.
194 194
195config IXGBE_VXLAN
196 bool "Virtual eXtensible Local Area Network Support"
197 default n
198 depends on IXGBE && VXLAN && !(IXGBE=y && VXLAN=m)
199 ---help---
200 This allows one to create VXLAN virtual interfaces that provide
201 Layer 2 Networks over Layer 3 Networks. VXLAN is often used
202 to tunnel virtual network infrastructure in virtualized environments.
203 Say Y here if you want to use Virtual eXtensible Local Area Network
204 (VXLAN) in the driver.
205
195config IXGBE_HWMON 206config IXGBE_HWMON
196 bool "Intel(R) 10GbE PCI Express adapters HWMON support" 207 bool "Intel(R) 10GbE PCI Express adapters HWMON support"
197 default y 208 default y
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index b691eb4f6376..4270ad2d4ddf 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -24,6 +24,7 @@
24/* ethtool support for e1000 */ 24/* ethtool support for e1000 */
25 25
26#include "e1000.h" 26#include "e1000.h"
27#include <linux/jiffies.h>
27#include <linux/uaccess.h> 28#include <linux/uaccess.h>
28 29
29enum {NETDEV_STATS, E1000_STATS}; 30enum {NETDEV_STATS, E1000_STATS};
@@ -1460,7 +1461,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1460 ret_val = 13; /* ret_val is the same as mis-compare */ 1461 ret_val = 13; /* ret_val is the same as mis-compare */
1461 break; 1462 break;
1462 } 1463 }
1463 if (jiffies >= (time + 2)) { 1464 if (time_after_eq(jiffies, time + 2)) {
1464 ret_val = 14; /* error code for time out error */ 1465 ret_val = 14; /* error code for time out error */
1465 break; 1466 break;
1466 } 1467 }
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 83140cbb5f01..7f997d36948f 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2977,7 +2977,6 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
2977 struct e1000_tx_ring *tx_ring, int tx_flags, 2977 struct e1000_tx_ring *tx_ring, int tx_flags,
2978 int count) 2978 int count)
2979{ 2979{
2980 struct e1000_hw *hw = &adapter->hw;
2981 struct e1000_tx_desc *tx_desc = NULL; 2980 struct e1000_tx_desc *tx_desc = NULL;
2982 struct e1000_tx_buffer *buffer_info; 2981 struct e1000_tx_buffer *buffer_info;
2983 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; 2982 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
@@ -3031,11 +3030,6 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
3031 wmb(); 3030 wmb();
3032 3031
3033 tx_ring->next_to_use = i; 3032 tx_ring->next_to_use = i;
3034 writel(i, hw->hw_addr + tx_ring->tdt);
3035 /* we need this if more than one processor can write to our tail
3036 * at a time, it synchronizes IO on IA64/Altix systems
3037 */
3038 mmiowb();
3039} 3033}
3040 3034
3041/* 82547 workaround to avoid controller hang in half-duplex environment. 3035/* 82547 workaround to avoid controller hang in half-duplex environment.
@@ -3226,9 +3220,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3226 return NETDEV_TX_BUSY; 3220 return NETDEV_TX_BUSY;
3227 } 3221 }
3228 3222
3229 if (vlan_tx_tag_present(skb)) { 3223 if (skb_vlan_tag_present(skb)) {
3230 tx_flags |= E1000_TX_FLAGS_VLAN; 3224 tx_flags |= E1000_TX_FLAGS_VLAN;
3231 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); 3225 tx_flags |= (skb_vlan_tag_get(skb) <<
3226 E1000_TX_FLAGS_VLAN_SHIFT);
3232 } 3227 }
3233 3228
3234 first = tx_ring->next_to_use; 3229 first = tx_ring->next_to_use;
@@ -3263,6 +3258,15 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3263 /* Make sure there is space in the ring for the next send. */ 3258 /* Make sure there is space in the ring for the next send. */
3264 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2); 3259 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
3265 3260
3261 if (!skb->xmit_more ||
3262 netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
3263 writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
3264 /* we need this if more than one processor can write to
3265 * our tail at a time, it synchronizes IO on IA64/Altix
3266 * systems
3267 */
3268 mmiowb();
3269 }
3266 } else { 3270 } else {
3267 dev_kfree_skb_any(skb); 3271 dev_kfree_skb_any(skb);
3268 tx_ring->buffer_info[first].time_stamp = 0; 3272 tx_ring->buffer_info[first].time_stamp = 0;
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 7785240a0da1..9416e5a7e0c8 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -34,7 +34,7 @@
34#include <linux/pci-aspm.h> 34#include <linux/pci-aspm.h>
35#include <linux/crc32.h> 35#include <linux/crc32.h>
36#include <linux/if_vlan.h> 36#include <linux/if_vlan.h>
37#include <linux/clocksource.h> 37#include <linux/timecounter.h>
38#include <linux/net_tstamp.h> 38#include <linux/net_tstamp.h>
39#include <linux/ptp_clock_kernel.h> 39#include <linux/ptp_clock_kernel.h>
40#include <linux/ptp_classify.h> 40#include <linux/ptp_classify.h>
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index e14fd85f64eb..1e8c40fd5c3d 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -4189,7 +4189,7 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
4189 /* Setup hardware time stamping cyclecounter */ 4189 /* Setup hardware time stamping cyclecounter */
4190 if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) { 4190 if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
4191 adapter->cc.read = e1000e_cyclecounter_read; 4191 adapter->cc.read = e1000e_cyclecounter_read;
4192 adapter->cc.mask = CLOCKSOURCE_MASK(64); 4192 adapter->cc.mask = CYCLECOUNTER_MASK(64);
4193 adapter->cc.mult = 1; 4193 adapter->cc.mult = 1;
4194 /* cc.shift set in e1000e_get_base_timinca() */ 4194 /* cc.shift set in e1000e_get_base_timinca() */
4195 4195
@@ -5444,16 +5444,6 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
5444 wmb(); 5444 wmb();
5445 5445
5446 tx_ring->next_to_use = i; 5446 tx_ring->next_to_use = i;
5447
5448 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
5449 e1000e_update_tdt_wa(tx_ring, i);
5450 else
5451 writel(i, tx_ring->tail);
5452
5453 /* we need this if more than one processor can write to our tail
5454 * at a time, it synchronizes IO on IA64/Altix systems
5455 */
5456 mmiowb();
5457} 5447}
5458 5448
5459#define MINIMUM_DHCP_PACKET_SIZE 282 5449#define MINIMUM_DHCP_PACKET_SIZE 282
@@ -5463,8 +5453,8 @@ static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
5463 struct e1000_hw *hw = &adapter->hw; 5453 struct e1000_hw *hw = &adapter->hw;
5464 u16 length, offset; 5454 u16 length, offset;
5465 5455
5466 if (vlan_tx_tag_present(skb) && 5456 if (skb_vlan_tag_present(skb) &&
5467 !((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && 5457 !((skb_vlan_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
5468 (adapter->hw.mng_cookie.status & 5458 (adapter->hw.mng_cookie.status &
5469 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))) 5459 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
5470 return 0; 5460 return 0;
@@ -5603,9 +5593,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5603 if (e1000_maybe_stop_tx(tx_ring, count + 2)) 5593 if (e1000_maybe_stop_tx(tx_ring, count + 2))
5604 return NETDEV_TX_BUSY; 5594 return NETDEV_TX_BUSY;
5605 5595
5606 if (vlan_tx_tag_present(skb)) { 5596 if (skb_vlan_tag_present(skb)) {
5607 tx_flags |= E1000_TX_FLAGS_VLAN; 5597 tx_flags |= E1000_TX_FLAGS_VLAN;
5608 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); 5598 tx_flags |= (skb_vlan_tag_get(skb) <<
5599 E1000_TX_FLAGS_VLAN_SHIFT);
5609 } 5600 }
5610 5601
5611 first = tx_ring->next_to_use; 5602 first = tx_ring->next_to_use;
@@ -5635,8 +5626,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5635 count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit, 5626 count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
5636 nr_frags); 5627 nr_frags);
5637 if (count) { 5628 if (count) {
5638 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 5629 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
5639 !adapter->tx_hwtstamp_skb)) { 5630 (adapter->flags & FLAG_HAS_HW_TIMESTAMP) &&
5631 !adapter->tx_hwtstamp_skb) {
5640 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 5632 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
5641 tx_flags |= E1000_TX_FLAGS_HWTSTAMP; 5633 tx_flags |= E1000_TX_FLAGS_HWTSTAMP;
5642 adapter->tx_hwtstamp_skb = skb_get(skb); 5634 adapter->tx_hwtstamp_skb = skb_get(skb);
@@ -5653,6 +5645,21 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5653 (MAX_SKB_FRAGS * 5645 (MAX_SKB_FRAGS *
5654 DIV_ROUND_UP(PAGE_SIZE, 5646 DIV_ROUND_UP(PAGE_SIZE,
5655 adapter->tx_fifo_limit) + 2)); 5647 adapter->tx_fifo_limit) + 2));
5648
5649 if (!skb->xmit_more ||
5650 netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
5651 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
5652 e1000e_update_tdt_wa(tx_ring,
5653 tx_ring->next_to_use);
5654 else
5655 writel(tx_ring->next_to_use, tx_ring->tail);
5656
5657 /* we need this if more than one processor can write
5658 * to our tail at a time, it synchronizes IO on
5659 * IA64/Altix systems
5660 */
5661 mmiowb();
5662 }
5656 } else { 5663 } else {
5657 dev_kfree_skb_any(skb); 5664 dev_kfree_skb_any(skb);
5658 tx_ring->buffer_info[first].time_stamp = 0; 5665 tx_ring->buffer_info[first].time_stamp = 0;
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index fb1a914a3ad4..978ef9c4a043 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -90,12 +90,9 @@ static int e1000e_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
90 struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, 90 struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
91 ptp_clock_info); 91 ptp_clock_info);
92 unsigned long flags; 92 unsigned long flags;
93 s64 now;
94 93
95 spin_lock_irqsave(&adapter->systim_lock, flags); 94 spin_lock_irqsave(&adapter->systim_lock, flags);
96 now = timecounter_read(&adapter->tc); 95 timecounter_adjtime(&adapter->tc, delta);
97 now += delta;
98 timecounter_init(&adapter->tc, &adapter->cc, now);
99 spin_unlock_irqrestore(&adapter->systim_lock, flags); 96 spin_unlock_irqrestore(&adapter->systim_lock, flags);
100 97
101 return 0; 98 return 0;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index eb088b129bc7..84ab9eea2768 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -97,7 +97,6 @@ static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring,
97 */ 97 */
98 if (dma_mapping_error(rx_ring->dev, dma)) { 98 if (dma_mapping_error(rx_ring->dev, dma)) {
99 __free_page(page); 99 __free_page(page);
100 bi->page = NULL;
101 100
102 rx_ring->rx_stats.alloc_failed++; 101 rx_ring->rx_stats.alloc_failed++;
103 return false; 102 return false;
@@ -147,8 +146,8 @@ void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count)
147 i -= rx_ring->count; 146 i -= rx_ring->count;
148 } 147 }
149 148
150 /* clear the hdr_addr for the next_to_use descriptor */ 149 /* clear the status bits for the next_to_use descriptor */
151 rx_desc->q.hdr_addr = 0; 150 rx_desc->d.staterr = 0;
152 151
153 cleaned_count--; 152 cleaned_count--;
154 } while (cleaned_count); 153 } while (cleaned_count);
@@ -194,7 +193,7 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
194 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; 193 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
195 194
196 /* transfer page from old buffer to new buffer */ 195 /* transfer page from old buffer to new buffer */
197 memcpy(new_buff, old_buff, sizeof(struct fm10k_rx_buffer)); 196 *new_buff = *old_buff;
198 197
199 /* sync the buffer for use by the device */ 198 /* sync the buffer for use by the device */
200 dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma, 199 dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
@@ -203,12 +202,17 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
203 DMA_FROM_DEVICE); 202 DMA_FROM_DEVICE);
204} 203}
205 204
205static inline bool fm10k_page_is_reserved(struct page *page)
206{
207 return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
208}
209
206static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer, 210static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
207 struct page *page, 211 struct page *page,
208 unsigned int truesize) 212 unsigned int truesize)
209{ 213{
210 /* avoid re-using remote pages */ 214 /* avoid re-using remote pages */
211 if (unlikely(page_to_nid(page) != numa_mem_id())) 215 if (unlikely(fm10k_page_is_reserved(page)))
212 return false; 216 return false;
213 217
214#if (PAGE_SIZE < 8192) 218#if (PAGE_SIZE < 8192)
@@ -218,22 +222,19 @@ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
218 222
219 /* flip page offset to other buffer */ 223 /* flip page offset to other buffer */
220 rx_buffer->page_offset ^= FM10K_RX_BUFSZ; 224 rx_buffer->page_offset ^= FM10K_RX_BUFSZ;
221
222 /* Even if we own the page, we are not allowed to use atomic_set()
223 * This would break get_page_unless_zero() users.
224 */
225 atomic_inc(&page->_count);
226#else 225#else
227 /* move offset up to the next cache line */ 226 /* move offset up to the next cache line */
228 rx_buffer->page_offset += truesize; 227 rx_buffer->page_offset += truesize;
229 228
230 if (rx_buffer->page_offset > (PAGE_SIZE - FM10K_RX_BUFSZ)) 229 if (rx_buffer->page_offset > (PAGE_SIZE - FM10K_RX_BUFSZ))
231 return false; 230 return false;
232
233 /* bump ref count on page before it is given to the stack */
234 get_page(page);
235#endif 231#endif
236 232
233 /* Even if we own the page, we are not allowed to use atomic_set()
234 * This would break get_page_unless_zero() users.
235 */
236 atomic_inc(&page->_count);
237
237 return true; 238 return true;
238} 239}
239 240
@@ -270,12 +271,12 @@ static bool fm10k_add_rx_frag(struct fm10k_ring *rx_ring,
270 271
271 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); 272 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
272 273
273 /* we can reuse buffer as-is, just make sure it is local */ 274 /* page is not reserved, we can reuse buffer as-is */
274 if (likely(page_to_nid(page) == numa_mem_id())) 275 if (likely(!fm10k_page_is_reserved(page)))
275 return true; 276 return true;
276 277
277 /* this page cannot be reused so discard it */ 278 /* this page cannot be reused so discard it */
278 put_page(page); 279 __free_page(page);
279 return false; 280 return false;
280 } 281 }
281 282
@@ -293,7 +294,6 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
293 struct page *page; 294 struct page *page;
294 295
295 rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean]; 296 rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean];
296
297 page = rx_buffer->page; 297 page = rx_buffer->page;
298 prefetchw(page); 298 prefetchw(page);
299 299
@@ -727,6 +727,12 @@ static __be16 fm10k_tx_encap_offload(struct sk_buff *skb)
727 struct ethhdr *eth_hdr; 727 struct ethhdr *eth_hdr;
728 u8 l4_hdr = 0; 728 u8 l4_hdr = 0;
729 729
730/* fm10k supports 184 octets of outer+inner headers. Minus 20 for inner L4. */
731#define FM10K_MAX_ENCAP_TRANSPORT_OFFSET 164
732 if (skb_inner_transport_header(skb) - skb_mac_header(skb) >
733 FM10K_MAX_ENCAP_TRANSPORT_OFFSET)
734 return 0;
735
730 switch (vlan_get_protocol(skb)) { 736 switch (vlan_get_protocol(skb)) {
731 case htons(ETH_P_IP): 737 case htons(ETH_P_IP):
732 l4_hdr = ip_hdr(skb)->protocol; 738 l4_hdr = ip_hdr(skb)->protocol;
@@ -965,8 +971,8 @@ static void fm10k_tx_map(struct fm10k_ring *tx_ring,
965 tx_desc = FM10K_TX_DESC(tx_ring, i); 971 tx_desc = FM10K_TX_DESC(tx_ring, i);
966 972
967 /* add HW VLAN tag */ 973 /* add HW VLAN tag */
968 if (vlan_tx_tag_present(skb)) 974 if (skb_vlan_tag_present(skb))
969 tx_desc->vlan = cpu_to_le16(vlan_tx_tag_get(skb)); 975 tx_desc->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
970 else 976 else
971 tx_desc->vlan = 0; 977 tx_desc->vlan = 0;
972 978
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index 14a4ea795c01..9f5457c9e627 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -1194,12 +1194,11 @@ static s32 fm10k_mbx_process_disconnect(struct fm10k_hw *hw,
1194{ 1194{
1195 const enum fm10k_mbx_state state = mbx->state; 1195 const enum fm10k_mbx_state state = mbx->state;
1196 const u32 *hdr = &mbx->mbx_hdr; 1196 const u32 *hdr = &mbx->mbx_hdr;
1197 u16 head, tail; 1197 u16 head;
1198 s32 err; 1198 s32 err;
1199 1199
1200 /* we will need to pull all of the fields for verification */ 1200 /* we will need to pull the header field for verification */
1201 head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); 1201 head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD);
1202 tail = FM10K_MSG_HDR_FIELD_GET(*hdr, TAIL);
1203 1202
1204 /* We should not be receiving disconnect if Rx is incomplete */ 1203 /* We should not be receiving disconnect if Rx is incomplete */
1205 if (mbx->pushed) 1204 if (mbx->pushed)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 8811364b91cb..cfde8bac1aeb 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -609,7 +609,7 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
609 int err; 609 int err;
610 610
611 if ((skb->protocol == htons(ETH_P_8021Q)) && 611 if ((skb->protocol == htons(ETH_P_8021Q)) &&
612 !vlan_tx_tag_present(skb)) { 612 !skb_vlan_tag_present(skb)) {
613 /* FM10K only supports hardware tagging, any tags in frame 613 /* FM10K only supports hardware tagging, any tags in frame
614 * are considered 2nd level or "outer" tags 614 * are considered 2nd level or "outer" tags
615 */ 615 */
@@ -1414,13 +1414,12 @@ struct net_device *fm10k_alloc_netdev(void)
1414 dev->vlan_features |= dev->features; 1414 dev->vlan_features |= dev->features;
1415 1415
1416 /* configure tunnel offloads */ 1416 /* configure tunnel offloads */
1417 dev->hw_enc_features = NETIF_F_IP_CSUM | 1417 dev->hw_enc_features |= NETIF_F_IP_CSUM |
1418 NETIF_F_TSO | 1418 NETIF_F_TSO |
1419 NETIF_F_TSO6 | 1419 NETIF_F_TSO6 |
1420 NETIF_F_TSO_ECN | 1420 NETIF_F_TSO_ECN |
1421 NETIF_F_GSO_UDP_TUNNEL | 1421 NETIF_F_GSO_UDP_TUNNEL |
1422 NETIF_F_IPV6_CSUM | 1422 NETIF_F_IPV6_CSUM;
1423 NETIF_F_SG;
1424 1423
1425 /* we want to leave these both on as we cannot disable VLAN tag 1424 /* we want to leave these both on as we cannot disable VLAN tag
1426 * insertion or stripping on the hardware since it is contained 1425 * insertion or stripping on the hardware since it is contained
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 275423d4f777..7e4711958e46 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -330,13 +330,10 @@ static s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort,
330 struct fm10k_mac_update mac_update; 330 struct fm10k_mac_update mac_update;
331 u32 msg[5]; 331 u32 msg[5];
332 332
333 /* if glort is not valid return error */ 333 /* if glort or vlan are not valid return error */
334 if (!fm10k_glort_valid_pf(hw, glort)) 334 if (!fm10k_glort_valid_pf(hw, glort) || vid >= FM10K_VLAN_TABLE_VID_MAX)
335 return FM10K_ERR_PARAM; 335 return FM10K_ERR_PARAM;
336 336
337 /* drop upper 4 bits of VLAN ID */
338 vid = (vid << 4) >> 4;
339
340 /* record fields */ 337 /* record fields */
341 mac_update.mac_lower = cpu_to_le32(((u32)mac[2] << 24) | 338 mac_update.mac_lower = cpu_to_le32(((u32)mac[2] << 24) |
342 ((u32)mac[3] << 16) | 339 ((u32)mac[3] << 16) |
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
index 7822809436a3..d966044e017a 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
@@ -57,7 +57,6 @@ void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb)
57 struct sk_buff_head *list = &interface->ts_tx_skb_queue; 57 struct sk_buff_head *list = &interface->ts_tx_skb_queue;
58 struct sk_buff *clone; 58 struct sk_buff *clone;
59 unsigned long flags; 59 unsigned long flags;
60 __le16 dglort;
61 60
62 /* create clone for us to return on the Tx path */ 61 /* create clone for us to return on the Tx path */
63 clone = skb_clone_sk(skb); 62 clone = skb_clone_sk(skb);
@@ -65,8 +64,6 @@ void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb)
65 return; 64 return;
66 65
67 FM10K_CB(clone)->ts_tx_timeout = jiffies + FM10K_TS_TX_TIMEOUT; 66 FM10K_CB(clone)->ts_tx_timeout = jiffies + FM10K_TS_TX_TIMEOUT;
68 dglort = FM10K_CB(clone)->fi.w.dglort;
69
70 spin_lock_irqsave(&list->lock, flags); 67 spin_lock_irqsave(&list->lock, flags);
71 68
72 /* attempt to locate any buffers with the same dglort, 69 /* attempt to locate any buffers with the same dglort,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index 280296f29154..7c6d9d5a8ae5 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -354,7 +354,7 @@ struct fm10k_hw;
354 354
355/* Define timeouts for resets and disables */ 355/* Define timeouts for resets and disables */
356#define FM10K_QUEUE_DISABLE_TIMEOUT 100 356#define FM10K_QUEUE_DISABLE_TIMEOUT 100
357#define FM10K_RESET_TIMEOUT 100 357#define FM10K_RESET_TIMEOUT 150
358 358
359/* VF registers */ 359/* VF registers */
360#define FM10K_VFCTRL 0x00000 360#define FM10K_VFCTRL 0x00000
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index fc50f6461b13..2b65cdcad6ba 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -87,11 +87,12 @@
87#define I40E_MINIMUM_FCOE 1 /* minimum number of QPs for FCoE */ 87#define I40E_MINIMUM_FCOE 1 /* minimum number of QPs for FCoE */
88#endif /* I40E_FCOE */ 88#endif /* I40E_FCOE */
89#define I40E_MAX_AQ_BUF_SIZE 4096 89#define I40E_MAX_AQ_BUF_SIZE 4096
90#define I40E_AQ_LEN 128 90#define I40E_AQ_LEN 256
91#define I40E_AQ_WORK_LIMIT 16 91#define I40E_AQ_WORK_LIMIT 32
92#define I40E_MAX_USER_PRIORITY 8 92#define I40E_MAX_USER_PRIORITY 8
93#define I40E_DEFAULT_MSG_ENABLE 4 93#define I40E_DEFAULT_MSG_ENABLE 4
94#define I40E_QUEUE_WAIT_RETRY_LIMIT 10 94#define I40E_QUEUE_WAIT_RETRY_LIMIT 10
95#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 9)
95 96
96#define I40E_NVM_VERSION_LO_SHIFT 0 97#define I40E_NVM_VERSION_LO_SHIFT 0
97#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) 98#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
@@ -147,6 +148,7 @@ enum i40e_state_t {
147 __I40E_FD_FLUSH_REQUESTED, 148 __I40E_FD_FLUSH_REQUESTED,
148 __I40E_RESET_FAILED, 149 __I40E_RESET_FAILED,
149 __I40E_PORT_TX_SUSPENDED, 150 __I40E_PORT_TX_SUSPENDED,
151 __I40E_VF_DISABLE,
150}; 152};
151 153
152enum i40e_interrupt_policy { 154enum i40e_interrupt_policy {
@@ -268,7 +270,7 @@ struct i40e_pf {
268 u16 rx_itr_default; 270 u16 rx_itr_default;
269 u16 tx_itr_default; 271 u16 tx_itr_default;
270 u16 msg_enable; 272 u16 msg_enable;
271 char misc_int_name[IFNAMSIZ + 9]; 273 char int_name[I40E_INT_NAME_STR_LEN];
272 u16 adminq_work_limit; /* num of admin receive queue desc to process */ 274 u16 adminq_work_limit; /* num of admin receive queue desc to process */
273 unsigned long service_timer_period; 275 unsigned long service_timer_period;
274 unsigned long service_timer_previous; 276 unsigned long service_timer_previous;
@@ -524,7 +526,7 @@ struct i40e_q_vector {
524 526
525 cpumask_t affinity_mask; 527 cpumask_t affinity_mask;
526 struct rcu_head rcu; /* to avoid race with update stats on free */ 528 struct rcu_head rcu; /* to avoid race with update stats on free */
527 char name[IFNAMSIZ + 9]; 529 char name[I40E_INT_NAME_STR_LEN];
528} ____cacheline_internodealigned_in_smp; 530} ____cacheline_internodealigned_in_smp;
529 531
530/* lan device */ 532/* lan device */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 564d0b0192f7..de17b6fbcc4e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -148,7 +148,7 @@ static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc)
148 148
149/* general information */ 149/* general information */
150#define I40E_AQ_LARGE_BUF 512 150#define I40E_AQ_LARGE_BUF 512
151#define I40E_ASQ_CMD_TIMEOUT 100 /* msecs */ 151#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */
152 152
153void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, 153void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
154 u16 opcode); 154 u16 opcode);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 8835aeeff23e..929e3d72a01e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -256,6 +256,8 @@ enum i40e_admin_queue_opc {
256 i40e_aqc_opc_lldp_stop = 0x0A05, 256 i40e_aqc_opc_lldp_stop = 0x0A05,
257 i40e_aqc_opc_lldp_start = 0x0A06, 257 i40e_aqc_opc_lldp_start = 0x0A06,
258 i40e_aqc_opc_get_cee_dcb_cfg = 0x0A07, 258 i40e_aqc_opc_get_cee_dcb_cfg = 0x0A07,
259 i40e_aqc_opc_lldp_set_local_mib = 0x0A08,
260 i40e_aqc_opc_lldp_stop_start_spec_agent = 0x0A09,
259 261
260 /* Tunnel commands */ 262 /* Tunnel commands */
261 i40e_aqc_opc_add_udp_tunnel = 0x0B00, 263 i40e_aqc_opc_add_udp_tunnel = 0x0B00,
@@ -268,6 +270,8 @@ enum i40e_admin_queue_opc {
268 /* OEM commands */ 270 /* OEM commands */
269 i40e_aqc_opc_oem_parameter_change = 0xFE00, 271 i40e_aqc_opc_oem_parameter_change = 0xFE00,
270 i40e_aqc_opc_oem_device_status_change = 0xFE01, 272 i40e_aqc_opc_oem_device_status_change = 0xFE01,
273 i40e_aqc_opc_oem_ocsd_initialize = 0xFE02,
274 i40e_aqc_opc_oem_ocbb_initialize = 0xFE03,
271 275
272 /* debug commands */ 276 /* debug commands */
273 i40e_aqc_opc_debug_get_deviceid = 0xFF00, 277 i40e_aqc_opc_debug_get_deviceid = 0xFF00,
@@ -276,7 +280,6 @@ enum i40e_admin_queue_opc {
276 i40e_aqc_opc_debug_write_reg = 0xFF04, 280 i40e_aqc_opc_debug_write_reg = 0xFF04,
277 i40e_aqc_opc_debug_modify_reg = 0xFF07, 281 i40e_aqc_opc_debug_modify_reg = 0xFF07,
278 i40e_aqc_opc_debug_dump_internals = 0xFF08, 282 i40e_aqc_opc_debug_dump_internals = 0xFF08,
279 i40e_aqc_opc_debug_modify_internals = 0xFF09,
280}; 283};
281 284
282/* command structures and indirect data structures */ 285/* command structures and indirect data structures */
@@ -410,6 +413,7 @@ struct i40e_aqc_list_capabilities_element_resp {
410#define I40E_AQ_CAP_ID_VSI 0x0017 413#define I40E_AQ_CAP_ID_VSI 0x0017
411#define I40E_AQ_CAP_ID_DCB 0x0018 414#define I40E_AQ_CAP_ID_DCB 0x0018
412#define I40E_AQ_CAP_ID_FCOE 0x0021 415#define I40E_AQ_CAP_ID_FCOE 0x0021
416#define I40E_AQ_CAP_ID_ISCSI 0x0022
413#define I40E_AQ_CAP_ID_RSS 0x0040 417#define I40E_AQ_CAP_ID_RSS 0x0040
414#define I40E_AQ_CAP_ID_RXQ 0x0041 418#define I40E_AQ_CAP_ID_RXQ 0x0041
415#define I40E_AQ_CAP_ID_TXQ 0x0042 419#define I40E_AQ_CAP_ID_TXQ 0x0042
@@ -454,8 +458,11 @@ struct i40e_aqc_arp_proxy_data {
454 __le32 pfpm_proxyfc; 458 __le32 pfpm_proxyfc;
455 __le32 ip_addr; 459 __le32 ip_addr;
456 u8 mac_addr[6]; 460 u8 mac_addr[6];
461 u8 reserved[2];
457}; 462};
458 463
464I40E_CHECK_STRUCT_LEN(0x14, i40e_aqc_arp_proxy_data);
465
459/* Set NS Proxy Table Entry Command (indirect 0x0105) */ 466/* Set NS Proxy Table Entry Command (indirect 0x0105) */
460struct i40e_aqc_ns_proxy_data { 467struct i40e_aqc_ns_proxy_data {
461 __le16 table_idx_mac_addr_0; 468 __le16 table_idx_mac_addr_0;
@@ -481,6 +488,8 @@ struct i40e_aqc_ns_proxy_data {
481 u8 ipv6_addr_1[16]; 488 u8 ipv6_addr_1[16];
482}; 489};
483 490
491I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data);
492
484/* Manage LAA Command (0x0106) - obsolete */ 493/* Manage LAA Command (0x0106) - obsolete */
485struct i40e_aqc_mng_laa { 494struct i40e_aqc_mng_laa {
486 __le16 command_flags; 495 __le16 command_flags;
@@ -491,6 +500,8 @@ struct i40e_aqc_mng_laa {
491 u8 reserved2[6]; 500 u8 reserved2[6];
492}; 501};
493 502
503I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa);
504
494/* Manage MAC Address Read Command (indirect 0x0107) */ 505/* Manage MAC Address Read Command (indirect 0x0107) */
495struct i40e_aqc_mac_address_read { 506struct i40e_aqc_mac_address_read {
496 __le16 command_flags; 507 __le16 command_flags;
@@ -562,6 +573,8 @@ struct i40e_aqc_get_switch_config_header_resp {
562 u8 reserved[12]; 573 u8 reserved[12];
563}; 574};
564 575
576I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp);
577
565struct i40e_aqc_switch_config_element_resp { 578struct i40e_aqc_switch_config_element_resp {
566 u8 element_type; 579 u8 element_type;
567#define I40E_AQ_SW_ELEM_TYPE_MAC 1 580#define I40E_AQ_SW_ELEM_TYPE_MAC 1
@@ -587,6 +600,8 @@ struct i40e_aqc_switch_config_element_resp {
587 __le16 element_info; 600 __le16 element_info;
588}; 601};
589 602
603I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_config_element_resp);
604
590/* Get Switch Configuration (indirect 0x0200) 605/* Get Switch Configuration (indirect 0x0200)
591 * an array of elements are returned in the response buffer 606 * an array of elements are returned in the response buffer
592 * the first in the array is the header, remainder are elements 607 * the first in the array is the header, remainder are elements
@@ -596,6 +611,8 @@ struct i40e_aqc_get_switch_config_resp {
596 struct i40e_aqc_switch_config_element_resp element[1]; 611 struct i40e_aqc_switch_config_element_resp element[1];
597}; 612};
598 613
614I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_switch_config_resp);
615
599/* Add Statistics (direct 0x0201) 616/* Add Statistics (direct 0x0201)
600 * Remove Statistics (direct 0x0202) 617 * Remove Statistics (direct 0x0202)
601 */ 618 */
@@ -661,6 +678,8 @@ struct i40e_aqc_switch_resource_alloc_element_resp {
661 u8 reserved2[6]; 678 u8 reserved2[6];
662}; 679};
663 680
681I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
682
664/* Add VSI (indirect 0x0210) 683/* Add VSI (indirect 0x0210)
665 * this indirect command uses struct i40e_aqc_vsi_properties_data 684 * this indirect command uses struct i40e_aqc_vsi_properties_data
666 * as the indirect buffer (128 bytes) 685 * as the indirect buffer (128 bytes)
@@ -1092,6 +1111,8 @@ struct i40e_aqc_remove_tag {
1092 u8 reserved[12]; 1111 u8 reserved[12];
1093}; 1112};
1094 1113
1114I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_tag);
1115
1095/* Add multicast E-Tag (direct 0x0257) 1116/* Add multicast E-Tag (direct 0x0257)
1096 * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields 1117 * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields
1097 * and no external data 1118 * and no external data
@@ -1207,7 +1228,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
1207 } ipaddr; 1228 } ipaddr;
1208 __le16 flags; 1229 __le16 flags;
1209#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 1230#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
1210#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ 1231#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
1211 I40E_AQC_ADD_CLOUD_FILTER_SHIFT) 1232 I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
1212/* 0x0000 reserved */ 1233/* 0x0000 reserved */
1213#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 1234#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
@@ -1240,7 +1261,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
1240 u8 reserved[4]; 1261 u8 reserved[4];
1241 __le16 queue_number; 1262 __le16 queue_number;
1242#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 1263#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
1243#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \ 1264#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x7FF << \
1244 I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) 1265 I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
1245 u8 reserved2[14]; 1266 u8 reserved2[14];
1246 /* response section */ 1267 /* response section */
@@ -1359,6 +1380,8 @@ struct i40e_aqc_configure_vsi_ets_sla_bw_data {
1359 u8 reserved1[28]; 1380 u8 reserved1[28];
1360}; 1381};
1361 1382
1383I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_vsi_ets_sla_bw_data);
1384
1362/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407) 1385/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
1363 * responds with i40e_aqc_qs_handles_resp 1386 * responds with i40e_aqc_qs_handles_resp
1364 */ 1387 */
@@ -1370,6 +1393,8 @@ struct i40e_aqc_configure_vsi_tc_bw_data {
1370 __le16 qs_handles[8]; 1393 __le16 qs_handles[8];
1371}; 1394};
1372 1395
1396I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_vsi_tc_bw_data);
1397
1373/* Query vsi bw configuration (indirect 0x0408) */ 1398/* Query vsi bw configuration (indirect 0x0408) */
1374struct i40e_aqc_query_vsi_bw_config_resp { 1399struct i40e_aqc_query_vsi_bw_config_resp {
1375 u8 tc_valid_bits; 1400 u8 tc_valid_bits;
@@ -1383,6 +1408,8 @@ struct i40e_aqc_query_vsi_bw_config_resp {
1383 u8 reserved3[23]; 1408 u8 reserved3[23];
1384}; 1409};
1385 1410
1411I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_vsi_bw_config_resp);
1412
1386/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */ 1413/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
1387struct i40e_aqc_query_vsi_ets_sla_config_resp { 1414struct i40e_aqc_query_vsi_ets_sla_config_resp {
1388 u8 tc_valid_bits; 1415 u8 tc_valid_bits;
@@ -1394,6 +1421,8 @@ struct i40e_aqc_query_vsi_ets_sla_config_resp {
1394 __le16 tc_bw_max[2]; 1421 __le16 tc_bw_max[2];
1395}; 1422};
1396 1423
1424I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_vsi_ets_sla_config_resp);
1425
1397/* Configure Switching Component Bandwidth Limit (direct 0x0410) */ 1426/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
1398struct i40e_aqc_configure_switching_comp_bw_limit { 1427struct i40e_aqc_configure_switching_comp_bw_limit {
1399 __le16 seid; 1428 __le16 seid;
@@ -1421,6 +1450,8 @@ struct i40e_aqc_configure_switching_comp_ets_data {
1421 u8 reserved2[96]; 1450 u8 reserved2[96];
1422}; 1451};
1423 1452
1453I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_configure_switching_comp_ets_data);
1454
1424/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */ 1455/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
1425struct i40e_aqc_configure_switching_comp_ets_bw_limit_data { 1456struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
1426 u8 tc_valid_bits; 1457 u8 tc_valid_bits;
@@ -1432,6 +1463,9 @@ struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
1432 u8 reserved1[28]; 1463 u8 reserved1[28];
1433}; 1464};
1434 1465
1466I40E_CHECK_STRUCT_LEN(0x40,
1467 i40e_aqc_configure_switching_comp_ets_bw_limit_data);
1468
1435/* Configure Switching Component Bandwidth Allocation per Tc 1469/* Configure Switching Component Bandwidth Allocation per Tc
1436 * (indirect 0x0417) 1470 * (indirect 0x0417)
1437 */ 1471 */
@@ -1443,6 +1477,8 @@ struct i40e_aqc_configure_switching_comp_bw_config_data {
1443 u8 reserved1[20]; 1477 u8 reserved1[20];
1444}; 1478};
1445 1479
1480I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_switching_comp_bw_config_data);
1481
1446/* Query Switching Component Configuration (indirect 0x0418) */ 1482/* Query Switching Component Configuration (indirect 0x0418) */
1447struct i40e_aqc_query_switching_comp_ets_config_resp { 1483struct i40e_aqc_query_switching_comp_ets_config_resp {
1448 u8 tc_valid_bits; 1484 u8 tc_valid_bits;
@@ -1453,6 +1489,8 @@ struct i40e_aqc_query_switching_comp_ets_config_resp {
1453 u8 reserved2[23]; 1489 u8 reserved2[23];
1454}; 1490};
1455 1491
1492I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_switching_comp_ets_config_resp);
1493
1456/* Query PhysicalPort ETS Configuration (indirect 0x0419) */ 1494/* Query PhysicalPort ETS Configuration (indirect 0x0419) */
1457struct i40e_aqc_query_port_ets_config_resp { 1495struct i40e_aqc_query_port_ets_config_resp {
1458 u8 reserved[4]; 1496 u8 reserved[4];
@@ -1468,6 +1506,8 @@ struct i40e_aqc_query_port_ets_config_resp {
1468 u8 reserved3[32]; 1506 u8 reserved3[32];
1469}; 1507};
1470 1508
1509I40E_CHECK_STRUCT_LEN(0x44, i40e_aqc_query_port_ets_config_resp);
1510
1471/* Query Switching Component Bandwidth Allocation per Traffic Type 1511/* Query Switching Component Bandwidth Allocation per Traffic Type
1472 * (indirect 0x041A) 1512 * (indirect 0x041A)
1473 */ 1513 */
@@ -1482,6 +1522,8 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
1482 __le16 tc_bw_max[2]; 1522 __le16 tc_bw_max[2];
1483}; 1523};
1484 1524
1525I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_switching_comp_bw_config_resp);
1526
1485/* Suspend/resume port TX traffic 1527/* Suspend/resume port TX traffic
1486 * (direct 0x041B and 0x041C) uses the generic SEID struct 1528 * (direct 0x041B and 0x041C) uses the generic SEID struct
1487 */ 1529 */
@@ -1495,6 +1537,8 @@ struct i40e_aqc_configure_partition_bw_data {
1495 u8 max_bw[16]; /* bandwidth limit */ 1537 u8 max_bw[16]; /* bandwidth limit */
1496}; 1538};
1497 1539
1540I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
1541
1498/* Get and set the active HMC resource profile and status. 1542/* Get and set the active HMC resource profile and status.
1499 * (direct 0x0500) and (direct 0x0501) 1543 * (direct 0x0500) and (direct 0x0501)
1500 */ 1544 */
@@ -1577,6 +1621,8 @@ struct i40e_aqc_module_desc {
1577 u8 reserved2[8]; 1621 u8 reserved2[8];
1578}; 1622};
1579 1623
1624I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_module_desc);
1625
1580struct i40e_aq_get_phy_abilities_resp { 1626struct i40e_aq_get_phy_abilities_resp {
1581 __le32 phy_type; /* bitmap using the above enum for offsets */ 1627 __le32 phy_type; /* bitmap using the above enum for offsets */
1582 u8 link_speed; /* bitmap using the above enum bit patterns */ 1628 u8 link_speed; /* bitmap using the above enum bit patterns */
@@ -1605,6 +1651,8 @@ struct i40e_aq_get_phy_abilities_resp {
1605 struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS]; 1651 struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
1606}; 1652};
1607 1653
1654I40E_CHECK_STRUCT_LEN(0x218, i40e_aq_get_phy_abilities_resp);
1655
1608/* Set PHY Config (direct 0x0601) */ 1656/* Set PHY Config (direct 0x0601) */
1609struct i40e_aq_set_phy_config { /* same bits as above in all */ 1657struct i40e_aq_set_phy_config { /* same bits as above in all */
1610 __le32 phy_type; 1658 __le32 phy_type;
@@ -1788,12 +1836,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
1788/* NVM Config Read (indirect 0x0704) */ 1836/* NVM Config Read (indirect 0x0704) */
1789struct i40e_aqc_nvm_config_read { 1837struct i40e_aqc_nvm_config_read {
1790 __le16 cmd_flags; 1838 __le16 cmd_flags;
1791#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1 1839#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
1792#define ANVM_READ_SINGLE_FEATURE 0 1840#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0
1793#define ANVM_READ_MULTIPLE_FEATURES 1 1841#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1
1794 __le16 element_count; 1842 __le16 element_count;
1795 __le16 element_id; /* Feature/field ID */ 1843 __le16 element_id; /* Feature/field ID */
1796 u8 reserved[2]; 1844 __le16 element_id_msw; /* MSWord of field ID */
1797 __le32 address_high; 1845 __le32 address_high;
1798 __le32 address_low; 1846 __le32 address_low;
1799}; 1847};
@@ -1811,21 +1859,32 @@ struct i40e_aqc_nvm_config_write {
1811 1859
1812I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); 1860I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
1813 1861
1862/* Used for 0x0704 as well as for 0x0705 commands */
1863#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1
1864#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
1865 (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
1866#define I40E_AQ_ANVM_FEATURE 0
1867#define I40E_AQ_ANVM_IMMEDIATE_FIELD	(1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
1814struct i40e_aqc_nvm_config_data_feature { 1868struct i40e_aqc_nvm_config_data_feature {
1815 __le16 feature_id; 1869 __le16 feature_id;
1816 __le16 instance_id; 1870#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01
1871#define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP 0x08
1872#define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR 0x10
1817 __le16 feature_options; 1873 __le16 feature_options;
1818 __le16 feature_selection; 1874 __le16 feature_selection;
1819}; 1875};
1820 1876
1877I40E_CHECK_STRUCT_LEN(0x6, i40e_aqc_nvm_config_data_feature);
1878
1821struct i40e_aqc_nvm_config_data_immediate_field { 1879struct i40e_aqc_nvm_config_data_immediate_field {
1822#define ANVM_FEATURE_OR_IMMEDIATE_MASK 0x2 1880 __le32 field_id;
1823 __le16 field_id; 1881 __le32 field_value;
1824 __le16 instance_id;
1825 __le16 field_options; 1882 __le16 field_options;
1826 __le16 field_value; 1883 __le16 reserved;
1827}; 1884};
1828 1885
1886I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field);
1887
1829/* Send to PF command (indirect 0x0801) id is only used by PF 1888/* Send to PF command (indirect 0x0801) id is only used by PF
1830 * Send to VF command (indirect 0x0802) id is only used by PF 1889 * Send to VF command (indirect 0x0802) id is only used by PF
1831 * Send to Peer PF command (indirect 0x0803) 1890 * Send to Peer PF command (indirect 0x0803)
@@ -2026,12 +2085,54 @@ struct i40e_aqc_get_cee_dcb_cfg_resp {
2026 u8 oper_tc_bw[8]; 2085 u8 oper_tc_bw[8];
2027 u8 oper_pfc_en; 2086 u8 oper_pfc_en;
2028 __le16 oper_app_prio; 2087 __le16 oper_app_prio;
2088#define I40E_AQC_CEE_APP_FCOE_SHIFT 0x0
2089#define I40E_AQC_CEE_APP_FCOE_MASK (0x7 << I40E_AQC_CEE_APP_FCOE_SHIFT)
2090#define I40E_AQC_CEE_APP_ISCSI_SHIFT 0x3
2091#define I40E_AQC_CEE_APP_ISCSI_MASK (0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT)
2092#define I40E_AQC_CEE_APP_FIP_SHIFT 0x8
2093#define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT)
2094#define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT)
2029 __le32 tlv_status; 2095 __le32 tlv_status;
2096#define I40E_AQC_CEE_PG_STATUS_SHIFT 0x0
2097#define I40E_AQC_CEE_PG_STATUS_MASK (0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT)
2098#define I40E_AQC_CEE_PFC_STATUS_SHIFT 0x3
2099#define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT)
2100#define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8
2101#define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT)
2030 u8 reserved[12]; 2102 u8 reserved[12];
2031}; 2103};
2032 2104
2033I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp); 2105I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp);
2034 2106
2107/* Set Local LLDP MIB (indirect 0x0A08)
2108 * Used to replace the local MIB of a given LLDP agent. e.g. DCBx
2109 */
2110struct i40e_aqc_lldp_set_local_mib {
2111#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0
2112#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
2113 u8 type;
2114 u8 reserved0;
2115 __le16 length;
2116 u8 reserved1[4];
2117 __le32 address_high;
2118 __le32 address_low;
2119};
2120
2121I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib);
2122
2123/* Stop/Start LLDP Agent (direct 0x0A09)
2124 * Used for stopping/starting specific LLDP agent. e.g. DCBx
2125 */
2126struct i40e_aqc_lldp_stop_start_specific_agent {
2127#define I40E_AQC_START_SPECIFIC_AGENT_SHIFT 0
2128#define I40E_AQC_START_SPECIFIC_AGENT_MASK \
2129 (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
2130 u8 command;
2131 u8 reserved[15];
2132};
2133
2134I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop_start_specific_agent);
2135
2035/* Add Udp Tunnel command and completion (direct 0x0B00) */ 2136/* Add Udp Tunnel command and completion (direct 0x0B00) */
2036struct i40e_aqc_add_udp_tunnel { 2137struct i40e_aqc_add_udp_tunnel {
2037 __le16 udp_port; 2138 __le16 udp_port;
@@ -2106,7 +2207,8 @@ struct i40e_aqc_oem_param_change {
2106#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 2207#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
2107#define I40E_AQ_OEM_PARAM_MAC 2 2208#define I40E_AQ_OEM_PARAM_MAC 2
2108 __le32 param_value1; 2209 __le32 param_value1;
2109 u8 param_value2[8]; 2210 __le16 param_value2;
2211 u8 reserved[6];
2110}; 2212};
2111 2213
2112I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change); 2214I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
@@ -2120,6 +2222,28 @@ struct i40e_aqc_oem_state_change {
2120 2222
2121I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); 2223I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
2122 2224
2225/* Initialize OCSD (0xFE02, direct) */
2226struct i40e_aqc_opc_oem_ocsd_initialize {
2227 u8 type_status;
2228 u8 reserved1[3];
2229 __le32 ocsd_memory_block_addr_high;
2230 __le32 ocsd_memory_block_addr_low;
2231 __le32 requested_update_interval;
2232};
2233
2234I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocsd_initialize);
2235
2236/* Initialize OCBB (0xFE03, direct) */
2237struct i40e_aqc_opc_oem_ocbb_initialize {
2238 u8 type_status;
2239 u8 reserved1[3];
2240 __le32 ocbb_memory_block_addr_high;
2241 __le32 ocbb_memory_block_addr_low;
2242 u8 reserved2[4];
2243};
2244
2245I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize);
2246
2123/* debug commands */ 2247/* debug commands */
2124 2248
2125/* get device id (0xFF00) uses the generic structure */ 2249/* get device id (0xFF00) uses the generic structure */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 3d741ee99a2c..11a9ffebf8d8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -742,6 +742,65 @@ i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
742#endif 742#endif
743 743
744/** 744/**
745 * i40e_read_pba_string - Reads part number string from EEPROM
746 * @hw: pointer to hardware structure
747 * @pba_num: stores the part number string from the EEPROM
748 * @pba_num_size: part number string buffer length
749 *
750 * Reads the part number string from the EEPROM.
751 **/
752i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
753 u32 pba_num_size)
754{
755 i40e_status status = 0;
756 u16 pba_word = 0;
757 u16 pba_size = 0;
758 u16 pba_ptr = 0;
759 u16 i = 0;
760
761 status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
762 if (status || (pba_word != 0xFAFA)) {
763 hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n");
764 return status;
765 }
766
767 status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
768 if (status) {
769 hw_dbg(hw, "Failed to read PBA Block pointer.\n");
770 return status;
771 }
772
773 status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
774 if (status) {
775 hw_dbg(hw, "Failed to read PBA Block size.\n");
776 return status;
777 }
778
779 /* Subtract one to get PBA word count (PBA Size word is included in
780 * total size)
781 */
782 pba_size--;
783 if (pba_num_size < (((u32)pba_size * 2) + 1)) {
784 hw_dbg(hw, "Buffer to small for PBA data.\n");
785 return I40E_ERR_PARAM;
786 }
787
788 for (i = 0; i < pba_size; i++) {
789 status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
790 if (status) {
791 hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
792 return status;
793 }
794
795 pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
796 pba_num[(i * 2) + 1] = pba_word & 0xFF;
797 }
798 pba_num[(pba_size * 2)] = '\0';
799
800 return status;
801}
802
803/**
745 * i40e_get_media_type - Gets media type 804 * i40e_get_media_type - Gets media type
746 * @hw: pointer to the hardware structure 805 * @hw: pointer to the hardware structure
747 **/ 806 **/
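
As a rough standalone sketch (not part of the patch) of what i40e_read_pba_string() does with the PBA block: each 16-bit NVM word carries two ASCII characters, high byte first, and the buffer is NUL-terminated after the last pair. The word values below are invented example data, not real NVM contents.

#include <stdio.h>

int main(void)
{
	/* hypothetical PBA block words already read from the NVM */
	unsigned short words[] = { 0x4734, 0x3132 };	/* "G4", "12" */
	unsigned int count = sizeof(words) / sizeof(words[0]);
	char pba[sizeof(words) + 1];
	unsigned int i;

	for (i = 0; i < count; i++) {
		pba[i * 2] = (words[i] >> 8) & 0xFF;	/* high byte first */
		pba[i * 2 + 1] = words[i] & 0xFF;
	}
	pba[count * 2] = '\0';

	printf("%s\n", pba);	/* prints "G412" */
	return 0;
}
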
@@ -1083,8 +1142,10 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
1083 if (mode == I40E_LINK_ACTIVITY) 1142 if (mode == I40E_LINK_ACTIVITY)
1084 blink = false; 1143 blink = false;
1085 1144
1086 gpio_val |= (blink ? 1 : 0) << 1145 if (blink)
1087 I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT; 1146 gpio_val |= (1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
1147 else
1148 gpio_val &= ~(1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
1088 1149
1089 wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val); 1150 wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
1090 break; 1151 break;
@@ -2035,6 +2096,43 @@ i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
2035} 2096}
2036 2097
2037/** 2098/**
2099 * i40e_aq_debug_read_register
2100 * @hw: pointer to the hw struct
2101 * @reg_addr: register address
2102 * @reg_val: register value
2103 * @cmd_details: pointer to command details structure or NULL
2104 *
2105 * Read the register using the admin queue commands
2106 **/
2107i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
2108 u32 reg_addr, u64 *reg_val,
2109 struct i40e_asq_cmd_details *cmd_details)
2110{
2111 struct i40e_aq_desc desc;
2112 struct i40e_aqc_debug_reg_read_write *cmd_resp =
2113 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
2114 i40e_status status;
2115
2116 if (reg_val == NULL)
2117 return I40E_ERR_PARAM;
2118
2119 i40e_fill_default_direct_cmd_desc(&desc,
2120 i40e_aqc_opc_debug_read_reg);
2121
2122 cmd_resp->address = cpu_to_le32(reg_addr);
2123
2124 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2125
2126 if (!status) {
2127 *reg_val = ((u64)cmd_resp->value_high << 32) |
2128 (u64)cmd_resp->value_low;
2129 *reg_val = le64_to_cpu(*reg_val);
2130 }
2131
2132 return status;
2133}
2134
2135/**
2038 * i40e_aq_debug_write_register 2136 * i40e_aq_debug_write_register
2039 * @hw: pointer to the hw struct 2137 * @hw: pointer to the hw struct
2040 * @reg_addr: register address 2138 * @reg_addr: register address
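
For context, the only caller this patch adds for the new AQ read helper is the port-counting loop in i40e_parse_discover_capabilities() further down; a minimal hypothetical use in driver context (the wrapper name count_port_if_enabled() is invented for illustration) might look like:

static void count_port_if_enabled(struct i40e_hw *hw, int port)
{
	u64 port_cfg = 0;

	/* the AQ read returns the physical (not port-relative) register
	 * value; NULL means no extra command details are attached
	 */
	if (!i40e_aq_debug_read_register(hw, I40E_PRTGEN_CNF + 4 * port,
					 &port_cfg, NULL) &&
	    !(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK))
		hw->num_ports++;
}
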
@@ -2264,6 +2362,7 @@ i40e_aq_erase_nvm_exit:
2264#define I40E_DEV_FUNC_CAP_VSI 0x17 2362#define I40E_DEV_FUNC_CAP_VSI 0x17
2265#define I40E_DEV_FUNC_CAP_DCB 0x18 2363#define I40E_DEV_FUNC_CAP_DCB 0x18
2266#define I40E_DEV_FUNC_CAP_FCOE 0x21 2364#define I40E_DEV_FUNC_CAP_FCOE 0x21
2365#define I40E_DEV_FUNC_CAP_ISCSI 0x22
2267#define I40E_DEV_FUNC_CAP_RSS 0x40 2366#define I40E_DEV_FUNC_CAP_RSS 0x40
2268#define I40E_DEV_FUNC_CAP_RX_QUEUES 0x41 2367#define I40E_DEV_FUNC_CAP_RX_QUEUES 0x41
2269#define I40E_DEV_FUNC_CAP_TX_QUEUES 0x42 2368#define I40E_DEV_FUNC_CAP_TX_QUEUES 0x42
@@ -2292,6 +2391,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
2292 enum i40e_admin_queue_opc list_type_opc) 2391 enum i40e_admin_queue_opc list_type_opc)
2293{ 2392{
2294 struct i40e_aqc_list_capabilities_element_resp *cap; 2393 struct i40e_aqc_list_capabilities_element_resp *cap;
2394 u32 valid_functions, num_functions;
2295 u32 number, logical_id, phys_id; 2395 u32 number, logical_id, phys_id;
2296 struct i40e_hw_capabilities *p; 2396 struct i40e_hw_capabilities *p;
2297 u32 i = 0; 2397 u32 i = 0;
@@ -2362,6 +2462,10 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
2362 if (number == 1) 2462 if (number == 1)
2363 p->fcoe = true; 2463 p->fcoe = true;
2364 break; 2464 break;
2465 case I40E_DEV_FUNC_CAP_ISCSI:
2466 if (number == 1)
2467 p->iscsi = true;
2468 break;
2365 case I40E_DEV_FUNC_CAP_RSS: 2469 case I40E_DEV_FUNC_CAP_RSS:
2366 p->rss = true; 2470 p->rss = true;
2367 p->rss_table_size = number; 2471 p->rss_table_size = number;
@@ -2427,6 +2531,34 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
2427 if (p->npar_enable || p->mfp_mode_1) 2531 if (p->npar_enable || p->mfp_mode_1)
2428 p->fcoe = false; 2532 p->fcoe = false;
2429 2533
2534 /* count the enabled ports (aka the "not disabled" ports) */
2535 hw->num_ports = 0;
2536 for (i = 0; i < 4; i++) {
2537 u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i);
2538 u64 port_cfg = 0;
2539
2540 /* use AQ read to get the physical register offset instead
2541 * of the port relative offset
2542 */
2543 i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL);
2544 if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK))
2545 hw->num_ports++;
2546 }
2547
2548 valid_functions = p->valid_functions;
2549 num_functions = 0;
2550 while (valid_functions) {
2551 if (valid_functions & 1)
2552 num_functions++;
2553 valid_functions >>= 1;
2554 }
2555
2556 /* partition id is 1-based, and functions are evenly spread
2557 * across the ports as partitions
2558 */
2559 hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
2560 hw->num_partitions = num_functions / hw->num_ports;
2561
2430 /* additional HW specific goodies that might 2562 /* additional HW specific goodies that might
2431 * someday be HW version specific 2563 * someday be HW version specific
2432 */ 2564 */
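
A worked example of the partition arithmetic above, using invented capability values rather than data from any real adapter:

#include <stdio.h>

int main(void)
{
	/* hypothetical device: 8 valid PFs (bitmap 0x00ff), 2 enabled ports */
	unsigned int valid_functions = 0x00ff, num_functions = 0;
	unsigned int num_ports = 2, pf_id = 5;

	while (valid_functions) {		/* popcount of the PF bitmap */
		if (valid_functions & 1)
			num_functions++;
		valid_functions >>= 1;
	}

	/* partitions are spread evenly across ports; partition id is 1-based */
	printf("num_partitions=%u partition_id=%u\n",
	       num_functions / num_ports, (pf_id / num_ports) + 1);
	/* prints num_partitions=4 partition_id=3 for this example */
	return 0;
}
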
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index cb0de455683e..61236f983971 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1890,7 +1890,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1890 dev_info(&pf->pdev->dev, " dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n"); 1890 dev_info(&pf->pdev->dev, " dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
1891 dev_info(&pf->pdev->dev, " dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n"); 1891 dev_info(&pf->pdev->dev, " dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
1892 dev_info(&pf->pdev->dev, " dump desc aq\n"); 1892 dev_info(&pf->pdev->dev, " dump desc aq\n");
1893 dev_info(&pf->pdev->dev, " dump stats\n");
1894 dev_info(&pf->pdev->dev, " dump reset stats\n"); 1893 dev_info(&pf->pdev->dev, " dump reset stats\n");
1895 dev_info(&pf->pdev->dev, " msg_enable [level]\n"); 1894 dev_info(&pf->pdev->dev, " msg_enable [level]\n");
1896 dev_info(&pf->pdev->dev, " read <reg>\n"); 1895 dev_info(&pf->pdev->dev, " read <reg>\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 951e8767fc50..b8230dc205ec 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -219,6 +219,16 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
219#define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN) 219#define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
220 220
221/** 221/**
222 * i40e_partition_setting_complaint - generic complaint for MFP restriction
223 * @pf: the PF struct
224 **/
225static void i40e_partition_setting_complaint(struct i40e_pf *pf)
226{
227 dev_info(&pf->pdev->dev,
228 "The link settings are allowed to be changed only from the first partition of a given port. Please switch to the first partition in order to change the setting.\n");
229}
230
231/**
222 * i40e_get_settings - Get Link Speed and Duplex settings 232 * i40e_get_settings - Get Link Speed and Duplex settings
223 * @netdev: network interface device structure 233 * @netdev: network interface device structure
224 * @ecmd: ethtool command 234 * @ecmd: ethtool command
@@ -485,6 +495,14 @@ static int i40e_set_settings(struct net_device *netdev,
485 u8 autoneg; 495 u8 autoneg;
486 u32 advertise; 496 u32 advertise;
487 497
498 /* Changing port settings is not supported if this isn't the
499 * port's controlling PF
500 */
501 if (hw->partition_id != 1) {
502 i40e_partition_setting_complaint(pf);
503 return -EOPNOTSUPP;
504 }
505
488 if (vsi != pf->vsi[pf->lan_vsi]) 506 if (vsi != pf->vsi[pf->lan_vsi])
489 return -EOPNOTSUPP; 507 return -EOPNOTSUPP;
490 508
@@ -687,6 +705,14 @@ static int i40e_set_pauseparam(struct net_device *netdev,
687 u8 aq_failures; 705 u8 aq_failures;
688 int err = 0; 706 int err = 0;
689 707
708 /* Changing the port's flow control is not supported if this isn't the
709 * port's controlling PF
710 */
711 if (hw->partition_id != 1) {
712 i40e_partition_setting_complaint(pf);
713 return -EOPNOTSUPP;
714 }
715
690 if (vsi != pf->vsi[pf->lan_vsi]) 716 if (vsi != pf->vsi[pf->lan_vsi])
691 return -EOPNOTSUPP; 717 return -EOPNOTSUPP;
692 718
@@ -1503,7 +1529,7 @@ static void i40e_get_wol(struct net_device *netdev,
1503 1529
1504 /* NVM bit on means WoL disabled for the port */ 1530 /* NVM bit on means WoL disabled for the port */
1505 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); 1531 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
1506 if ((1 << hw->port) & wol_nvm_bits) { 1532 if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1) {
1507 wol->supported = 0; 1533 wol->supported = 0;
1508 wol->wolopts = 0; 1534 wol->wolopts = 0;
1509 } else { 1535 } else {
@@ -1512,13 +1538,28 @@ static void i40e_get_wol(struct net_device *netdev,
1512 } 1538 }
1513} 1539}
1514 1540
1541/**
1542 * i40e_set_wol - set the WakeOnLAN configuration
1543 * @netdev: the netdev in question
1544 * @wol: the ethtool WoL setting data
1545 **/
1515static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 1546static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1516{ 1547{
1517 struct i40e_netdev_priv *np = netdev_priv(netdev); 1548 struct i40e_netdev_priv *np = netdev_priv(netdev);
1518 struct i40e_pf *pf = np->vsi->back; 1549 struct i40e_pf *pf = np->vsi->back;
1550 struct i40e_vsi *vsi = np->vsi;
1519 struct i40e_hw *hw = &pf->hw; 1551 struct i40e_hw *hw = &pf->hw;
1520 u16 wol_nvm_bits; 1552 u16 wol_nvm_bits;
1521 1553
1554 /* WoL not supported if this isn't the controlling PF on the port */
1555 if (hw->partition_id != 1) {
1556 i40e_partition_setting_complaint(pf);
1557 return -EOPNOTSUPP;
1558 }
1559
1560 if (vsi != pf->vsi[pf->lan_vsi])
1561 return -EOPNOTSUPP;
1562
1522 /* NVM bit on means WoL disabled for the port */ 1563 /* NVM bit on means WoL disabled for the port */
1523 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); 1564 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
1524 if (((1 << hw->port) & wol_nvm_bits)) 1565 if (((1 << hw->port) & wol_nvm_bits))
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index a8b8bd95108d..27c206e62da7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -39,15 +39,6 @@
39#include "i40e_fcoe.h" 39#include "i40e_fcoe.h"
40 40
41/** 41/**
42 * i40e_rx_is_fip - returns true if the rx packet type is FIP
43 * @ptype: the packet type field from rx descriptor write-back
44 **/
45static inline bool i40e_rx_is_fip(u16 ptype)
46{
47 return ptype == I40E_RX_PTYPE_L2_FIP_PAY2;
48}
49
50/**
51 * i40e_rx_is_fcoe - returns true if the rx packet type is FCoE 42 * i40e_rx_is_fcoe - returns true if the rx packet type is FCoE
52 * @ptype: the packet type field from rx descriptor write-back 43 * @ptype: the packet type field from rx descriptor write-back
53 **/ 44 **/
@@ -404,6 +395,7 @@ int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt)
404 I40E_AQ_VSI_PROP_INGRESS_UP_VALID | 395 I40E_AQ_VSI_PROP_INGRESS_UP_VALID |
405 I40E_AQ_VSI_PROP_EGRESS_UP_VALID)); 396 I40E_AQ_VSI_PROP_EGRESS_UP_VALID));
406 397
398 info->switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
407 enabled_tc = i40e_get_fcoe_tc_map(pf); 399 enabled_tc = i40e_get_fcoe_tc_map(pf);
408 i40e_vsi_setup_queue_map(vsi, ctxt, enabled_tc, true); 400 i40e_vsi_setup_queue_map(vsi, ctxt, enabled_tc, true);
409 401
@@ -1511,12 +1503,16 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi)
1511 strlcpy(netdev->name, "fcoe%d", IFNAMSIZ-1); 1503 strlcpy(netdev->name, "fcoe%d", IFNAMSIZ-1);
1512 netdev->mtu = FCOE_MTU; 1504 netdev->mtu = FCOE_MTU;
1513 SET_NETDEV_DEV(netdev, &pf->pdev->dev); 1505 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
1506 /* set different dev_port value 1 for FCoE netdev than the default
1507 * zero dev_port value for PF netdev; this helps the biosdevname user
1508 * tool to differentiate them correctly while both are attached to the
1509 * same PCI function.
1510 */
1511 netdev->dev_port = 1;
1514 i40e_add_filter(vsi, hw->mac.san_addr, 0, false, false); 1512 i40e_add_filter(vsi, hw->mac.san_addr, 0, false, false);
1515 i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0, false, false); 1513 i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0, false, false);
1516 i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0, false, false); 1514 i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0, false, false);
1517 i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0, false, false); 1515 i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0, false, false);
1518 i40e_add_filter(vsi, FIP_ALL_VN2VN_MACS, 0, false, false);
1519 i40e_add_filter(vsi, FIP_ALL_P2P_MACS, 0, false, false);
1520 1516
1521 /* use san mac */ 1517 /* use san mac */
1522 ether_addr_copy(netdev->dev_addr, hw->mac.san_addr); 1518 ether_addr_copy(netdev->dev_addr, hw->mac.san_addr);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index a5f2660d552d..cbe281be1c9f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
39 39
40#define DRV_VERSION_MAJOR 1 40#define DRV_VERSION_MAJOR 1
41#define DRV_VERSION_MINOR 2 41#define DRV_VERSION_MINOR 2
42#define DRV_VERSION_BUILD 2 42#define DRV_VERSION_BUILD 6
43#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ 43#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
44 __stringify(DRV_VERSION_MINOR) "." \ 44 __stringify(DRV_VERSION_MINOR) "." \
45 __stringify(DRV_VERSION_BUILD) DRV_KERN 45 __stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -2819,8 +2819,9 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
2819 * i40e_enable_misc_int_causes - enable the non-queue interrupts 2819 * i40e_enable_misc_int_causes - enable the non-queue interrupts
2820 * @hw: ptr to the hardware info 2820 * @hw: ptr to the hardware info
2821 **/ 2821 **/
2822static void i40e_enable_misc_int_causes(struct i40e_hw *hw) 2822static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
2823{ 2823{
2824 struct i40e_hw *hw = &pf->hw;
2824 u32 val; 2825 u32 val;
2825 2826
2826 /* clear things first */ 2827 /* clear things first */
@@ -2832,11 +2833,13 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
2832 I40E_PFINT_ICR0_ENA_GRST_MASK | 2833 I40E_PFINT_ICR0_ENA_GRST_MASK |
2833 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | 2834 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
2834 I40E_PFINT_ICR0_ENA_GPIO_MASK | 2835 I40E_PFINT_ICR0_ENA_GPIO_MASK |
2835 I40E_PFINT_ICR0_ENA_TIMESYNC_MASK |
2836 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | 2836 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
2837 I40E_PFINT_ICR0_ENA_VFLR_MASK | 2837 I40E_PFINT_ICR0_ENA_VFLR_MASK |
2838 I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 2838 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
2839 2839
2840 if (pf->flags & I40E_FLAG_PTP)
2841 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
2842
2840 wr32(hw, I40E_PFINT_ICR0_ENA, val); 2843 wr32(hw, I40E_PFINT_ICR0_ENA, val);
2841 2844
2842 /* SW_ITR_IDX = 0, but don't change INTENA */ 2845 /* SW_ITR_IDX = 0, but don't change INTENA */
@@ -2866,7 +2869,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
2866 q_vector->tx.latency_range = I40E_LOW_LATENCY; 2869 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2867 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr); 2870 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
2868 2871
2869 i40e_enable_misc_int_causes(hw); 2872 i40e_enable_misc_int_causes(pf);
2870 2873
2871 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ 2874 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2872 wr32(hw, I40E_PFINT_LNKLST0, 0); 2875 wr32(hw, I40E_PFINT_LNKLST0, 0);
@@ -2937,7 +2940,7 @@ void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
2937/** 2940/**
2938 * i40e_irq_dynamic_disable - Disable default interrupt generation settings 2941 * i40e_irq_dynamic_disable - Disable default interrupt generation settings
2939 * @vsi: pointer to a vsi 2942 * @vsi: pointer to a vsi
2940 * @vector: enable a particular Hw Interrupt vector 2943 * @vector: disable a particular Hw Interrupt vector
2941 **/ 2944 **/
2942void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector) 2945void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector)
2943{ 2946{
@@ -3402,10 +3405,10 @@ static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3402 err = i40e_vsi_request_irq_msix(vsi, basename); 3405 err = i40e_vsi_request_irq_msix(vsi, basename);
3403 else if (pf->flags & I40E_FLAG_MSI_ENABLED) 3406 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3404 err = request_irq(pf->pdev->irq, i40e_intr, 0, 3407 err = request_irq(pf->pdev->irq, i40e_intr, 0,
3405 pf->misc_int_name, pf); 3408 pf->int_name, pf);
3406 else 3409 else
3407 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED, 3410 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
3408 pf->misc_int_name, pf); 3411 pf->int_name, pf);
3409 3412
3410 if (err) 3413 if (err)
3411 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err); 3414 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
@@ -3999,6 +4002,35 @@ static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf)
3999 4002
4000#endif 4003#endif
4001/** 4004/**
4005 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
4006 * @pf: pointer to pf
4007 *
4008 * Get TC map for iSCSI PF type that will include iSCSI TC
4009 * and LAN TC.
4010 **/
4011static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4012{
4013 struct i40e_dcb_app_priority_table app;
4014 struct i40e_hw *hw = &pf->hw;
4015 u8 enabled_tc = 1; /* TC0 is always enabled */
4016 u8 tc, i;
4017 /* Get the iSCSI APP TLV */
4018 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4019
4020 for (i = 0; i < dcbcfg->numapps; i++) {
4021 app = dcbcfg->app[i];
4022 if (app.selector == I40E_APP_SEL_TCPIP &&
4023 app.protocolid == I40E_APP_PROTOID_ISCSI) {
4024 tc = dcbcfg->etscfg.prioritytable[app.priority];
4025 enabled_tc |= (1 << tc);
4026 break;
4027 }
4028 }
4029
4030 return enabled_tc;
4031}
4032
4033/**
4002 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config 4034 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
4003 * @dcbcfg: the corresponding DCBx configuration structure 4035 * @dcbcfg: the corresponding DCBx configuration structure
4004 * 4036 *
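
To make the mapping above concrete, here is a standalone sketch with made-up DCBX values (an iSCSI APP TLV advertising priority 4, which the ETS table assigns to TC2), mirroring how i40e_get_iscsi_tc_map() builds the bitmap:

#include <stdio.h>

int main(void)
{
	/* invented ETS priority-to-TC table and iSCSI APP priority */
	unsigned char prioritytable[8] = { 0, 0, 0, 0, 2, 0, 0, 0 };
	unsigned char iscsi_priority = 4;
	unsigned char enabled_tc = 1;		/* TC0 is always enabled */

	enabled_tc |= 1 << prioritytable[iscsi_priority];

	printf("enabled_tc=0x%02x\n", enabled_tc);	/* 0x05: TC0 + TC2 */
	return 0;
}
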
@@ -4061,18 +4093,23 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
4061 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) 4093 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4062 return 1; 4094 return 1;
4063 4095
4096 /* SFP mode will be enabled for all TCs on port */
4097 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4098 return i40e_dcb_get_num_tc(dcbcfg);
4099
4064 /* MFP mode return count of enabled TCs for this PF */ 4100 /* MFP mode return count of enabled TCs for this PF */
4065 if (pf->flags & I40E_FLAG_MFP_ENABLED) { 4101 if (pf->hw.func_caps.iscsi)
4102 enabled_tc = i40e_get_iscsi_tc_map(pf);
4103 else
4066 enabled_tc = pf->hw.func_caps.enabled_tcmap; 4104 enabled_tc = pf->hw.func_caps.enabled_tcmap;
4067 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4068 if (enabled_tc & (1 << i))
4069 num_tc++;
4070 }
4071 return num_tc;
4072 }
4073 4105
4074 /* SFP mode will be enabled for all TCs on port */ 4106 /* At least have TC0 */
4075 return i40e_dcb_get_num_tc(dcbcfg); 4107 enabled_tc = (enabled_tc ? enabled_tc : 0x1);
4108 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4109 if (enabled_tc & (1 << i))
4110 num_tc++;
4111 }
4112 return num_tc;
4076} 4113}
4077 4114
4078/** 4115/**
@@ -4110,12 +4147,15 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
4110 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) 4147 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4111 return i40e_pf_get_default_tc(pf); 4148 return i40e_pf_get_default_tc(pf);
4112 4149
4113 /* MFP mode will have enabled TCs set by FW */
4114 if (pf->flags & I40E_FLAG_MFP_ENABLED)
4115 return pf->hw.func_caps.enabled_tcmap;
4116
4117 /* SFP mode we want PF to be enabled for all TCs */ 4150 /* SFP mode we want PF to be enabled for all TCs */
4118 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config); 4151 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4152 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
4153
4154	/* MFP enabled and iSCSI PF type */
4155 if (pf->hw.func_caps.iscsi)
4156 return i40e_get_iscsi_tc_map(pf);
4157 else
4158 return pf->hw.func_caps.enabled_tcmap;
4119} 4159}
4120 4160
4121/** 4161/**
@@ -4505,9 +4545,6 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
4505 struct i40e_hw *hw = &pf->hw; 4545 struct i40e_hw *hw = &pf->hw;
4506 int err = 0; 4546 int err = 0;
4507 4547
4508 if (pf->hw.func_caps.npar_enable)
4509 goto out;
4510
4511 /* Get the initial DCB configuration */ 4548 /* Get the initial DCB configuration */
4512 err = i40e_init_dcb(hw); 4549 err = i40e_init_dcb(hw);
4513 if (!err) { 4550 if (!err) {
@@ -4533,7 +4570,8 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
4533 "DCBX offload is supported for this PF.\n"); 4570 "DCBX offload is supported for this PF.\n");
4534 } 4571 }
4535 } else { 4572 } else {
4536 dev_info(&pf->pdev->dev, "AQ Querying DCB configuration failed: %d\n", 4573 dev_info(&pf->pdev->dev,
4574 "AQ Querying DCB configuration failed: aq_err %d\n",
4537 pf->hw.aq.asq_last_status); 4575 pf->hw.aq.asq_last_status);
4538 } 4576 }
4539 4577
@@ -4557,6 +4595,15 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
4557 return; 4595 return;
4558 } 4596 }
4559 4597
4598 /* Warn user if link speed on NPAR enabled partition is not at
4599 * least 10GB
4600 */
4601 if (vsi->back->hw.func_caps.npar_enable &&
4602 (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
4603 vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
4604 netdev_warn(vsi->netdev,
4605 "The partition detected link speed that is less than 10Gbps\n");
4606
4560 switch (vsi->back->hw.phy.link_info.link_speed) { 4607 switch (vsi->back->hw.phy.link_info.link_speed) {
4561 case I40E_LINK_SPEED_40GB: 4608 case I40E_LINK_SPEED_40GB:
4562 strlcpy(speed, "40 Gbps", SPEED_SIZE); 4609 strlcpy(speed, "40 Gbps", SPEED_SIZE);
@@ -4836,7 +4883,7 @@ static int i40e_open(struct net_device *netdev)
4836int i40e_vsi_open(struct i40e_vsi *vsi) 4883int i40e_vsi_open(struct i40e_vsi *vsi)
4837{ 4884{
4838 struct i40e_pf *pf = vsi->back; 4885 struct i40e_pf *pf = vsi->back;
4839 char int_name[IFNAMSIZ]; 4886 char int_name[I40E_INT_NAME_STR_LEN];
4840 int err; 4887 int err;
4841 4888
4842 /* allocate descriptors */ 4889 /* allocate descriptors */
@@ -4870,7 +4917,7 @@ int i40e_vsi_open(struct i40e_vsi *vsi)
4870 goto err_set_queues; 4917 goto err_set_queues;
4871 4918
4872 } else if (vsi->type == I40E_VSI_FDIR) { 4919 } else if (vsi->type == I40E_VSI_FDIR) {
4873 snprintf(int_name, sizeof(int_name) - 1, "%s-%s-fdir", 4920 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
4874 dev_driver_string(&pf->pdev->dev), 4921 dev_driver_string(&pf->pdev->dev),
4875 dev_name(&pf->pdev->dev)); 4922 dev_name(&pf->pdev->dev));
4876 err = i40e_vsi_request_irq(vsi, int_name); 4923 err = i40e_vsi_request_irq(vsi, int_name);
@@ -5494,14 +5541,18 @@ static void i40e_link_event(struct i40e_pf *pf)
5494{ 5541{
5495 bool new_link, old_link; 5542 bool new_link, old_link;
5496 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 5543 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
5544 u8 new_link_speed, old_link_speed;
5497 5545
5498 /* set this to force the get_link_status call to refresh state */ 5546 /* set this to force the get_link_status call to refresh state */
5499 pf->hw.phy.get_link_info = true; 5547 pf->hw.phy.get_link_info = true;
5500 5548
5501 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); 5549 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
5502 new_link = i40e_get_link_status(&pf->hw); 5550 new_link = i40e_get_link_status(&pf->hw);
5551 old_link_speed = pf->hw.phy.link_info_old.link_speed;
5552 new_link_speed = pf->hw.phy.link_info.link_speed;
5503 5553
5504 if (new_link == old_link && 5554 if (new_link == old_link &&
5555 new_link_speed == old_link_speed &&
5505 (test_bit(__I40E_DOWN, &vsi->state) || 5556 (test_bit(__I40E_DOWN, &vsi->state) ||
5506 new_link == netif_carrier_ok(vsi->netdev))) 5557 new_link == netif_carrier_ok(vsi->netdev)))
5507 return; 5558 return;
@@ -6175,8 +6226,9 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
6175#ifdef CONFIG_I40E_DCB 6226#ifdef CONFIG_I40E_DCB
6176 ret = i40e_init_pf_dcb(pf); 6227 ret = i40e_init_pf_dcb(pf);
6177 if (ret) { 6228 if (ret) {
6178 dev_info(&pf->pdev->dev, "init_pf_dcb failed: %d\n", ret); 6229 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
6179 goto end_core_reset; 6230 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
6231 /* Continue without DCB enabled */
6180 } 6232 }
6181#endif /* CONFIG_I40E_DCB */ 6233#endif /* CONFIG_I40E_DCB */
6182#ifdef I40E_FCOE 6234#ifdef I40E_FCOE
@@ -6881,17 +6933,17 @@ static int i40e_init_msix(struct i40e_pf *pf)
6881 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) 6933 if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
6882 other_vecs++; 6934 other_vecs++;
6883 6935
6936 /* Scale down if necessary, and the rings will share vectors */
6937 pf->num_lan_msix = min_t(int, pf->num_lan_msix,
6938 (hw->func_caps.num_msix_vectors - other_vecs));
6939 v_budget = pf->num_lan_msix + other_vecs;
6940
6884#ifdef I40E_FCOE 6941#ifdef I40E_FCOE
6885 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 6942 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
6886 pf->num_fcoe_msix = pf->num_fcoe_qps; 6943 pf->num_fcoe_msix = pf->num_fcoe_qps;
6887 v_budget += pf->num_fcoe_msix; 6944 v_budget += pf->num_fcoe_msix;
6888 } 6945 }
6889
6890#endif 6946#endif
6891 /* Scale down if necessary, and the rings will share vectors */
6892 pf->num_lan_msix = min_t(int, pf->num_lan_msix,
6893 (hw->func_caps.num_msix_vectors - other_vecs));
6894 v_budget = pf->num_lan_msix + other_vecs;
6895 6947
6896 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), 6948 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
6897 GFP_KERNEL); 6949 GFP_KERNEL);
@@ -7113,16 +7165,16 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
7113 */ 7165 */
7114 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) { 7166 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
7115 err = request_irq(pf->msix_entries[0].vector, 7167 err = request_irq(pf->msix_entries[0].vector,
7116 i40e_intr, 0, pf->misc_int_name, pf); 7168 i40e_intr, 0, pf->int_name, pf);
7117 if (err) { 7169 if (err) {
7118 dev_info(&pf->pdev->dev, 7170 dev_info(&pf->pdev->dev,
7119 "request_irq for %s failed: %d\n", 7171 "request_irq for %s failed: %d\n",
7120 pf->misc_int_name, err); 7172 pf->int_name, err);
7121 return -EFAULT; 7173 return -EFAULT;
7122 } 7174 }
7123 } 7175 }
7124 7176
7125 i40e_enable_misc_int_causes(hw); 7177 i40e_enable_misc_int_causes(pf);
7126 7178
7127 /* associate no queues to the misc vector */ 7179 /* associate no queues to the misc vector */
7128 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST); 7180 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
@@ -7306,7 +7358,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
7306 7358
7307#endif /* I40E_FCOE */ 7359#endif /* I40E_FCOE */
7308#ifdef CONFIG_PCI_IOV 7360#ifdef CONFIG_PCI_IOV
7309 if (pf->hw.func_caps.num_vfs) { 7361 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
7310 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; 7362 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
7311 pf->flags |= I40E_FLAG_SRIOV_ENABLED; 7363 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
7312 pf->num_req_vfs = min_t(int, 7364 pf->num_req_vfs = min_t(int,
@@ -7766,7 +7818,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
7766 enabled_tc = i40e_pf_get_tc_map(pf); 7818 enabled_tc = i40e_pf_get_tc_map(pf);
7767 7819
7768 /* MFP mode setup queue map and update VSI */ 7820 /* MFP mode setup queue map and update VSI */
7769 if (pf->flags & I40E_FLAG_MFP_ENABLED) { 7821 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
7822 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
7770 memset(&ctxt, 0, sizeof(ctxt)); 7823 memset(&ctxt, 0, sizeof(ctxt));
7771 ctxt.seid = pf->main_vsi_seid; 7824 ctxt.seid = pf->main_vsi_seid;
7772 ctxt.pf_num = pf->hw.pf_id; 7825 ctxt.pf_num = pf->hw.pf_id;
@@ -7787,6 +7840,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
7787 /* Default/Main VSI is only enabled for TC0 7840 /* Default/Main VSI is only enabled for TC0
7788 * reconfigure it to enable all TCs that are 7841 * reconfigure it to enable all TCs that are
7789 * available on the port in SFP mode. 7842 * available on the port in SFP mode.
7843 * For MFP case the iSCSI PF would use this
7844 * flow to enable LAN+iSCSI TC.
7790 */ 7845 */
7791 ret = i40e_vsi_config_tc(vsi, enabled_tc); 7846 ret = i40e_vsi_config_tc(vsi, enabled_tc);
7792 if (ret) { 7847 if (ret) {
@@ -9164,7 +9219,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
9164 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE; 9219 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
9165 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT; 9220 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
9166 9221
9167 snprintf(pf->misc_int_name, sizeof(pf->misc_int_name) - 1, 9222 snprintf(pf->int_name, sizeof(pf->int_name) - 1,
9168 "%s-%s:misc", 9223 "%s-%s:misc",
9169 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev)); 9224 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
9170 9225
@@ -9227,6 +9282,16 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
9227 goto err_configure_lan_hmc; 9282 goto err_configure_lan_hmc;
9228 } 9283 }
9229 9284
9285 /* Disable LLDP for NICs that have firmware versions lower than v4.3.
9286 * Ignore error return codes because if it was already disabled via
9287 * hardware settings this will fail
9288 */
9289 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
9290 (pf->hw.aq.fw_maj_ver < 4)) {
9291 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
9292 i40e_aq_stop_lldp(hw, true, NULL);
9293 }
9294
9230 i40e_get_mac_addr(hw, hw->mac.addr); 9295 i40e_get_mac_addr(hw, hw->mac.addr);
9231 if (!is_valid_ether_addr(hw->mac.addr)) { 9296 if (!is_valid_ether_addr(hw->mac.addr)) {
9232 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr); 9297 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
@@ -9256,7 +9321,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
9256#ifdef CONFIG_I40E_DCB 9321#ifdef CONFIG_I40E_DCB
9257 err = i40e_init_pf_dcb(pf); 9322 err = i40e_init_pf_dcb(pf);
9258 if (err) { 9323 if (err) {
9259 dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err); 9324 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
9260 pf->flags &= ~I40E_FLAG_DCB_CAPABLE; 9325 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
9261 /* Continue without DCB enabled */ 9326 /* Continue without DCB enabled */
9262 } 9327 }
@@ -9671,6 +9736,8 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
9671 9736
9672 set_bit(__I40E_SUSPENDED, &pf->state); 9737 set_bit(__I40E_SUSPENDED, &pf->state);
9673 set_bit(__I40E_DOWN, &pf->state); 9738 set_bit(__I40E_DOWN, &pf->state);
9739 del_timer_sync(&pf->service_timer);
9740 cancel_work_sync(&pf->service_task);
9674 rtnl_lock(); 9741 rtnl_lock();
9675 i40e_prep_for_reset(pf); 9742 i40e_prep_for_reset(pf);
9676 rtnl_unlock(); 9743 rtnl_unlock();
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 2fb4306597e8..68e852a96680 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -71,6 +71,9 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
71i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw, 71i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
72 u32 reg_addr, u64 reg_val, 72 u32 reg_addr, u64 reg_val,
73 struct i40e_asq_cmd_details *cmd_details); 73 struct i40e_asq_cmd_details *cmd_details);
74i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
75 u32 reg_addr, u64 *reg_val,
76 struct i40e_asq_cmd_details *cmd_details);
74i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, 77i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
75 struct i40e_asq_cmd_details *cmd_details); 78 struct i40e_asq_cmd_details *cmd_details);
76i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id, 79i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
@@ -245,6 +248,8 @@ void i40e_clear_pxe_mode(struct i40e_hw *hw);
245bool i40e_get_link_status(struct i40e_hw *hw); 248bool i40e_get_link_status(struct i40e_hw *hw);
246i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr); 249i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
247i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr); 250i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
251i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
252 u32 pba_num_size);
248i40e_status i40e_validate_mac_addr(u8 *mac_addr); 253i40e_status i40e_validate_mac_addr(u8 *mac_addr);
249void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable); 254void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
250#ifdef I40E_FCOE 255#ifdef I40E_FCOE
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 6d1ec926aa37..fabcfa1b45b2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -247,7 +247,12 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
247 u32 prttsyn_stat; 247 u32 prttsyn_stat;
248 int n; 248 int n;
249 249
250 if (!(pf->flags & I40E_FLAG_PTP)) 250 /* Since we cannot turn off the Rx timestamp logic if the device is
251 * configured for Tx timestamping, we check if Rx timestamping is
252 * configured. We don't want to spuriously warn about Rx timestamp
253 * hangs if we don't care about the timestamps.
254 */
255 if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx)
251 return; 256 return;
252 257
253 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1); 258 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
@@ -305,6 +310,13 @@ void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf)
305 u32 hi, lo; 310 u32 hi, lo;
306 u64 ns; 311 u64 ns;
307 312
313 if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx)
314 return;
315
316 /* don't attempt to timestamp if we don't have an skb */
317 if (!pf->ptp_tx_skb)
318 return;
319
308 lo = rd32(hw, I40E_PRTTSYN_TXTIME_L); 320 lo = rd32(hw, I40E_PRTTSYN_TXTIME_L);
309 hi = rd32(hw, I40E_PRTTSYN_TXTIME_H); 321 hi = rd32(hw, I40E_PRTTSYN_TXTIME_H);
310 322
@@ -338,7 +350,7 @@ void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index)
338 /* Since we cannot turn off the Rx timestamp logic if the device is 350 /* Since we cannot turn off the Rx timestamp logic if the device is
339 * doing Tx timestamping, check if Rx timestamping is configured. 351 * doing Tx timestamping, check if Rx timestamping is configured.
340 */ 352 */
341 if (!pf->ptp_rx) 353 if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx)
342 return; 354 return;
343 355
344 hw = &pf->hw; 356 hw = &pf->hw;
@@ -467,7 +479,12 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
467 switch (config->rx_filter) { 479 switch (config->rx_filter) {
468 case HWTSTAMP_FILTER_NONE: 480 case HWTSTAMP_FILTER_NONE:
469 pf->ptp_rx = false; 481 pf->ptp_rx = false;
470 tsyntype = 0; 482 /* We set the type to V1, but do not enable UDP packet
483 * recognition. In this way, we should be as close to
484 * disabling PTP Rx timestamps as possible since V1 packets
485 * are always UDP, since L2 packets are a V2 feature.
486 */
487 tsyntype = I40E_PRTTSYN_CTL1_TSYNTYPE_V1;
471 break; 488 break;
472 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 489 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
473 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 490 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
@@ -521,17 +538,18 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
521 regval &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; 538 regval &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
522 wr32(hw, I40E_PFINT_ICR0_ENA, regval); 539 wr32(hw, I40E_PFINT_ICR0_ENA, regval);
523 540
524 /* There is no simple on/off switch for Rx. To "disable" Rx support, 541 /* Although there is no simple on/off switch for Rx, we "disable" Rx
525 * ignore any received timestamps, rather than turn off the clock. 542 * timestamps by setting V1-only mode and clearing the UDP
543 * recognition. This ought to disable all PTP Rx timestamps as V1
544 * packets are always over UDP. Note that software is configured to
545 * ignore Rx timestamps via the pf->ptp_rx flag.
526 */ 546 */
527 if (pf->ptp_rx) { 547 regval = rd32(hw, I40E_PRTTSYN_CTL1);
528 regval = rd32(hw, I40E_PRTTSYN_CTL1); 548 /* clear everything but the enable bit */
529 /* clear everything but the enable bit */ 549 regval &= I40E_PRTTSYN_CTL1_TSYNENA_MASK;
530 regval &= I40E_PRTTSYN_CTL1_TSYNENA_MASK; 550 /* now enable bits for desired Rx timestamps */
531 /* now enable bits for desired Rx timestamps */ 551 regval |= tsyntype;
532 regval |= tsyntype; 552 wr32(hw, I40E_PRTTSYN_CTL1, regval);
533 wr32(hw, I40E_PRTTSYN_CTL1, regval);
534 }
535 553
536 return 0; 554 return 0;
537} 555}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index cecb340898fe..2206d2d36f0f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -836,8 +836,8 @@ static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
836{ 836{
837 u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | 837 u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
838 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | 838 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
839 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK 839 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
840 /* allow 00 to be written to the index */; 840 /* allow 00 to be written to the index */
841 841
842 wr32(&vsi->back->hw, 842 wr32(&vsi->back->hw,
843 I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1), 843 I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
@@ -1098,6 +1098,8 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1098 if (!rx_ring->rx_bi) 1098 if (!rx_ring->rx_bi)
1099 goto err; 1099 goto err;
1100 1100
1101 u64_stats_init(&rx_ring->syncp);
1102
1101 /* Round up to nearest 4K */ 1103 /* Round up to nearest 4K */
1102 rx_ring->size = ring_is_16byte_desc_enabled(rx_ring) 1104 rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
1103 ? rx_ring->count * sizeof(union i40e_16byte_rx_desc) 1105 ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
@@ -1815,8 +1817,8 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
1815 u32 tx_flags = 0; 1817 u32 tx_flags = 0;
1816 1818
1817 /* if we have a HW VLAN tag being added, default to the HW one */ 1819 /* if we have a HW VLAN tag being added, default to the HW one */
1818 if (vlan_tx_tag_present(skb)) { 1820 if (skb_vlan_tag_present(skb)) {
1819 tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT; 1821 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
1820 tx_flags |= I40E_TX_FLAGS_HW_VLAN; 1822 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
1821 /* else if it is a SW VLAN, check the next protocol and store the tag */ 1823 /* else if it is a SW VLAN, check the next protocol and store the tag */
1822 } else if (protocol == htons(ETH_P_8021Q)) { 1824 } else if (protocol == htons(ETH_P_8021Q)) {
@@ -1939,6 +1941,9 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
1939 * we are not already transmitting a packet to be timestamped 1941 * we are not already transmitting a packet to be timestamped
1940 */ 1942 */
1941 pf = i40e_netdev_to_pf(tx_ring->netdev); 1943 pf = i40e_netdev_to_pf(tx_ring->netdev);
1944 if (!(pf->flags & I40E_FLAG_PTP))
1945 return 0;
1946
1942 if (pf->ptp_tx && 1947 if (pf->ptp_tx &&
1943 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) { 1948 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
1944 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1949 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index c1f2eb963357..e9901ef06a63 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -211,6 +211,7 @@ struct i40e_hw_capabilities {
211 bool evb_802_1_qbh; /* Bridge Port Extension */ 211 bool evb_802_1_qbh; /* Bridge Port Extension */
212 bool dcb; 212 bool dcb;
213 bool fcoe; 213 bool fcoe;
214 bool iscsi; /* Indicates iSCSI enabled */
214 bool mfp_mode_1; 215 bool mfp_mode_1;
215 bool mgmt_cem; 216 bool mgmt_cem;
216 bool ieee_1588; 217 bool ieee_1588;
@@ -431,7 +432,7 @@ struct i40e_hw {
431 u8 __iomem *hw_addr; 432 u8 __iomem *hw_addr;
432 void *back; 433 void *back;
433 434
434 /* function pointer structs */ 435 /* subsystem structs */
435 struct i40e_phy_info phy; 436 struct i40e_phy_info phy;
436 struct i40e_mac_info mac; 437 struct i40e_mac_info mac;
437 struct i40e_bus_info bus; 438 struct i40e_bus_info bus;
@@ -458,6 +459,11 @@ struct i40e_hw {
458 u8 pf_id; 459 u8 pf_id;
459 u16 main_vsi_seid; 460 u16 main_vsi_seid;
460 461
462 /* for multi-function MACs */
463 u16 partition_id;
464 u16 num_partitions;
465 u16 num_ports;
466
461 /* Closest numa node to the device */ 467 /* Closest numa node to the device */
462 u16 numa_node; 468 u16 numa_node;
463 469
@@ -1135,6 +1141,8 @@ struct i40e_hw_port_stats {
1135/* Checksum and Shadow RAM pointers */ 1141/* Checksum and Shadow RAM pointers */
1136#define I40E_SR_NVM_CONTROL_WORD 0x00 1142#define I40E_SR_NVM_CONTROL_WORD 0x00
1137#define I40E_SR_EMP_MODULE_PTR 0x0F 1143#define I40E_SR_EMP_MODULE_PTR 0x0F
1144#define I40E_SR_PBA_FLAGS 0x15
1145#define I40E_SR_PBA_BLOCK_PTR 0x16
1138#define I40E_SR_NVM_IMAGE_VERSION 0x18 1146#define I40E_SR_NVM_IMAGE_VERSION 0x18
1139#define I40E_SR_NVM_WAKE_ON_LAN 0x19 1147#define I40E_SR_NVM_WAKE_ON_LAN 0x19
1140#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 1148#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 5bae89550657..40f042af4131 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -647,6 +647,9 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
647 int i; 647 int i;
648 u32 reg; 648 u32 reg;
649 649
650 if (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
651 return;
652
650 /* warn the VF */ 653 /* warn the VF */
651 clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); 654 clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
652 655
@@ -668,13 +671,13 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
668 /* poll VPGEN_VFRSTAT reg to make sure 671 /* poll VPGEN_VFRSTAT reg to make sure
669 * that reset is complete 672 * that reset is complete
670 */ 673 */
671 for (i = 0; i < 100; i++) { 674 for (i = 0; i < 10; i++) {
672 /* vf reset requires driver to first reset the 675 /* VF reset requires driver to first reset the VF and then
673 * vf and then poll the status register to make sure 676 * poll the status register to make sure that the reset
674 * that the requested op was completed 677 * completed successfully. Due to internal HW FIFO flushes,
675 * successfully 678 * we must wait 10ms before the register will be valid.
676 */ 679 */
677 usleep_range(10, 20); 680 usleep_range(10000, 20000);
678 reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); 681 reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
679 if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) { 682 if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
680 rsd = true; 683 rsd = true;
@@ -706,6 +709,7 @@ complete_reset:
706 /* tell the VF the reset is done */ 709 /* tell the VF the reset is done */
707 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); 710 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
708 i40e_flush(hw); 711 i40e_flush(hw);
712 clear_bit(__I40E_VF_DISABLE, &pf->state);
709} 713}
710 714
711/** 715/**
@@ -790,11 +794,18 @@ void i40e_free_vfs(struct i40e_pf *pf)
790 794
791 if (!pf->vf) 795 if (!pf->vf)
792 return; 796 return;
797 while (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
798 usleep_range(1000, 2000);
793 799
794 /* Disable interrupt 0 so we don't try to handle the VFLR. */ 800 /* Disable IOV before freeing resources. This lets any VF drivers
795 i40e_irq_dynamic_disable_icr0(pf); 801 * running in the host get themselves cleaned up before we yank
802 * the carpet out from underneath their feet.
803 */
804 if (!pci_vfs_assigned(pf->pdev))
805 pci_disable_sriov(pf->pdev);
806
807 msleep(20); /* let any messages in transit get finished up */
796 808
797 mdelay(10); /* let any messages in transit get finished up */
798 /* free up vf resources */ 809 /* free up vf resources */
799 tmp = pf->num_alloc_vfs; 810 tmp = pf->num_alloc_vfs;
800 pf->num_alloc_vfs = 0; 811 pf->num_alloc_vfs = 0;
@@ -813,7 +824,6 @@ void i40e_free_vfs(struct i40e_pf *pf)
813 * before this function ever gets called. 824 * before this function ever gets called.
814 */ 825 */
815 if (!pci_vfs_assigned(pf->pdev)) { 826 if (!pci_vfs_assigned(pf->pdev)) {
816 pci_disable_sriov(pf->pdev);
817 /* Acknowledge VFLR for all VFS. Without this, VFs will fail to 827 /* Acknowledge VFLR for all VFS. Without this, VFs will fail to
818 * work correctly when SR-IOV gets re-enabled. 828 * work correctly when SR-IOV gets re-enabled.
819 */ 829 */
@@ -827,9 +837,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
827 dev_warn(&pf->pdev->dev, 837 dev_warn(&pf->pdev->dev,
828 "unable to disable SR-IOV because VFs are assigned.\n"); 838 "unable to disable SR-IOV because VFs are assigned.\n");
829 } 839 }
830 840 clear_bit(__I40E_VF_DISABLE, &pf->state);
831 /* Re-enable interrupt 0. */
832 i40e_irq_dynamic_enable_icr0(pf);
833} 841}
834 842
835#ifdef CONFIG_PCI_IOV 843#ifdef CONFIG_PCI_IOV
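
The new __I40E_VF_DISABLE bit above serializes VF reset against VF teardown: i40e_reset_vf() returns immediately if the bit is already held, while i40e_free_vfs() sleeps in a loop until it can claim it, and both clear it when done. A minimal sketch of that test_and_set_bit()/clear_bit() gate, assuming a simplified private struct (the DEMO_* and demo_* names are placeholders):

    #include <linux/bitops.h>
    #include <linux/delay.h>

    #define DEMO_VF_DISABLE 0       /* placeholder state bit */

    struct demo_pf {
            unsigned long state;
    };

    /* Per-VF reset: skip if a VF-disable operation already owns the bit. */
    static void demo_reset_vf(struct demo_pf *pf)
    {
            if (test_and_set_bit(DEMO_VF_DISABLE, &pf->state))
                    return;

            /* ... reset one VF ... */

            clear_bit(DEMO_VF_DISABLE, &pf->state);
    }

    /* Teardown of all VFs: wait for any in-flight reset to finish first. */
    static void demo_free_vfs(struct demo_pf *pf)
    {
            while (test_and_set_bit(DEMO_VF_DISABLE, &pf->state))
                    usleep_range(1000, 2000);

            /* ... free VF resources ... */

            clear_bit(DEMO_VF_DISABLE, &pf->state);
    }
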
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
index 6c31bf22c2c3..60f04e96a80e 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -148,7 +148,7 @@ static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc)
148 148
149/* general information */ 149/* general information */
150#define I40E_AQ_LARGE_BUF 512 150#define I40E_AQ_LARGE_BUF 512
151#define I40E_ASQ_CMD_TIMEOUT 100 /* msecs */ 151#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */
152 152
153void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, 153void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
154 u16 opcode); 154 u16 opcode);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index ff1b16370da9..e715bccfb5d2 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -268,6 +268,8 @@ enum i40e_admin_queue_opc {
268 /* OEM commands */ 268 /* OEM commands */
269 i40e_aqc_opc_oem_parameter_change = 0xFE00, 269 i40e_aqc_opc_oem_parameter_change = 0xFE00,
270 i40e_aqc_opc_oem_device_status_change = 0xFE01, 270 i40e_aqc_opc_oem_device_status_change = 0xFE01,
271 i40e_aqc_opc_oem_ocsd_initialize = 0xFE02,
272 i40e_aqc_opc_oem_ocbb_initialize = 0xFE03,
271 273
272 /* debug commands */ 274 /* debug commands */
273 i40e_aqc_opc_debug_get_deviceid = 0xFF00, 275 i40e_aqc_opc_debug_get_deviceid = 0xFF00,
@@ -276,7 +278,6 @@ enum i40e_admin_queue_opc {
276 i40e_aqc_opc_debug_write_reg = 0xFF04, 278 i40e_aqc_opc_debug_write_reg = 0xFF04,
277 i40e_aqc_opc_debug_modify_reg = 0xFF07, 279 i40e_aqc_opc_debug_modify_reg = 0xFF07,
278 i40e_aqc_opc_debug_dump_internals = 0xFF08, 280 i40e_aqc_opc_debug_dump_internals = 0xFF08,
279 i40e_aqc_opc_debug_modify_internals = 0xFF09,
280}; 281};
281 282
282/* command structures and indirect data structures */ 283/* command structures and indirect data structures */
@@ -410,6 +411,7 @@ struct i40e_aqc_list_capabilities_element_resp {
410#define I40E_AQ_CAP_ID_VSI 0x0017 411#define I40E_AQ_CAP_ID_VSI 0x0017
411#define I40E_AQ_CAP_ID_DCB 0x0018 412#define I40E_AQ_CAP_ID_DCB 0x0018
412#define I40E_AQ_CAP_ID_FCOE 0x0021 413#define I40E_AQ_CAP_ID_FCOE 0x0021
414#define I40E_AQ_CAP_ID_ISCSI 0x0022
413#define I40E_AQ_CAP_ID_RSS 0x0040 415#define I40E_AQ_CAP_ID_RSS 0x0040
414#define I40E_AQ_CAP_ID_RXQ 0x0041 416#define I40E_AQ_CAP_ID_RXQ 0x0041
415#define I40E_AQ_CAP_ID_TXQ 0x0042 417#define I40E_AQ_CAP_ID_TXQ 0x0042
@@ -454,8 +456,11 @@ struct i40e_aqc_arp_proxy_data {
454 __le32 pfpm_proxyfc; 456 __le32 pfpm_proxyfc;
455 __le32 ip_addr; 457 __le32 ip_addr;
456 u8 mac_addr[6]; 458 u8 mac_addr[6];
459 u8 reserved[2];
457}; 460};
458 461
462I40E_CHECK_STRUCT_LEN(0x14, i40e_aqc_arp_proxy_data);
463
459/* Set NS Proxy Table Entry Command (indirect 0x0105) */ 464/* Set NS Proxy Table Entry Command (indirect 0x0105) */
460struct i40e_aqc_ns_proxy_data { 465struct i40e_aqc_ns_proxy_data {
461 __le16 table_idx_mac_addr_0; 466 __le16 table_idx_mac_addr_0;
@@ -481,6 +486,8 @@ struct i40e_aqc_ns_proxy_data {
481 u8 ipv6_addr_1[16]; 486 u8 ipv6_addr_1[16];
482}; 487};
483 488
489I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data);
490
484/* Manage LAA Command (0x0106) - obsolete */ 491/* Manage LAA Command (0x0106) - obsolete */
485struct i40e_aqc_mng_laa { 492struct i40e_aqc_mng_laa {
486 __le16 command_flags; 493 __le16 command_flags;
@@ -491,6 +498,8 @@ struct i40e_aqc_mng_laa {
491 u8 reserved2[6]; 498 u8 reserved2[6];
492}; 499};
493 500
501I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa);
502
494/* Manage MAC Address Read Command (indirect 0x0107) */ 503/* Manage MAC Address Read Command (indirect 0x0107) */
495struct i40e_aqc_mac_address_read { 504struct i40e_aqc_mac_address_read {
496 __le16 command_flags; 505 __le16 command_flags;
@@ -562,6 +571,8 @@ struct i40e_aqc_get_switch_config_header_resp {
562 u8 reserved[12]; 571 u8 reserved[12];
563}; 572};
564 573
574I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp);
575
565struct i40e_aqc_switch_config_element_resp { 576struct i40e_aqc_switch_config_element_resp {
566 u8 element_type; 577 u8 element_type;
567#define I40E_AQ_SW_ELEM_TYPE_MAC 1 578#define I40E_AQ_SW_ELEM_TYPE_MAC 1
@@ -587,6 +598,8 @@ struct i40e_aqc_switch_config_element_resp {
587 __le16 element_info; 598 __le16 element_info;
588}; 599};
589 600
601I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_config_element_resp);
602
590/* Get Switch Configuration (indirect 0x0200) 603/* Get Switch Configuration (indirect 0x0200)
591 * an array of elements are returned in the response buffer 604 * an array of elements are returned in the response buffer
592 * the first in the array is the header, remainder are elements 605 * the first in the array is the header, remainder are elements
@@ -596,6 +609,8 @@ struct i40e_aqc_get_switch_config_resp {
596 struct i40e_aqc_switch_config_element_resp element[1]; 609 struct i40e_aqc_switch_config_element_resp element[1];
597}; 610};
598 611
612I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_switch_config_resp);
613
599/* Add Statistics (direct 0x0201) 614/* Add Statistics (direct 0x0201)
600 * Remove Statistics (direct 0x0202) 615 * Remove Statistics (direct 0x0202)
601 */ 616 */
@@ -661,6 +676,8 @@ struct i40e_aqc_switch_resource_alloc_element_resp {
661 u8 reserved2[6]; 676 u8 reserved2[6];
662}; 677};
663 678
679I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
680
664/* Add VSI (indirect 0x0210) 681/* Add VSI (indirect 0x0210)
665 * this indirect command uses struct i40e_aqc_vsi_properties_data 682 * this indirect command uses struct i40e_aqc_vsi_properties_data
666 * as the indirect buffer (128 bytes) 683 * as the indirect buffer (128 bytes)
@@ -1092,6 +1109,8 @@ struct i40e_aqc_remove_tag {
1092 u8 reserved[12]; 1109 u8 reserved[12];
1093}; 1110};
1094 1111
1112I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_tag);
1113
1095/* Add multicast E-Tag (direct 0x0257) 1114/* Add multicast E-Tag (direct 0x0257)
1096 * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields 1115 * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields
1097 * and no external data 1116 * and no external data
@@ -1207,7 +1226,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
1207 } ipaddr; 1226 } ipaddr;
1208 __le16 flags; 1227 __le16 flags;
1209#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 1228#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
1210#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ 1229#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
1211 I40E_AQC_ADD_CLOUD_FILTER_SHIFT) 1230 I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
1212/* 0x0000 reserved */ 1231/* 0x0000 reserved */
1213#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 1232#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
@@ -1240,7 +1259,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
1240 u8 reserved[4]; 1259 u8 reserved[4];
1241 __le16 queue_number; 1260 __le16 queue_number;
1242#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 1261#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
1243#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \ 1262#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x7FF << \
1244 I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) 1263 I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
1245 u8 reserved2[14]; 1264 u8 reserved2[14];
1246 /* response section */ 1265 /* response section */
@@ -1359,6 +1378,8 @@ struct i40e_aqc_configure_vsi_ets_sla_bw_data {
1359 u8 reserved1[28]; 1378 u8 reserved1[28];
1360}; 1379};
1361 1380
1381I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_vsi_ets_sla_bw_data);
1382
1362/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407) 1383/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
1363 * responds with i40e_aqc_qs_handles_resp 1384 * responds with i40e_aqc_qs_handles_resp
1364 */ 1385 */
@@ -1370,6 +1391,8 @@ struct i40e_aqc_configure_vsi_tc_bw_data {
1370 __le16 qs_handles[8]; 1391 __le16 qs_handles[8];
1371}; 1392};
1372 1393
1394I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_vsi_tc_bw_data);
1395
1373/* Query vsi bw configuration (indirect 0x0408) */ 1396/* Query vsi bw configuration (indirect 0x0408) */
1374struct i40e_aqc_query_vsi_bw_config_resp { 1397struct i40e_aqc_query_vsi_bw_config_resp {
1375 u8 tc_valid_bits; 1398 u8 tc_valid_bits;
@@ -1383,6 +1406,8 @@ struct i40e_aqc_query_vsi_bw_config_resp {
1383 u8 reserved3[23]; 1406 u8 reserved3[23];
1384}; 1407};
1385 1408
1409I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_vsi_bw_config_resp);
1410
1386/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */ 1411/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
1387struct i40e_aqc_query_vsi_ets_sla_config_resp { 1412struct i40e_aqc_query_vsi_ets_sla_config_resp {
1388 u8 tc_valid_bits; 1413 u8 tc_valid_bits;
@@ -1394,6 +1419,8 @@ struct i40e_aqc_query_vsi_ets_sla_config_resp {
1394 __le16 tc_bw_max[2]; 1419 __le16 tc_bw_max[2];
1395}; 1420};
1396 1421
1422I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_vsi_ets_sla_config_resp);
1423
1397/* Configure Switching Component Bandwidth Limit (direct 0x0410) */ 1424/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
1398struct i40e_aqc_configure_switching_comp_bw_limit { 1425struct i40e_aqc_configure_switching_comp_bw_limit {
1399 __le16 seid; 1426 __le16 seid;
@@ -1421,6 +1448,8 @@ struct i40e_aqc_configure_switching_comp_ets_data {
1421 u8 reserved2[96]; 1448 u8 reserved2[96];
1422}; 1449};
1423 1450
1451I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_configure_switching_comp_ets_data);
1452
1424/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */ 1453/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
1425struct i40e_aqc_configure_switching_comp_ets_bw_limit_data { 1454struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
1426 u8 tc_valid_bits; 1455 u8 tc_valid_bits;
@@ -1432,6 +1461,9 @@ struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
1432 u8 reserved1[28]; 1461 u8 reserved1[28];
1433}; 1462};
1434 1463
1464I40E_CHECK_STRUCT_LEN(0x40,
1465 i40e_aqc_configure_switching_comp_ets_bw_limit_data);
1466
1435/* Configure Switching Component Bandwidth Allocation per Tc 1467/* Configure Switching Component Bandwidth Allocation per Tc
1436 * (indirect 0x0417) 1468 * (indirect 0x0417)
1437 */ 1469 */
@@ -1443,6 +1475,8 @@ struct i40e_aqc_configure_switching_comp_bw_config_data {
1443 u8 reserved1[20]; 1475 u8 reserved1[20];
1444}; 1476};
1445 1477
1478I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_switching_comp_bw_config_data);
1479
1446/* Query Switching Component Configuration (indirect 0x0418) */ 1480/* Query Switching Component Configuration (indirect 0x0418) */
1447struct i40e_aqc_query_switching_comp_ets_config_resp { 1481struct i40e_aqc_query_switching_comp_ets_config_resp {
1448 u8 tc_valid_bits; 1482 u8 tc_valid_bits;
@@ -1453,6 +1487,8 @@ struct i40e_aqc_query_switching_comp_ets_config_resp {
1453 u8 reserved2[23]; 1487 u8 reserved2[23];
1454}; 1488};
1455 1489
1490I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_switching_comp_ets_config_resp);
1491
1456/* Query PhysicalPort ETS Configuration (indirect 0x0419) */ 1492/* Query PhysicalPort ETS Configuration (indirect 0x0419) */
1457struct i40e_aqc_query_port_ets_config_resp { 1493struct i40e_aqc_query_port_ets_config_resp {
1458 u8 reserved[4]; 1494 u8 reserved[4];
@@ -1468,6 +1504,8 @@ struct i40e_aqc_query_port_ets_config_resp {
1468 u8 reserved3[32]; 1504 u8 reserved3[32];
1469}; 1505};
1470 1506
1507I40E_CHECK_STRUCT_LEN(0x44, i40e_aqc_query_port_ets_config_resp);
1508
1471/* Query Switching Component Bandwidth Allocation per Traffic Type 1509/* Query Switching Component Bandwidth Allocation per Traffic Type
1472 * (indirect 0x041A) 1510 * (indirect 0x041A)
1473 */ 1511 */
@@ -1482,6 +1520,8 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
1482 __le16 tc_bw_max[2]; 1520 __le16 tc_bw_max[2];
1483}; 1521};
1484 1522
1523I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_switching_comp_bw_config_resp);
1524
1485/* Suspend/resume port TX traffic 1525/* Suspend/resume port TX traffic
1486 * (direct 0x041B and 0x041C) uses the generic SEID struct 1526 * (direct 0x041B and 0x041C) uses the generic SEID struct
1487 */ 1527 */
@@ -1495,6 +1535,8 @@ struct i40e_aqc_configure_partition_bw_data {
1495 u8 max_bw[16]; /* bandwidth limit */ 1535 u8 max_bw[16]; /* bandwidth limit */
1496}; 1536};
1497 1537
1538I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
1539
1498/* Get and set the active HMC resource profile and status. 1540/* Get and set the active HMC resource profile and status.
1499 * (direct 0x0500) and (direct 0x0501) 1541 * (direct 0x0500) and (direct 0x0501)
1500 */ 1542 */
@@ -1577,6 +1619,8 @@ struct i40e_aqc_module_desc {
1577 u8 reserved2[8]; 1619 u8 reserved2[8];
1578}; 1620};
1579 1621
1622I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_module_desc);
1623
1580struct i40e_aq_get_phy_abilities_resp { 1624struct i40e_aq_get_phy_abilities_resp {
1581 __le32 phy_type; /* bitmap using the above enum for offsets */ 1625 __le32 phy_type; /* bitmap using the above enum for offsets */
1582 u8 link_speed; /* bitmap using the above enum bit patterns */ 1626 u8 link_speed; /* bitmap using the above enum bit patterns */
@@ -1605,6 +1649,8 @@ struct i40e_aq_get_phy_abilities_resp {
1605 struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS]; 1649 struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
1606}; 1650};
1607 1651
1652I40E_CHECK_STRUCT_LEN(0x218, i40e_aq_get_phy_abilities_resp);
1653
1608/* Set PHY Config (direct 0x0601) */ 1654/* Set PHY Config (direct 0x0601) */
1609struct i40e_aq_set_phy_config { /* same bits as above in all */ 1655struct i40e_aq_set_phy_config { /* same bits as above in all */
1610 __le32 phy_type; 1656 __le32 phy_type;
@@ -1788,12 +1834,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
1788/* NVM Config Read (indirect 0x0704) */ 1834/* NVM Config Read (indirect 0x0704) */
1789struct i40e_aqc_nvm_config_read { 1835struct i40e_aqc_nvm_config_read {
1790 __le16 cmd_flags; 1836 __le16 cmd_flags;
1791#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1 1837#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
1792#define ANVM_READ_SINGLE_FEATURE 0 1838#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0
1793#define ANVM_READ_MULTIPLE_FEATURES 1 1839#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1
1794 __le16 element_count; 1840 __le16 element_count;
1795 __le16 element_id; /* Feature/field ID */ 1841 __le16 element_id; /* Feature/field ID */
1796 u8 reserved[2]; 1842 __le16 element_id_msw; /* MSWord of field ID */
1797 __le32 address_high; 1843 __le32 address_high;
1798 __le32 address_low; 1844 __le32 address_low;
1799}; 1845};
@@ -1811,21 +1857,32 @@ struct i40e_aqc_nvm_config_write {
1811 1857
1812I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); 1858I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
1813 1859
1860/* Used for 0x0704 as well as for 0x0705 commands */
1861#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1
1862#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
1863 (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
1864#define I40E_AQ_ANVM_FEATURE 0
1865#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << FEATURE_OR_IMMEDIATE_SHIFT)
1814struct i40e_aqc_nvm_config_data_feature { 1866struct i40e_aqc_nvm_config_data_feature {
1815 __le16 feature_id; 1867 __le16 feature_id;
1816 __le16 instance_id; 1868#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01
1869#define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP 0x08
1870#define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR 0x10
1817 __le16 feature_options; 1871 __le16 feature_options;
1818 __le16 feature_selection; 1872 __le16 feature_selection;
1819}; 1873};
1820 1874
1875I40E_CHECK_STRUCT_LEN(0x6, i40e_aqc_nvm_config_data_feature);
1876
1821struct i40e_aqc_nvm_config_data_immediate_field { 1877struct i40e_aqc_nvm_config_data_immediate_field {
1822#define ANVM_FEATURE_OR_IMMEDIATE_MASK 0x2 1878 __le32 field_id;
1823 __le16 field_id; 1879 __le32 field_value;
1824 __le16 instance_id;
1825 __le16 field_options; 1880 __le16 field_options;
1826 __le16 field_value; 1881 __le16 reserved;
1827}; 1882};
1828 1883
1884I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field);
1885
1829/* Send to PF command (indirect 0x0801) id is only used by PF 1886/* Send to PF command (indirect 0x0801) id is only used by PF
1830 * Send to VF command (indirect 0x0802) id is only used by PF 1887 * Send to VF command (indirect 0x0802) id is only used by PF
1831 * Send to Peer PF command (indirect 0x0803) 1888 * Send to Peer PF command (indirect 0x0803)
@@ -2082,7 +2139,8 @@ struct i40e_aqc_oem_param_change {
2082#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 2139#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
2083#define I40E_AQ_OEM_PARAM_MAC 2 2140#define I40E_AQ_OEM_PARAM_MAC 2
2084 __le32 param_value1; 2141 __le32 param_value1;
2085 u8 param_value2[8]; 2142 __le16 param_value2;
2143 u8 reserved[6];
2086}; 2144};
2087 2145
2088I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change); 2146I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
@@ -2096,6 +2154,28 @@ struct i40e_aqc_oem_state_change {
2096 2154
2097I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); 2155I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
2098 2156
2157/* Initialize OCSD (0xFE02, direct) */
2158struct i40e_aqc_opc_oem_ocsd_initialize {
2159 u8 type_status;
2160 u8 reserved1[3];
2161 __le32 ocsd_memory_block_addr_high;
2162 __le32 ocsd_memory_block_addr_low;
2163 __le32 requested_update_interval;
2164};
2165
2166I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocsd_initialize);
2167
2168/* Initialize OCBB (0xFE03, direct) */
2169struct i40e_aqc_opc_oem_ocbb_initialize {
2170 u8 type_status;
2171 u8 reserved1[3];
2172 __le32 ocbb_memory_block_addr_high;
2173 __le32 ocbb_memory_block_addr_low;
2174 u8 reserved2[4];
2175};
2176
2177I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize);
2178
2099/* debug commands */ 2179/* debug commands */
2100 2180
2101/* get device id (0xFF00) uses the generic structure */ 2181/* get device id (0xFF00) uses the generic structure */
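
Many of the admin-queue structures above gain I40E_CHECK_STRUCT_LEN()/I40E_CHECK_CMD_LENGTH() lines. The macro bodies are not part of this hunk, but checks of this kind are normally compile-time size assertions, so an accidental padding or field-size change breaks the build instead of silently corrupting the command layout on the wire. A sketch of the idea using C11 static_assert (the driver's actual macros may be defined differently; the demo_* struct is made up for illustration):

    #include <assert.h>     /* C11 static_assert */
    #include <stdint.h>

    /* Sketch only: mimics the intent of the size checks above. */
    #define DEMO_CHECK_STRUCT_LEN(n, X) \
            static_assert(sizeof(struct X) == (n), #X " has unexpected size")

    struct demo_aq_cmd {
            uint16_t flags;
            uint16_t opcode;
            uint16_t datalen;
            uint16_t retval;
            uint32_t cookie_high;
            uint32_t cookie_low;
            uint32_t param0;
            uint32_t param1;
            uint32_t addr_high;
            uint32_t addr_low;
    };

    DEMO_CHECK_STRUCT_LEN(32, demo_aq_cmd);
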
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 04c7c1557a0c..29004382f462 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -192,6 +192,8 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
192 return le32_to_cpu(*(volatile __le32 *)head); 192 return le32_to_cpu(*(volatile __le32 *)head);
193} 193}
194 194
195#define WB_STRIDE 0x3
196
195/** 197/**
196 * i40e_clean_tx_irq - Reclaim resources after transmit completes 198 * i40e_clean_tx_irq - Reclaim resources after transmit completes
197 * @tx_ring: tx ring to clean 199 * @tx_ring: tx ring to clean
@@ -293,6 +295,14 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
293 tx_ring->q_vector->tx.total_bytes += total_bytes; 295 tx_ring->q_vector->tx.total_bytes += total_bytes;
294 tx_ring->q_vector->tx.total_packets += total_packets; 296 tx_ring->q_vector->tx.total_packets += total_packets;
295 297
298 if (budget &&
299 !((i & WB_STRIDE) == WB_STRIDE) &&
300 !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
301 (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
302 tx_ring->arm_wb = true;
303 else
304 tx_ring->arm_wb = false;
305
296 if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) { 306 if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
297 /* schedule immediate reset if we believe we hung */ 307 /* schedule immediate reset if we believe we hung */
298 dev_info(tx_ring->dev, "Detected Tx Unit Hang\n" 308 dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
@@ -344,6 +354,24 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
344} 354}
345 355
346/** 356/**
357 * i40e_force_wb -Arm hardware to do a wb on noncache aligned descriptors
358 * @vsi: the VSI we care about
359 * @q_vector: the vector on which to force writeback
360 *
361 **/
362static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
363{
364 u32 val = I40E_VFINT_DYN_CTLN_INTENA_MASK |
365 I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
366 I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
367 /* allow 00 to be written to the index */
368
369 wr32(&vsi->back->hw,
370 I40E_VFINT_DYN_CTLN1(q_vector->v_idx + vsi->base_vector - 1),
371 val);
372}
373
374/**
347 * i40e_set_new_dynamic_itr - Find new ITR level 375 * i40e_set_new_dynamic_itr - Find new ITR level
348 * @rc: structure containing ring performance data 376 * @rc: structure containing ring performance data
349 * 377 *
@@ -568,6 +596,8 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring)
568 if (!rx_ring->rx_bi) 596 if (!rx_ring->rx_bi)
569 goto err; 597 goto err;
570 598
599 u64_stats_init(&rx_ring->syncp);
600
571 /* Round up to nearest 4K */ 601 /* Round up to nearest 4K */
572 rx_ring->size = ring_is_16byte_desc_enabled(rx_ring) 602 rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
573 ? rx_ring->count * sizeof(union i40e_16byte_rx_desc) 603 ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
@@ -1065,6 +1095,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
1065 struct i40e_vsi *vsi = q_vector->vsi; 1095 struct i40e_vsi *vsi = q_vector->vsi;
1066 struct i40e_ring *ring; 1096 struct i40e_ring *ring;
1067 bool clean_complete = true; 1097 bool clean_complete = true;
1098 bool arm_wb = false;
1068 int budget_per_ring; 1099 int budget_per_ring;
1069 1100
1070 if (test_bit(__I40E_DOWN, &vsi->state)) { 1101 if (test_bit(__I40E_DOWN, &vsi->state)) {
@@ -1075,8 +1106,10 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
1075 /* Since the actual Tx work is minimal, we can give the Tx a larger 1106 /* Since the actual Tx work is minimal, we can give the Tx a larger
1076 * budget and be more aggressive about cleaning up the Tx descriptors. 1107 * budget and be more aggressive about cleaning up the Tx descriptors.
1077 */ 1108 */
1078 i40e_for_each_ring(ring, q_vector->tx) 1109 i40e_for_each_ring(ring, q_vector->tx) {
1079 clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit); 1110 clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
1111 arm_wb |= ring->arm_wb;
1112 }
1080 1113
1081 /* We attempt to distribute budget to each Rx queue fairly, but don't 1114 /* We attempt to distribute budget to each Rx queue fairly, but don't
1082 * allow the budget to go below 1 because that would exit polling early. 1115 * allow the budget to go below 1 because that would exit polling early.
@@ -1087,8 +1120,11 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
1087 clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring); 1120 clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
1088 1121
1089 /* If work not completed, return budget and polling will return */ 1122 /* If work not completed, return budget and polling will return */
1090 if (!clean_complete) 1123 if (!clean_complete) {
1124 if (arm_wb)
1125 i40e_force_wb(vsi, q_vector);
1091 return budget; 1126 return budget;
1127 }
1092 1128
1093 /* Work is done so exit the polling mode and re-enable the interrupt */ 1129 /* Work is done so exit the polling mode and re-enable the interrupt */
1094 napi_complete(napi); 1130 napi_complete(napi);
@@ -1122,8 +1158,8 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
1122 u32 tx_flags = 0; 1158 u32 tx_flags = 0;
1123 1159
1124 /* if we have a HW VLAN tag being added, default to the HW one */ 1160 /* if we have a HW VLAN tag being added, default to the HW one */
1125 if (vlan_tx_tag_present(skb)) { 1161 if (skb_vlan_tag_present(skb)) {
1126 tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT; 1162 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
1127 tx_flags |= I40E_TX_FLAGS_HW_VLAN; 1163 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
1128 /* else if it is a SW VLAN, check the next protocol and store the tag */ 1164 /* else if it is a SW VLAN, check the next protocol and store the tag */
1129 } else if (protocol == htons(ETH_P_8021Q)) { 1165 } else if (protocol == htons(ETH_P_8021Q)) {
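
In the Tx clean path above, arm_wb is requested only when budget remains, the VSI is not down, descriptors are still outstanding, and the stopping index is not stride-aligned; with WB_STRIDE equal to 0x3, the test (i & WB_STRIDE) == WB_STRIDE holds for one index in four, which throttles how often i40e_force_wb() fires the extra software interrupt. A tiny standalone check of that mask arithmetic (plain C, illustration only):

    #include <stdio.h>

    #define WB_STRIDE 0x3   /* same mask value as in the hunk above */

    int main(void)
    {
            unsigned int i;

            /* Indices 3, 7, 11, ... satisfy the stride test; for the others
             * the driver would leave arm_wb set (subject to its remaining
             * conditions).
             */
            for (i = 0; i < 12; i++)
                    printf("i=%2u  stride-aligned=%d\n", i,
                           (i & WB_STRIDE) == WB_STRIDE);
            return 0;
    }
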
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index c7f29626eada..4e15903b2b6d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -238,6 +238,7 @@ struct i40e_ring {
238 u8 atr_count; 238 u8 atr_count;
239 239
240 bool ring_active; /* is ring online or not */ 240 bool ring_active; /* is ring online or not */
241 bool arm_wb; /* do something to arm write back */
241 242
242 /* stats structs */ 243 /* stats structs */
243 struct i40e_queue_stats stats; 244 struct i40e_queue_stats stats;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 68aec11f6523..3d0fdaab5cc8 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -211,6 +211,7 @@ struct i40e_hw_capabilities {
211 bool evb_802_1_qbh; /* Bridge Port Extension */ 211 bool evb_802_1_qbh; /* Bridge Port Extension */
212 bool dcb; 212 bool dcb;
213 bool fcoe; 213 bool fcoe;
214 bool iscsi; /* Indicates iSCSI enabled */
214 bool mfp_mode_1; 215 bool mfp_mode_1;
215 bool mgmt_cem; 216 bool mgmt_cem;
216 bool ieee_1588; 217 bool ieee_1588;
@@ -425,7 +426,7 @@ struct i40e_hw {
425 u8 __iomem *hw_addr; 426 u8 __iomem *hw_addr;
426 void *back; 427 void *back;
427 428
428 /* function pointer structs */ 429 /* subsystem structs */
429 struct i40e_phy_info phy; 430 struct i40e_phy_info phy;
430 struct i40e_mac_info mac; 431 struct i40e_mac_info mac;
431 struct i40e_bus_info bus; 432 struct i40e_bus_info bus;
@@ -452,6 +453,11 @@ struct i40e_hw {
452 u8 pf_id; 453 u8 pf_id;
453 u16 main_vsi_seid; 454 u16 main_vsi_seid;
454 455
456 /* for multi-function MACs */
457 u16 partition_id;
458 u16 num_partitions;
459 u16 num_ports;
460
455 /* Closest numa node to the device */ 461 /* Closest numa node to the device */
456 u16 numa_node; 462 u16 numa_node;
457 463
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index cabaf599f562..8d8c201c63c1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -36,7 +36,7 @@ char i40evf_driver_name[] = "i40evf";
36static const char i40evf_driver_string[] = 36static const char i40evf_driver_string[] =
37 "Intel(R) XL710/X710 Virtual Function Network Driver"; 37 "Intel(R) XL710/X710 Virtual Function Network Driver";
38 38
39#define DRV_VERSION "1.0.6" 39#define DRV_VERSION "1.2.0"
40const char i40evf_driver_version[] = DRV_VERSION; 40const char i40evf_driver_version[] = DRV_VERSION;
41static const char i40evf_copyright[] = 41static const char i40evf_copyright[] =
42 "Copyright (c) 2013 - 2014 Intel Corporation."; 42 "Copyright (c) 2013 - 2014 Intel Corporation.";
@@ -313,10 +313,6 @@ static irqreturn_t i40evf_msix_aq(int irq, void *data)
313 val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK; 313 val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
314 wr32(hw, I40E_VFINT_DYN_CTL01, val); 314 wr32(hw, I40E_VFINT_DYN_CTL01, val);
315 315
316 /* re-enable interrupt causes */
317 wr32(hw, I40E_VFINT_ICR0_ENA1, ena_mask);
318 wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK);
319
320 /* schedule work on the private workqueue */ 316 /* schedule work on the private workqueue */
321 schedule_work(&adapter->adminq_task); 317 schedule_work(&adapter->adminq_task);
322 318
@@ -947,30 +943,6 @@ static int i40evf_up_complete(struct i40evf_adapter *adapter)
947} 943}
948 944
949/** 945/**
950 * i40evf_clean_all_rx_rings - Free Rx Buffers for all queues
951 * @adapter: board private structure
952 **/
953static void i40evf_clean_all_rx_rings(struct i40evf_adapter *adapter)
954{
955 int i;
956
957 for (i = 0; i < adapter->num_active_queues; i++)
958 i40evf_clean_rx_ring(adapter->rx_rings[i]);
959}
960
961/**
962 * i40evf_clean_all_tx_rings - Free Tx Buffers for all queues
963 * @adapter: board private structure
964 **/
965static void i40evf_clean_all_tx_rings(struct i40evf_adapter *adapter)
966{
967 int i;
968
969 for (i = 0; i < adapter->num_active_queues; i++)
970 i40evf_clean_tx_ring(adapter->tx_rings[i]);
971}
972
973/**
974 * i40e_down - Shutdown the connection processing 946 * i40e_down - Shutdown the connection processing
975 * @adapter: board private structure 947 * @adapter: board private structure
976 **/ 948 **/
@@ -982,6 +954,12 @@ void i40evf_down(struct i40evf_adapter *adapter)
982 if (adapter->state == __I40EVF_DOWN) 954 if (adapter->state == __I40EVF_DOWN)
983 return; 955 return;
984 956
957 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
958 &adapter->crit_section))
959 usleep_range(500, 1000);
960
961 i40evf_irq_disable(adapter);
962
985 /* remove all MAC filters */ 963 /* remove all MAC filters */
986 list_for_each_entry(f, &adapter->mac_filter_list, list) { 964 list_for_each_entry(f, &adapter->mac_filter_list, list) {
987 f->remove = true; 965 f->remove = true;
@@ -992,25 +970,27 @@ void i40evf_down(struct i40evf_adapter *adapter)
992 } 970 }
993 if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) && 971 if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
994 adapter->state != __I40EVF_RESETTING) { 972 adapter->state != __I40EVF_RESETTING) {
995 adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; 973 /* cancel any current operation */
974 adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
975 adapter->aq_pending = 0;
976 /* Schedule operations to close down the HW. Don't wait
977 * here for this to complete. The watchdog is still running
978 * and it will take care of this.
979 */
980 adapter->aq_required = I40EVF_FLAG_AQ_DEL_MAC_FILTER;
996 adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER; 981 adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
997 /* disable receives */
998 adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES; 982 adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
999 mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
1000 msleep(20);
1001 } 983 }
1002 netif_tx_disable(netdev); 984 netif_tx_disable(netdev);
1003 985
1004 netif_tx_stop_all_queues(netdev); 986 netif_tx_stop_all_queues(netdev);
1005 987
1006 i40evf_irq_disable(adapter);
1007
1008 i40evf_napi_disable_all(adapter); 988 i40evf_napi_disable_all(adapter);
1009 989
1010 netif_carrier_off(netdev); 990 msleep(20);
1011 991
1012 i40evf_clean_all_tx_rings(adapter); 992 netif_carrier_off(netdev);
1013 i40evf_clean_all_rx_rings(adapter); 993 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
1014} 994}
1015 995
1016/** 996/**
@@ -1356,8 +1336,13 @@ static void i40evf_watchdog_task(struct work_struct *work)
1356 /* Process admin queue tasks. After init, everything gets done 1336 /* Process admin queue tasks. After init, everything gets done
1357 * here so we don't race on the admin queue. 1337 * here so we don't race on the admin queue.
1358 */ 1338 */
1359 if (adapter->aq_pending) 1339 if (adapter->aq_pending) {
1340 if (!i40evf_asq_done(hw)) {
1341 dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n");
1342 i40evf_send_api_ver(adapter);
1343 }
1360 goto watchdog_done; 1344 goto watchdog_done;
1345 }
1361 1346
1362 if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) { 1347 if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) {
1363 i40evf_map_queues(adapter); 1348 i40evf_map_queues(adapter);
@@ -1401,11 +1386,14 @@ static void i40evf_watchdog_task(struct work_struct *work)
1401 1386
1402 if (adapter->state == __I40EVF_RUNNING) 1387 if (adapter->state == __I40EVF_RUNNING)
1403 i40evf_request_stats(adapter); 1388 i40evf_request_stats(adapter);
1404
1405 i40evf_irq_enable(adapter, true);
1406 i40evf_fire_sw_int(adapter, 0xFF);
1407
1408watchdog_done: 1389watchdog_done:
1390 if (adapter->state == __I40EVF_RUNNING) {
1391 i40evf_irq_enable_queues(adapter, ~0);
1392 i40evf_fire_sw_int(adapter, 0xFF);
1393 } else {
1394 i40evf_fire_sw_int(adapter, 0x1);
1395 }
1396
1409 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); 1397 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
1410restart_watchdog: 1398restart_watchdog:
1411 if (adapter->state == __I40EVF_REMOVE) 1399 if (adapter->state == __I40EVF_REMOVE)
@@ -1633,17 +1621,17 @@ static void i40evf_adminq_task(struct work_struct *work)
1633 u16 pending; 1621 u16 pending;
1634 1622
1635 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) 1623 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
1636 return; 1624 goto out;
1637 1625
1638 event.buf_len = I40EVF_MAX_AQ_BUF_SIZE; 1626 event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
1639 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); 1627 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
1640 if (!event.msg_buf) 1628 if (!event.msg_buf)
1641 return; 1629 goto out;
1642 1630
1643 v_msg = (struct i40e_virtchnl_msg *)&event.desc; 1631 v_msg = (struct i40e_virtchnl_msg *)&event.desc;
1644 do { 1632 do {
1645 ret = i40evf_clean_arq_element(hw, &event, &pending); 1633 ret = i40evf_clean_arq_element(hw, &event, &pending);
1646 if (ret) 1634 if (ret || !v_msg->v_opcode)
1647 break; /* No event to process or error cleaning ARQ */ 1635 break; /* No event to process or error cleaning ARQ */
1648 1636
1649 i40evf_virtchnl_completion(adapter, v_msg->v_opcode, 1637 i40evf_virtchnl_completion(adapter, v_msg->v_opcode,
@@ -1688,10 +1676,10 @@ static void i40evf_adminq_task(struct work_struct *work)
1688 if (oldval != val) 1676 if (oldval != val)
1689 wr32(hw, hw->aq.asq.len, val); 1677 wr32(hw, hw->aq.asq.len, val);
1690 1678
1679 kfree(event.msg_buf);
1680out:
1691 /* re-enable Admin queue interrupt cause */ 1681 /* re-enable Admin queue interrupt cause */
1692 i40evf_misc_irq_enable(adapter); 1682 i40evf_misc_irq_enable(adapter);
1693
1694 kfree(event.msg_buf);
1695} 1683}
1696 1684
1697/** 1685/**
@@ -2053,12 +2041,8 @@ static void i40evf_init_task(struct work_struct *work)
2053 /* aq msg sent, awaiting reply */ 2041 /* aq msg sent, awaiting reply */
2054 err = i40evf_verify_api_ver(adapter); 2042 err = i40evf_verify_api_ver(adapter);
2055 if (err) { 2043 if (err) {
2056 dev_info(&pdev->dev, "Unable to verify API version (%d), retrying\n", 2044 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
2057 err);
2058 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
2059 dev_info(&pdev->dev, "Resending request\n");
2060 err = i40evf_send_api_ver(adapter); 2045 err = i40evf_send_api_ver(adapter);
2061 }
2062 goto err; 2046 goto err;
2063 } 2047 }
2064 err = i40evf_send_vf_config_msg(adapter); 2048 err = i40evf_send_vf_config_msg(adapter);
@@ -2081,7 +2065,6 @@ static void i40evf_init_task(struct work_struct *work)
2081 } 2065 }
2082 err = i40evf_get_vf_config(adapter); 2066 err = i40evf_get_vf_config(adapter);
2083 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) { 2067 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
2084 dev_info(&pdev->dev, "Resending VF config request\n");
2085 err = i40evf_send_vf_config_msg(adapter); 2068 err = i40evf_send_vf_config_msg(adapter);
2086 goto err; 2069 goto err;
2087 } 2070 }
@@ -2230,12 +2213,18 @@ err:
2230static void i40evf_shutdown(struct pci_dev *pdev) 2213static void i40evf_shutdown(struct pci_dev *pdev)
2231{ 2214{
2232 struct net_device *netdev = pci_get_drvdata(pdev); 2215 struct net_device *netdev = pci_get_drvdata(pdev);
2216 struct i40evf_adapter *adapter = netdev_priv(netdev);
2233 2217
2234 netif_device_detach(netdev); 2218 netif_device_detach(netdev);
2235 2219
2236 if (netif_running(netdev)) 2220 if (netif_running(netdev))
2237 i40evf_close(netdev); 2221 i40evf_close(netdev);
2238 2222
2223 /* Prevent the watchdog from running. */
2224 adapter->state = __I40EVF_REMOVE;
2225 adapter->aq_required = 0;
2226 adapter->aq_pending = 0;
2227
2239#ifdef CONFIG_PM 2228#ifdef CONFIG_PM
2240 pci_save_state(pdev); 2229 pci_save_state(pdev);
2241 2230
@@ -2448,7 +2437,18 @@ static void i40evf_remove(struct pci_dev *pdev)
2448 unregister_netdev(netdev); 2437 unregister_netdev(netdev);
2449 adapter->netdev_registered = false; 2438 adapter->netdev_registered = false;
2450 } 2439 }
2440
2441 /* Shut down all the garbage mashers on the detention level */
2451 adapter->state = __I40EVF_REMOVE; 2442 adapter->state = __I40EVF_REMOVE;
2443 adapter->aq_required = 0;
2444 adapter->aq_pending = 0;
2445 i40evf_request_reset(adapter);
2446 msleep(20);
2447 /* If the FW isn't responding, kick it once, but only once. */
2448 if (!i40evf_asq_done(hw)) {
2449 i40evf_request_reset(adapter);
2450 msleep(20);
2451 }
2452 2452
2453 if (adapter->msix_entries) { 2453 if (adapter->msix_entries) {
2454 i40evf_misc_irq_disable(adapter); 2454 i40evf_misc_irq_disable(adapter);
@@ -2477,6 +2477,10 @@ static void i40evf_remove(struct pci_dev *pdev)
2477 list_del(&f->list); 2477 list_del(&f->list);
2478 kfree(f); 2478 kfree(f);
2479 } 2479 }
2480 list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
2481 list_del(&f->list);
2482 kfree(f);
2483 }
2480 2484
2481 free_netdev(netdev); 2485 free_netdev(netdev);
2482 2486
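
The remove path above now also drains the VLAN filter list, using the same list_for_each_entry_safe() walk as the MAC list; the _safe variant is required because each entry is unlinked and freed during the traversal. A minimal sketch of that pattern (demo_filter is a placeholder type, not the driver's):

    #include <linux/list.h>
    #include <linux/slab.h>

    struct demo_filter {
            struct list_head list;
            unsigned char addr[6];
    };

    static void demo_free_filter_list(struct list_head *head)
    {
            struct demo_filter *f, *ftmp;

            /* The _safe variant caches the next entry, so list_del()/kfree()
             * on the current one cannot break the traversal.
             */
            list_for_each_entry_safe(f, ftmp, head, list) {
                    list_del(&f->list);
                    kfree(f);
            }
    }
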
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 5fde5a7f4591..3f0c85ecbca6 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -715,14 +715,14 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
715 } 715 }
716 return; 716 return;
717 } 717 }
718 if (v_opcode != adapter->current_op)
719 dev_info(&adapter->pdev->dev, "Pending op is %d, received %d\n",
720 adapter->current_op, v_opcode);
721 if (v_retval) { 718 if (v_retval) {
722 dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n", 719 dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n",
723 __func__, v_retval, v_opcode); 720 __func__, v_retval, v_opcode);
724 } 721 }
725 switch (v_opcode) { 722 switch (v_opcode) {
723 case I40E_VIRTCHNL_OP_VERSION:
724 /* no action, but also not an error */
725 break;
726 case I40E_VIRTCHNL_OP_GET_STATS: { 726 case I40E_VIRTCHNL_OP_GET_STATS: {
727 struct i40e_eth_stats *stats = 727 struct i40e_eth_stats *stats =
728 (struct i40e_eth_stats *)msg; 728 (struct i40e_eth_stats *)msg;
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 82d891e183b1..c2bd4f98a837 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -29,7 +29,7 @@
29#include "e1000_mac.h" 29#include "e1000_mac.h"
30#include "e1000_82575.h" 30#include "e1000_82575.h"
31 31
32#include <linux/clocksource.h> 32#include <linux/timecounter.h>
33#include <linux/net_tstamp.h> 33#include <linux/net_tstamp.h>
34#include <linux/ptp_clock_kernel.h> 34#include <linux/ptp_clock_kernel.h>
35#include <linux/bitops.h> 35#include <linux/bitops.h>
@@ -343,6 +343,9 @@ struct hwmon_buff {
343 }; 343 };
344#endif 344#endif
345 345
346#define IGB_N_EXTTS 2
347#define IGB_N_PEROUT 2
348#define IGB_N_SDP 4
346#define IGB_RETA_SIZE 128 349#define IGB_RETA_SIZE 128
347 350
348/* board specific private data structure */ 351/* board specific private data structure */
@@ -439,6 +442,12 @@ struct igb_adapter {
439 u32 tx_hwtstamp_timeouts; 442 u32 tx_hwtstamp_timeouts;
440 u32 rx_hwtstamp_cleared; 443 u32 rx_hwtstamp_cleared;
441 444
445 struct ptp_pin_desc sdp_config[IGB_N_SDP];
446 struct {
447 struct timespec start;
448 struct timespec period;
449 } perout[IGB_N_PEROUT];
450
442 char fw_version[32]; 451 char fw_version[32];
443#ifdef CONFIG_IGB_HWMON 452#ifdef CONFIG_IGB_HWMON
444 struct hwmon_buff *igb_hwmon_buff; 453 struct hwmon_buff *igb_hwmon_buff;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index ff59897a9463..f366b3b96d03 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -5035,9 +5035,9 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
5035 5035
5036 skb_tx_timestamp(skb); 5036 skb_tx_timestamp(skb);
5037 5037
5038 if (vlan_tx_tag_present(skb)) { 5038 if (skb_vlan_tag_present(skb)) {
5039 tx_flags |= IGB_TX_FLAGS_VLAN; 5039 tx_flags |= IGB_TX_FLAGS_VLAN;
5040 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); 5040 tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
5041 } 5041 }
5042 5042
5043 /* record initial flags and protocol */ 5043 /* record initial flags and protocol */
@@ -5384,6 +5384,80 @@ void igb_update_stats(struct igb_adapter *adapter,
5384 } 5384 }
5385} 5385}
5386 5386
5387static void igb_tsync_interrupt(struct igb_adapter *adapter)
5388{
5389 struct e1000_hw *hw = &adapter->hw;
5390 struct ptp_clock_event event;
5391 struct timespec ts;
5392 u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR);
5393
5394 if (tsicr & TSINTR_SYS_WRAP) {
5395 event.type = PTP_CLOCK_PPS;
5396 if (adapter->ptp_caps.pps)
5397 ptp_clock_event(adapter->ptp_clock, &event);
5398 else
5399 dev_err(&adapter->pdev->dev, "unexpected SYS WRAP");
5400 ack |= TSINTR_SYS_WRAP;
5401 }
5402
5403 if (tsicr & E1000_TSICR_TXTS) {
5404 /* retrieve hardware timestamp */
5405 schedule_work(&adapter->ptp_tx_work);
5406 ack |= E1000_TSICR_TXTS;
5407 }
5408
5409 if (tsicr & TSINTR_TT0) {
5410 spin_lock(&adapter->tmreg_lock);
5411 ts = timespec_add(adapter->perout[0].start,
5412 adapter->perout[0].period);
5413 wr32(E1000_TRGTTIML0, ts.tv_nsec);
5414 wr32(E1000_TRGTTIMH0, ts.tv_sec);
5415 tsauxc = rd32(E1000_TSAUXC);
5416 tsauxc |= TSAUXC_EN_TT0;
5417 wr32(E1000_TSAUXC, tsauxc);
5418 adapter->perout[0].start = ts;
5419 spin_unlock(&adapter->tmreg_lock);
5420 ack |= TSINTR_TT0;
5421 }
5422
5423 if (tsicr & TSINTR_TT1) {
5424 spin_lock(&adapter->tmreg_lock);
5425 ts = timespec_add(adapter->perout[1].start,
5426 adapter->perout[1].period);
5427 wr32(E1000_TRGTTIML1, ts.tv_nsec);
5428 wr32(E1000_TRGTTIMH1, ts.tv_sec);
5429 tsauxc = rd32(E1000_TSAUXC);
5430 tsauxc |= TSAUXC_EN_TT1;
5431 wr32(E1000_TSAUXC, tsauxc);
5432 adapter->perout[1].start = ts;
5433 spin_unlock(&adapter->tmreg_lock);
5434 ack |= TSINTR_TT1;
5435 }
5436
5437 if (tsicr & TSINTR_AUTT0) {
5438 nsec = rd32(E1000_AUXSTMPL0);
5439 sec = rd32(E1000_AUXSTMPH0);
5440 event.type = PTP_CLOCK_EXTTS;
5441 event.index = 0;
5442 event.timestamp = sec * 1000000000ULL + nsec;
5443 ptp_clock_event(adapter->ptp_clock, &event);
5444 ack |= TSINTR_AUTT0;
5445 }
5446
5447 if (tsicr & TSINTR_AUTT1) {
5448 nsec = rd32(E1000_AUXSTMPL1);
5449 sec = rd32(E1000_AUXSTMPH1);
5450 event.type = PTP_CLOCK_EXTTS;
5451 event.index = 1;
5452 event.timestamp = sec * 1000000000ULL + nsec;
5453 ptp_clock_event(adapter->ptp_clock, &event);
5454 ack |= TSINTR_AUTT1;
5455 }
5456
5457 /* acknowledge the interrupts */
5458 wr32(E1000_TSICR, ack);
5459}
5460
5387static irqreturn_t igb_msix_other(int irq, void *data) 5461static irqreturn_t igb_msix_other(int irq, void *data)
5388{ 5462{
5389 struct igb_adapter *adapter = data; 5463 struct igb_adapter *adapter = data;
@@ -5415,16 +5489,8 @@ static irqreturn_t igb_msix_other(int irq, void *data)
5415 mod_timer(&adapter->watchdog_timer, jiffies + 1); 5489 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5416 } 5490 }
5417 5491
5418 if (icr & E1000_ICR_TS) { 5492 if (icr & E1000_ICR_TS)
5419 u32 tsicr = rd32(E1000_TSICR); 5493 igb_tsync_interrupt(adapter);
5420
5421 if (tsicr & E1000_TSICR_TXTS) {
5422 /* acknowledge the interrupt */
5423 wr32(E1000_TSICR, E1000_TSICR_TXTS);
5424 /* retrieve hardware timestamp */
5425 schedule_work(&adapter->ptp_tx_work);
5426 }
5427 }
5428 5494
5429 wr32(E1000_EIMS, adapter->eims_other); 5495 wr32(E1000_EIMS, adapter->eims_other);
5430 5496
@@ -6011,8 +6077,12 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
6011 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS; 6077 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
6012 6078
6013 /* reply to reset with ack and vf mac address */ 6079 /* reply to reset with ack and vf mac address */
6014 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK; 6080 if (!is_zero_ether_addr(vf_mac)) {
6015 memcpy(addr, vf_mac, ETH_ALEN); 6081 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
6082 memcpy(addr, vf_mac, ETH_ALEN);
6083 } else {
6084 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK;
6085 }
6016 igb_write_mbx(hw, msgbuf, 3, vf); 6086 igb_write_mbx(hw, msgbuf, 3, vf);
6017} 6087}
6018 6088
@@ -6203,16 +6273,8 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
6203 mod_timer(&adapter->watchdog_timer, jiffies + 1); 6273 mod_timer(&adapter->watchdog_timer, jiffies + 1);
6204 } 6274 }
6205 6275
6206 if (icr & E1000_ICR_TS) { 6276 if (icr & E1000_ICR_TS)
6207 u32 tsicr = rd32(E1000_TSICR); 6277 igb_tsync_interrupt(adapter);
6208
6209 if (tsicr & E1000_TSICR_TXTS) {
6210 /* acknowledge the interrupt */
6211 wr32(E1000_TSICR, E1000_TSICR_TXTS);
6212 /* retrieve hardware timestamp */
6213 schedule_work(&adapter->ptp_tx_work);
6214 }
6215 }
6216 6278
6217 napi_schedule(&q_vector->napi); 6279 napi_schedule(&q_vector->napi);
6218 6280
@@ -6257,16 +6319,8 @@ static irqreturn_t igb_intr(int irq, void *data)
6257 mod_timer(&adapter->watchdog_timer, jiffies + 1); 6319 mod_timer(&adapter->watchdog_timer, jiffies + 1);
6258 } 6320 }
6259 6321
6260 if (icr & E1000_ICR_TS) { 6322 if (icr & E1000_ICR_TS)
6261 u32 tsicr = rd32(E1000_TSICR); 6323 igb_tsync_interrupt(adapter);
6262
6263 if (tsicr & E1000_TSICR_TXTS) {
6264 /* acknowledge the interrupt */
6265 wr32(E1000_TSICR, E1000_TSICR_TXTS);
6266 /* retrieve hardware timestamp */
6267 schedule_work(&adapter->ptp_tx_work);
6268 }
6269 }
6270 6324
6271 napi_schedule(&q_vector->napi); 6325 napi_schedule(&q_vector->napi);
6272 6326
@@ -6527,15 +6581,17 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
6527 DMA_FROM_DEVICE); 6581 DMA_FROM_DEVICE);
6528} 6582}
6529 6583
6584static inline bool igb_page_is_reserved(struct page *page)
6585{
6586 return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
6587}
6588
6530static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, 6589static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
6531 struct page *page, 6590 struct page *page,
6532 unsigned int truesize) 6591 unsigned int truesize)
6533{ 6592{
6534 /* avoid re-using remote pages */ 6593 /* avoid re-using remote pages */
6535 if (unlikely(page_to_nid(page) != numa_node_id())) 6594 if (unlikely(igb_page_is_reserved(page)))
6536 return false;
6537
6538 if (unlikely(page->pfmemalloc))
6539 return false; 6595 return false;
6540 6596
6541#if (PAGE_SIZE < 8192) 6597#if (PAGE_SIZE < 8192)
@@ -6545,22 +6601,19 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
6545 6601
6546 /* flip page offset to other buffer */ 6602 /* flip page offset to other buffer */
6547 rx_buffer->page_offset ^= IGB_RX_BUFSZ; 6603 rx_buffer->page_offset ^= IGB_RX_BUFSZ;
6548
6549 /* Even if we own the page, we are not allowed to use atomic_set()
6550 * This would break get_page_unless_zero() users.
6551 */
6552 atomic_inc(&page->_count);
6553#else 6604#else
6554 /* move offset up to the next cache line */ 6605 /* move offset up to the next cache line */
6555 rx_buffer->page_offset += truesize; 6606 rx_buffer->page_offset += truesize;
6556 6607
6557 if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ)) 6608 if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
6558 return false; 6609 return false;
6559
6560 /* bump ref count on page before it is given to the stack */
6561 get_page(page);
6562#endif 6610#endif
6563 6611
6612 /* Even if we own the page, we are not allowed to use atomic_set()
6613 * This would break get_page_unless_zero() users.
6614 */
6615 atomic_inc(&page->_count);
6616
6564 return true; 6617 return true;
6565} 6618}
6566 6619
@@ -6603,13 +6656,12 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
6603 6656
6604 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); 6657 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
6605 6658
6606 /* we can reuse buffer as-is, just make sure it is local */ 6659 /* page is not reserved, we can reuse buffer as-is */
6607 if (likely((page_to_nid(page) == numa_node_id()) && 6660 if (likely(!igb_page_is_reserved(page)))
6608 !page->pfmemalloc))
6609 return true; 6661 return true;
6610 6662
6611 /* this page cannot be reused so discard it */ 6663 /* this page cannot be reused so discard it */
6612 put_page(page); 6664 __free_page(page);
6613 return false; 6665 return false;
6614 } 6666 }
6615 6667
@@ -6627,7 +6679,6 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
6627 struct page *page; 6679 struct page *page;
6628 6680
6629 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; 6681 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
6630
6631 page = rx_buffer->page; 6682 page = rx_buffer->page;
6632 prefetchw(page); 6683 prefetchw(page);
6633 6684
@@ -7042,8 +7093,8 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
7042 i -= rx_ring->count; 7093 i -= rx_ring->count;
7043 } 7094 }
7044 7095
7045 /* clear the hdr_addr for the next_to_use descriptor */ 7096 /* clear the status bits for the next_to_use descriptor */
7046 rx_desc->read.hdr_addr = 0; 7097 rx_desc->wb.upper.status_error = 0;
7047 7098
7048 cleaned_count--; 7099 cleaned_count--;
7049 } while (cleaned_count); 7100 } while (cleaned_count);
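
The new igb_tsync_interrupt() above reads TSICR once and forwards auxiliary timestamps and PPS to the PTP core. Reporting an external timestamp comes down to filling a struct ptp_clock_event and calling ptp_clock_event(), as sketched below for a clock assumed to be already registered; the demo_* helper and its sec/nsec parameters stand in for values read from the timestamping registers (AUXSTMPL/H in the hunk above):

    #include <linux/ptp_clock_kernel.h>
    #include <linux/types.h>

    /* Sketch only: report one external timestamp on channel @index. */
    static void demo_report_extts(struct ptp_clock *clock, int index,
                                  u32 sec, u32 nsec)
    {
            struct ptp_clock_event event;

            event.type = PTP_CLOCK_EXTTS;
            event.index = index;
            event.timestamp = (u64)sec * 1000000000ULL + nsec;

            ptp_clock_event(clock, &event);
    }
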
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 794c139f0cc0..d20fc8ed11f1 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -256,14 +256,9 @@ static int igb_ptp_adjtime_82576(struct ptp_clock_info *ptp, s64 delta)
256 struct igb_adapter *igb = container_of(ptp, struct igb_adapter, 256 struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
257 ptp_caps); 257 ptp_caps);
258 unsigned long flags; 258 unsigned long flags;
259 s64 now;
260 259
261 spin_lock_irqsave(&igb->tmreg_lock, flags); 260 spin_lock_irqsave(&igb->tmreg_lock, flags);
262 261 timecounter_adjtime(&igb->tc, delta);
263 now = timecounter_read(&igb->tc);
264 now += delta;
265 timecounter_init(&igb->tc, &igb->cc, now);
266
267 spin_unlock_irqrestore(&igb->tmreg_lock, flags); 262 spin_unlock_irqrestore(&igb->tmreg_lock, flags);
268 263
269 return 0; 264 return 0;
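
The adjtime callback above now uses timecounter_adjtime() instead of open-coding timecounter_read() plus timecounter_init() with the shifted value; the helper applies the signed nanosecond offset to the timecounter in one step (the igb.h hunk earlier in this diff likewise switches the include from <linux/clocksource.h> to <linux/timecounter.h>). A minimal sketch of the resulting callback shape, with demo_* placeholders for the adapter structure and its lock:

    #include <linux/timecounter.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    /* Sketch only: a stand-in for the adapter that owns the timecounter. */
    struct demo_clock {
            struct timecounter tc;
            spinlock_t lock;
    };

    static int demo_adjtime(struct demo_clock *clk, s64 delta)
    {
            unsigned long flags;

            spin_lock_irqsave(&clk->lock, flags);
            timecounter_adjtime(&clk->tc, delta);   /* shift tracked time by delta ns */
            spin_unlock_irqrestore(&clk->lock, flags);

            return 0;
    }
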
@@ -360,12 +355,239 @@ static int igb_ptp_settime_i210(struct ptp_clock_info *ptp,
360 return 0; 355 return 0;
361} 356}
362 357
358static void igb_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext)
359{
360 u32 *ptr = pin < 2 ? ctrl : ctrl_ext;
361 u32 mask[IGB_N_SDP] = {
362 E1000_CTRL_SDP0_DIR,
363 E1000_CTRL_SDP1_DIR,
364 E1000_CTRL_EXT_SDP2_DIR,
365 E1000_CTRL_EXT_SDP3_DIR,
366 };
367
368 if (input)
369 *ptr &= ~mask[pin];
370 else
371 *ptr |= mask[pin];
372}
373
374static void igb_pin_extts(struct igb_adapter *igb, int chan, int pin)
375{
376 struct e1000_hw *hw = &igb->hw;
377 u32 aux0_sel_sdp[IGB_N_SDP] = {
378 AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3,
379 };
380 u32 aux1_sel_sdp[IGB_N_SDP] = {
381 AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3,
382 };
383 u32 ts_sdp_en[IGB_N_SDP] = {
384 TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN,
385 };
386 u32 ctrl, ctrl_ext, tssdp = 0;
387
388 ctrl = rd32(E1000_CTRL);
389 ctrl_ext = rd32(E1000_CTRL_EXT);
390 tssdp = rd32(E1000_TSSDP);
391
392 igb_pin_direction(pin, 1, &ctrl, &ctrl_ext);
393
394 /* Make sure this pin is not enabled as an output. */
395 tssdp &= ~ts_sdp_en[pin];
396
397 if (chan == 1) {
398 tssdp &= ~AUX1_SEL_SDP3;
399 tssdp |= aux1_sel_sdp[pin] | AUX1_TS_SDP_EN;
400 } else {
401 tssdp &= ~AUX0_SEL_SDP3;
402 tssdp |= aux0_sel_sdp[pin] | AUX0_TS_SDP_EN;
403 }
404
405 wr32(E1000_TSSDP, tssdp);
406 wr32(E1000_CTRL, ctrl);
407 wr32(E1000_CTRL_EXT, ctrl_ext);
408}
409
410static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin)
411{
412 struct e1000_hw *hw = &igb->hw;
413 u32 aux0_sel_sdp[IGB_N_SDP] = {
414 AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3,
415 };
416 u32 aux1_sel_sdp[IGB_N_SDP] = {
417 AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3,
418 };
419 u32 ts_sdp_en[IGB_N_SDP] = {
420 TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN,
421 };
422 u32 ts_sdp_sel_tt0[IGB_N_SDP] = {
423 TS_SDP0_SEL_TT0, TS_SDP1_SEL_TT0,
424 TS_SDP2_SEL_TT0, TS_SDP3_SEL_TT0,
425 };
426 u32 ts_sdp_sel_tt1[IGB_N_SDP] = {
427 TS_SDP0_SEL_TT1, TS_SDP1_SEL_TT1,
428 TS_SDP2_SEL_TT1, TS_SDP3_SEL_TT1,
429 };
430 u32 ts_sdp_sel_clr[IGB_N_SDP] = {
431 TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1,
432 TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1,
433 };
434 u32 ctrl, ctrl_ext, tssdp = 0;
435
436 ctrl = rd32(E1000_CTRL);
437 ctrl_ext = rd32(E1000_CTRL_EXT);
438 tssdp = rd32(E1000_TSSDP);
439
440 igb_pin_direction(pin, 0, &ctrl, &ctrl_ext);
441
442 /* Make sure this pin is not enabled as an input. */
443 if ((tssdp & AUX0_SEL_SDP3) == aux0_sel_sdp[pin])
444 tssdp &= ~AUX0_TS_SDP_EN;
445
446 if ((tssdp & AUX1_SEL_SDP3) == aux1_sel_sdp[pin])
447 tssdp &= ~AUX1_TS_SDP_EN;
448
449 tssdp &= ~ts_sdp_sel_clr[pin];
450 if (chan == 1)
451 tssdp |= ts_sdp_sel_tt1[pin];
452 else
453 tssdp |= ts_sdp_sel_tt0[pin];
454
455 tssdp |= ts_sdp_en[pin];
456
457 wr32(E1000_TSSDP, tssdp);
458 wr32(E1000_CTRL, ctrl);
459 wr32(E1000_CTRL_EXT, ctrl_ext);
460}
461
462static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
463 struct ptp_clock_request *rq, int on)
464{
465 struct igb_adapter *igb =
466 container_of(ptp, struct igb_adapter, ptp_caps);
467 struct e1000_hw *hw = &igb->hw;
468 u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh;
469 unsigned long flags;
470 struct timespec ts;
471 int pin;
472 s64 ns;
473
474 switch (rq->type) {
475 case PTP_CLK_REQ_EXTTS:
476 if (on) {
477 pin = ptp_find_pin(igb->ptp_clock, PTP_PF_EXTTS,
478 rq->extts.index);
479 if (pin < 0)
480 return -EBUSY;
481 }
482 if (rq->extts.index == 1) {
483 tsauxc_mask = TSAUXC_EN_TS1;
484 tsim_mask = TSINTR_AUTT1;
485 } else {
486 tsauxc_mask = TSAUXC_EN_TS0;
487 tsim_mask = TSINTR_AUTT0;
488 }
489 spin_lock_irqsave(&igb->tmreg_lock, flags);
490 tsauxc = rd32(E1000_TSAUXC);
491 tsim = rd32(E1000_TSIM);
492 if (on) {
493 igb_pin_extts(igb, rq->extts.index, pin);
494 tsauxc |= tsauxc_mask;
495 tsim |= tsim_mask;
496 } else {
497 tsauxc &= ~tsauxc_mask;
498 tsim &= ~tsim_mask;
499 }
500 wr32(E1000_TSAUXC, tsauxc);
501 wr32(E1000_TSIM, tsim);
502 spin_unlock_irqrestore(&igb->tmreg_lock, flags);
503 return 0;
504
505 case PTP_CLK_REQ_PEROUT:
506 if (on) {
507 pin = ptp_find_pin(igb->ptp_clock, PTP_PF_PEROUT,
508 rq->perout.index);
509 if (pin < 0)
510 return -EBUSY;
511 }
512 ts.tv_sec = rq->perout.period.sec;
513 ts.tv_nsec = rq->perout.period.nsec;
514 ns = timespec_to_ns(&ts);
515 ns = ns >> 1;
516 if (on && ns < 500000LL) {
517 /* 2k interrupts per second is an awful lot. */
518 return -EINVAL;
519 }
520 ts = ns_to_timespec(ns);
521 if (rq->perout.index == 1) {
522 tsauxc_mask = TSAUXC_EN_TT1;
523 tsim_mask = TSINTR_TT1;
524 trgttiml = E1000_TRGTTIML1;
525 trgttimh = E1000_TRGTTIMH1;
526 } else {
527 tsauxc_mask = TSAUXC_EN_TT0;
528 tsim_mask = TSINTR_TT0;
529 trgttiml = E1000_TRGTTIML0;
530 trgttimh = E1000_TRGTTIMH0;
531 }
532 spin_lock_irqsave(&igb->tmreg_lock, flags);
533 tsauxc = rd32(E1000_TSAUXC);
534 tsim = rd32(E1000_TSIM);
535 if (on) {
536 int i = rq->perout.index;
537
538 igb_pin_perout(igb, i, pin);
539 igb->perout[i].start.tv_sec = rq->perout.start.sec;
540 igb->perout[i].start.tv_nsec = rq->perout.start.nsec;
541 igb->perout[i].period.tv_sec = ts.tv_sec;
542 igb->perout[i].period.tv_nsec = ts.tv_nsec;
543 wr32(trgttiml, rq->perout.start.sec);
544 wr32(trgttimh, rq->perout.start.nsec);
545 tsauxc |= tsauxc_mask;
546 tsim |= tsim_mask;
547 } else {
548 tsauxc &= ~tsauxc_mask;
549 tsim &= ~tsim_mask;
550 }
551 wr32(E1000_TSAUXC, tsauxc);
552 wr32(E1000_TSIM, tsim);
553 spin_unlock_irqrestore(&igb->tmreg_lock, flags);
554 return 0;
555
556 case PTP_CLK_REQ_PPS:
557 spin_lock_irqsave(&igb->tmreg_lock, flags);
558 tsim = rd32(E1000_TSIM);
559 if (on)
560 tsim |= TSINTR_SYS_WRAP;
561 else
562 tsim &= ~TSINTR_SYS_WRAP;
563 wr32(E1000_TSIM, tsim);
564 spin_unlock_irqrestore(&igb->tmreg_lock, flags);
565 return 0;
566 }
567
568 return -EOPNOTSUPP;
569}
570
363static int igb_ptp_feature_enable(struct ptp_clock_info *ptp, 571static int igb_ptp_feature_enable(struct ptp_clock_info *ptp,
364 struct ptp_clock_request *rq, int on) 572 struct ptp_clock_request *rq, int on)
365{ 573{
366 return -EOPNOTSUPP; 574 return -EOPNOTSUPP;
367} 575}
368 576
577static int igb_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
578 enum ptp_pin_function func, unsigned int chan)
579{
580 switch (func) {
581 case PTP_PF_NONE:
582 case PTP_PF_EXTTS:
583 case PTP_PF_PEROUT:
584 break;
585 case PTP_PF_PHYSYNC:
586 return -1;
587 }
588 return 0;
589}
590
369/** 591/**
370 * igb_ptp_tx_work 592 * igb_ptp_tx_work
371 * @work: pointer to work struct 593 * @work: pointer to work struct
@@ -756,6 +978,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
756{ 978{
757 struct e1000_hw *hw = &adapter->hw; 979 struct e1000_hw *hw = &adapter->hw;
758 struct net_device *netdev = adapter->netdev; 980 struct net_device *netdev = adapter->netdev;
981 int i;
759 982
760 switch (hw->mac.type) { 983 switch (hw->mac.type) {
761 case e1000_82576: 984 case e1000_82576:
@@ -770,7 +993,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
770 adapter->ptp_caps.settime = igb_ptp_settime_82576; 993 adapter->ptp_caps.settime = igb_ptp_settime_82576;
771 adapter->ptp_caps.enable = igb_ptp_feature_enable; 994 adapter->ptp_caps.enable = igb_ptp_feature_enable;
772 adapter->cc.read = igb_ptp_read_82576; 995 adapter->cc.read = igb_ptp_read_82576;
773 adapter->cc.mask = CLOCKSOURCE_MASK(64); 996 adapter->cc.mask = CYCLECOUNTER_MASK(64);
774 adapter->cc.mult = 1; 997 adapter->cc.mult = 1;
775 adapter->cc.shift = IGB_82576_TSYNC_SHIFT; 998 adapter->cc.shift = IGB_82576_TSYNC_SHIFT;
776 /* Dial the nominal frequency. */ 999 /* Dial the nominal frequency. */
@@ -790,7 +1013,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
790 adapter->ptp_caps.settime = igb_ptp_settime_82576; 1013 adapter->ptp_caps.settime = igb_ptp_settime_82576;
791 adapter->ptp_caps.enable = igb_ptp_feature_enable; 1014 adapter->ptp_caps.enable = igb_ptp_feature_enable;
792 adapter->cc.read = igb_ptp_read_82580; 1015 adapter->cc.read = igb_ptp_read_82580;
793 adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580); 1016 adapter->cc.mask = CYCLECOUNTER_MASK(IGB_NBITS_82580);
794 adapter->cc.mult = 1; 1017 adapter->cc.mult = 1;
795 adapter->cc.shift = 0; 1018 adapter->cc.shift = 0;
796 /* Enable the timer functions by clearing bit 31. */ 1019 /* Enable the timer functions by clearing bit 31. */
@@ -798,16 +1021,27 @@ void igb_ptp_init(struct igb_adapter *adapter)
798 break; 1021 break;
799 case e1000_i210: 1022 case e1000_i210:
800 case e1000_i211: 1023 case e1000_i211:
1024 for (i = 0; i < IGB_N_SDP; i++) {
1025 struct ptp_pin_desc *ppd = &adapter->sdp_config[i];
1026
1027 snprintf(ppd->name, sizeof(ppd->name), "SDP%d", i);
1028 ppd->index = i;
1029 ppd->func = PTP_PF_NONE;
1030 }
801 snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); 1031 snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
802 adapter->ptp_caps.owner = THIS_MODULE; 1032 adapter->ptp_caps.owner = THIS_MODULE;
803 adapter->ptp_caps.max_adj = 62499999; 1033 adapter->ptp_caps.max_adj = 62499999;
804 adapter->ptp_caps.n_ext_ts = 0; 1034 adapter->ptp_caps.n_ext_ts = IGB_N_EXTTS;
805 adapter->ptp_caps.pps = 0; 1035 adapter->ptp_caps.n_per_out = IGB_N_PEROUT;
1036 adapter->ptp_caps.n_pins = IGB_N_SDP;
1037 adapter->ptp_caps.pps = 1;
1038 adapter->ptp_caps.pin_config = adapter->sdp_config;
806 adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580; 1039 adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
807 adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210; 1040 adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
808 adapter->ptp_caps.gettime = igb_ptp_gettime_i210; 1041 adapter->ptp_caps.gettime = igb_ptp_gettime_i210;
809 adapter->ptp_caps.settime = igb_ptp_settime_i210; 1042 adapter->ptp_caps.settime = igb_ptp_settime_i210;
810 adapter->ptp_caps.enable = igb_ptp_feature_enable; 1043 adapter->ptp_caps.enable = igb_ptp_feature_enable_i210;
1044 adapter->ptp_caps.verify = igb_ptp_verify_pin;
811 /* Enable the timer functions by clearing bit 31. */ 1045 /* Enable the timer functions by clearing bit 31. */
812 wr32(E1000_TSAUXC, 0x0); 1046 wr32(E1000_TSAUXC, 0x0);
813 break; 1047 break;
@@ -905,6 +1139,7 @@ void igb_ptp_stop(struct igb_adapter *adapter)
905void igb_ptp_reset(struct igb_adapter *adapter) 1139void igb_ptp_reset(struct igb_adapter *adapter)
906{ 1140{
907 struct e1000_hw *hw = &adapter->hw; 1141 struct e1000_hw *hw = &adapter->hw;
1142 unsigned long flags;
908 1143
909 if (!(adapter->flags & IGB_FLAG_PTP)) 1144 if (!(adapter->flags & IGB_FLAG_PTP))
910 return; 1145 return;
@@ -912,6 +1147,8 @@ void igb_ptp_reset(struct igb_adapter *adapter)
912 /* reset the tstamp_config */ 1147 /* reset the tstamp_config */
913 igb_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); 1148 igb_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
914 1149
1150 spin_lock_irqsave(&adapter->tmreg_lock, flags);
1151
915 switch (adapter->hw.mac.type) { 1152 switch (adapter->hw.mac.type) {
916 case e1000_82576: 1153 case e1000_82576:
917 /* Dial the nominal frequency. */ 1154 /* Dial the nominal frequency. */
@@ -922,23 +1159,25 @@ void igb_ptp_reset(struct igb_adapter *adapter)
922 case e1000_i350: 1159 case e1000_i350:
923 case e1000_i210: 1160 case e1000_i210:
924 case e1000_i211: 1161 case e1000_i211:
925 /* Enable the timer functions and interrupts. */
926 wr32(E1000_TSAUXC, 0x0); 1162 wr32(E1000_TSAUXC, 0x0);
1163 wr32(E1000_TSSDP, 0x0);
927 wr32(E1000_TSIM, TSYNC_INTERRUPTS); 1164 wr32(E1000_TSIM, TSYNC_INTERRUPTS);
928 wr32(E1000_IMS, E1000_IMS_TS); 1165 wr32(E1000_IMS, E1000_IMS_TS);
929 break; 1166 break;
930 default: 1167 default:
931 /* No work to do. */ 1168 /* No work to do. */
932 return; 1169 goto out;
933 } 1170 }
934 1171
935 /* Re-initialize the timer. */ 1172 /* Re-initialize the timer. */
936 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) { 1173 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
937 struct timespec ts = ktime_to_timespec(ktime_get_real()); 1174 struct timespec ts = ktime_to_timespec(ktime_get_real());
938 1175
939 igb_ptp_settime_i210(&adapter->ptp_caps, &ts); 1176 igb_ptp_write_i210(adapter, &ts);
940 } else { 1177 } else {
941 timecounter_init(&adapter->tc, &adapter->cc, 1178 timecounter_init(&adapter->tc, &adapter->cc,
942 ktime_to_ns(ktime_get_real())); 1179 ktime_to_ns(ktime_get_real()));
943 } 1180 }
1181out:
1182 spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
944} 1183}
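
The 82576 adjtime path above is reduced to a single timecounter_adjtime() call; the removed lines show exactly the read/add/re-init sequence it stands in for. A minimal sketch of the two forms, assuming only <linux/timecounter.h> and the locking already present in the driver:

/* Old, open-coded adjustment (what the deleted lines did): */
static void adjtime_open_coded(struct timecounter *tc,
			       const struct cyclecounter *cc, s64 delta)
{
	u64 now = timecounter_read(tc);	/* current time in ns */

	now += delta;			/* apply the requested offset */
	timecounter_init(tc, cc, now);	/* restart from the new time */
}

/* New: one helper call, still under tmreg_lock in the caller. */
static void adjtime_helper(struct timecounter *tc, s64 delta)
{
	timecounter_adjtime(tc, delta);
}

On the i210 side, igb_ptp_feature_enable_i210() halves the requested periodic-output period (ns >> 1) because the target-time logic raises an interrupt on every edge; rejecting half-periods below 500000 ns therefore caps the load at roughly 2000 target-time interrupts per second, which is what the "2k interrupts per second" comment is guarding against.
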
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index edea13b0ee85..ebf9d4a42fdd 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2237,9 +2237,10 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
2237 return NETDEV_TX_BUSY; 2237 return NETDEV_TX_BUSY;
2238 } 2238 }
2239 2239
2240 if (vlan_tx_tag_present(skb)) { 2240 if (skb_vlan_tag_present(skb)) {
2241 tx_flags |= IGBVF_TX_FLAGS_VLAN; 2241 tx_flags |= IGBVF_TX_FLAGS_VLAN;
2242 tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT); 2242 tx_flags |= (skb_vlan_tag_get(skb) <<
2243 IGBVF_TX_FLAGS_VLAN_SHIFT);
2243 } 2244 }
2244 2245
2245 if (protocol == htons(ETH_P_IP)) 2246 if (protocol == htons(ETH_P_IP))
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index aa87605b144a..11a1bdbe3fd9 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -1532,9 +1532,9 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1532 DESC_NEEDED))) 1532 DESC_NEEDED)))
1533 return NETDEV_TX_BUSY; 1533 return NETDEV_TX_BUSY;
1534 1534
1535 if (vlan_tx_tag_present(skb)) { 1535 if (skb_vlan_tag_present(skb)) {
1536 tx_flags |= IXGB_TX_FLAGS_VLAN; 1536 tx_flags |= IXGB_TX_FLAGS_VLAN;
1537 vlan_id = vlan_tx_tag_get(skb); 1537 vlan_id = skb_vlan_tag_get(skb);
1538 } 1538 }
1539 1539
1540 first = adapter->tx_ring.next_to_use; 1540 first = adapter->tx_ring.next_to_use;
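
The igbvf and ixgb hunks above (and the ixgbe hunk further down) carry the same mechanical rename of the VLAN tag helpers; the mapping, for readers grepping older trees:

/* Both old and new helpers live in <linux/if_vlan.h>:
 *
 *   vlan_tx_tag_present(skb)  ->  skb_vlan_tag_present(skb)
 *   vlan_tx_tag_get(skb)      ->  skb_vlan_tag_get(skb)
 */
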
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index b6137be43920..7dcbbec09a70 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -38,7 +38,7 @@
38#include <linux/if_vlan.h> 38#include <linux/if_vlan.h>
39#include <linux/jiffies.h> 39#include <linux/jiffies.h>
40 40
41#include <linux/clocksource.h> 41#include <linux/timecounter.h>
42#include <linux/net_tstamp.h> 42#include <linux/net_tstamp.h>
43#include <linux/ptp_clock_kernel.h> 43#include <linux/ptp_clock_kernel.h>
44 44
@@ -76,6 +76,8 @@
76#define IXGBE_MAX_RXD 4096 76#define IXGBE_MAX_RXD 4096
77#define IXGBE_MIN_RXD 64 77#define IXGBE_MIN_RXD 64
78 78
79#define IXGBE_ETH_P_LLDP 0x88CC
80
79/* flow control */ 81/* flow control */
80#define IXGBE_MIN_FCRTL 0x40 82#define IXGBE_MIN_FCRTL 0x40
81#define IXGBE_MAX_FCRTL 0x7FF80 83#define IXGBE_MAX_FCRTL 0x7FF80
@@ -753,6 +755,7 @@ struct ixgbe_adapter {
753 u32 timer_event_accumulator; 755 u32 timer_event_accumulator;
754 u32 vferr_refcount; 756 u32 vferr_refcount;
755 struct ixgbe_mac_addr *mac_table; 757 struct ixgbe_mac_addr *mac_table;
758 u16 vxlan_port;
756 struct kobject *info_kobj; 759 struct kobject *info_kobj;
757#ifdef CONFIG_IXGBE_HWMON 760#ifdef CONFIG_IXGBE_HWMON
758 struct hwmon_buff *ixgbe_hwmon_buff; 761 struct hwmon_buff *ixgbe_hwmon_buff;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 67b02bde179e..70cc4c5c0a01 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -50,6 +50,7 @@
50#include <linux/if_bridge.h> 50#include <linux/if_bridge.h>
51#include <linux/prefetch.h> 51#include <linux/prefetch.h>
52#include <scsi/fc/fc_fcoe.h> 52#include <scsi/fc/fc_fcoe.h>
53#include <net/vxlan.h>
53 54
54#ifdef CONFIG_OF 55#ifdef CONFIG_OF
55#include <linux/of_net.h> 56#include <linux/of_net.h>
@@ -1396,12 +1397,23 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
1396 union ixgbe_adv_rx_desc *rx_desc, 1397 union ixgbe_adv_rx_desc *rx_desc,
1397 struct sk_buff *skb) 1398 struct sk_buff *skb)
1398{ 1399{
1400 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1401 __le16 hdr_info = rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
1402 bool encap_pkt = false;
1403
1399 skb_checksum_none_assert(skb); 1404 skb_checksum_none_assert(skb);
1400 1405
1401 /* Rx csum disabled */ 1406 /* Rx csum disabled */
1402 if (!(ring->netdev->features & NETIF_F_RXCSUM)) 1407 if (!(ring->netdev->features & NETIF_F_RXCSUM))
1403 return; 1408 return;
1404 1409
1410 if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) &&
1411 (hdr_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_TUNNEL >> 16))) {
1412 encap_pkt = true;
1413 skb->encapsulation = 1;
1414 skb->ip_summed = CHECKSUM_NONE;
1415 }
1416
1405 /* if IP and error */ 1417 /* if IP and error */
1406 if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) && 1418 if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
1407 ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) { 1419 ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
@@ -1413,8 +1425,6 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
1413 return; 1425 return;
1414 1426
1415 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) { 1427 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
1416 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1417
1418 /* 1428 /*
1419 * 82599 errata, UDP frames with a 0 checksum can be marked as 1429 * 82599 errata, UDP frames with a 0 checksum can be marked as
1420 * checksum errors. 1430 * checksum errors.
@@ -1429,6 +1439,17 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
1429 1439
1430 /* It must be a TCP or UDP packet with a valid checksum */ 1440 /* It must be a TCP or UDP packet with a valid checksum */
1431 skb->ip_summed = CHECKSUM_UNNECESSARY; 1441 skb->ip_summed = CHECKSUM_UNNECESSARY;
1442 if (encap_pkt) {
1443 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS))
1444 return;
1445
1446 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) {
1447 ring->rx_stats.csum_err++;
1448 return;
1449 }
1450 /* If we checked the outer header let the stack know */
1451 skb->csum_level = 1;
1452 }
1432} 1453}
1433 1454
1434static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, 1455static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
@@ -3564,10 +3585,24 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
3564 /* Enable MAC Anti-Spoofing */ 3585 /* Enable MAC Anti-Spoofing */
3565 hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0), 3586 hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
3566 adapter->num_vfs); 3587 adapter->num_vfs);
3588
3589 /* Ensure LLDP is set for Ethertype Antispoofing if we will be
3590 * calling set_ethertype_anti_spoofing for each VF in loop below
3591 */
3592 if (hw->mac.ops.set_ethertype_anti_spoofing)
3593 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
3594 (IXGBE_ETQF_FILTER_EN | /* enable filter */
3595 IXGBE_ETQF_TX_ANTISPOOF | /* tx antispoof */
3596 IXGBE_ETH_P_LLDP)); /* LLDP eth type */
3597
3567 /* For VFs that have spoof checking turned off */ 3598 /* For VFs that have spoof checking turned off */
3568 for (i = 0; i < adapter->num_vfs; i++) { 3599 for (i = 0; i < adapter->num_vfs; i++) {
3569 if (!adapter->vfinfo[i].spoofchk_enabled) 3600 if (!adapter->vfinfo[i].spoofchk_enabled)
3570 ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false); 3601 ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false);
3602
3603 /* enable ethertype anti spoofing if hw supports it */
3604 if (hw->mac.ops.set_ethertype_anti_spoofing)
3605 hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i);
3571 } 3606 }
3572} 3607}
3573 3608
@@ -5627,6 +5662,10 @@ static int ixgbe_open(struct net_device *netdev)
5627 5662
5628 ixgbe_up_complete(adapter); 5663 ixgbe_up_complete(adapter);
5629 5664
5665#if IS_ENABLED(CONFIG_IXGBE_VXLAN)
5666 vxlan_get_rx_port(netdev);
5667
5668#endif
5630 return 0; 5669 return 0;
5631 5670
5632err_set_queues: 5671err_set_queues:
@@ -7217,8 +7256,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
7217 first->gso_segs = 1; 7256 first->gso_segs = 1;
7218 7257
7219 /* if we have a HW VLAN tag being added default to the HW one */ 7258 /* if we have a HW VLAN tag being added default to the HW one */
7220 if (vlan_tx_tag_present(skb)) { 7259 if (skb_vlan_tag_present(skb)) {
7221 tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT; 7260 tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
7222 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN; 7261 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
7223 /* else if it is a SW VLAN check the next protocol and store the tag */ 7262 /* else if it is a SW VLAN check the next protocol and store the tag */
7224 } else if (protocol == htons(ETH_P_8021Q)) { 7263 } else if (protocol == htons(ETH_P_8021Q)) {
@@ -7771,6 +7810,64 @@ static int ixgbe_set_features(struct net_device *netdev,
7771 return 0; 7810 return 0;
7772} 7811}
7773 7812
7813/**
7814 * ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up
7815 * @dev: The port's netdev
 7816 * @sa_family: Socket Family that VXLAN is notifying us about
7817 * @port: New UDP port number that VXLAN started listening to
7818 **/
7819static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
7820 __be16 port)
7821{
7822 struct ixgbe_adapter *adapter = netdev_priv(dev);
7823 struct ixgbe_hw *hw = &adapter->hw;
7824 u16 new_port = ntohs(port);
7825
7826 if (sa_family == AF_INET6)
7827 return;
7828
7829 if (adapter->vxlan_port == new_port) {
7830 netdev_info(dev, "Port %d already offloaded\n", new_port);
7831 return;
7832 }
7833
7834 if (adapter->vxlan_port) {
7835 netdev_info(dev,
7836 "Hit Max num of UDP ports, not adding port %d\n",
7837 new_port);
7838 return;
7839 }
7840
7841 adapter->vxlan_port = new_port;
7842 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, new_port);
7843}
7844
7845/**
7846 * ixgbe_del_vxlan_port - Get notifications about VXLAN ports that go away
7847 * @dev: The port's netdev
7848 * @sa_family: Socket Family that VXLAN is notifying us about
7849 * @port: UDP port number that VXLAN stopped listening to
7850 **/
7851static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
7852 __be16 port)
7853{
7854 struct ixgbe_adapter *adapter = netdev_priv(dev);
7855 struct ixgbe_hw *hw = &adapter->hw;
7856 u16 new_port = ntohs(port);
7857
7858 if (sa_family == AF_INET6)
7859 return;
7860
7861 if (adapter->vxlan_port != new_port) {
7862 netdev_info(dev, "Port %d was not found, not deleting\n",
7863 new_port);
7864 return;
7865 }
7866
7867 adapter->vxlan_port = 0;
7868 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, 0);
7869}
7870
7774static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], 7871static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
7775 struct net_device *dev, 7872 struct net_device *dev,
7776 const unsigned char *addr, u16 vid, 7873 const unsigned char *addr, u16 vid,
@@ -7786,7 +7883,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
7786} 7883}
7787 7884
7788static int ixgbe_ndo_bridge_setlink(struct net_device *dev, 7885static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
7789 struct nlmsghdr *nlh) 7886 struct nlmsghdr *nlh, u16 flags)
7790{ 7887{
7791 struct ixgbe_adapter *adapter = netdev_priv(dev); 7888 struct ixgbe_adapter *adapter = netdev_priv(dev);
7792 struct nlattr *attr, *br_spec; 7889 struct nlattr *attr, *br_spec;
@@ -7982,6 +8079,8 @@ static const struct net_device_ops ixgbe_netdev_ops = {
7982 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, 8079 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
7983 .ndo_dfwd_add_station = ixgbe_fwd_add, 8080 .ndo_dfwd_add_station = ixgbe_fwd_add,
7984 .ndo_dfwd_del_station = ixgbe_fwd_del, 8081 .ndo_dfwd_del_station = ixgbe_fwd_del,
8082 .ndo_add_vxlan_port = ixgbe_add_vxlan_port,
8083 .ndo_del_vxlan_port = ixgbe_del_vxlan_port,
7985}; 8084};
7986 8085
7987/** 8086/**
@@ -8339,6 +8438,15 @@ skip_sriov:
8339 netdev->priv_flags |= IFF_UNICAST_FLT; 8438 netdev->priv_flags |= IFF_UNICAST_FLT;
8340 netdev->priv_flags |= IFF_SUPP_NOFCS; 8439 netdev->priv_flags |= IFF_SUPP_NOFCS;
8341 8440
8441 switch (adapter->hw.mac.type) {
8442 case ixgbe_mac_X550:
8443 case ixgbe_mac_X550EM_x:
8444 netdev->hw_enc_features |= NETIF_F_RXCSUM;
8445 break;
8446 default:
8447 break;
8448 }
8449
8342#ifdef CONFIG_IXGBE_DCB 8450#ifdef CONFIG_IXGBE_DCB
8343 netdev->dcbnl_ops = &dcbnl_ops; 8451 netdev->dcbnl_ops = &dcbnl_ops;
8344#endif 8452#endif
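
The LLDP ethertype filter programmed in ixgbe_configure_virtualization() packs three fields into one ETQF register value; using the constants added to ixgbe_type.h later in this diff, the value works out to:

/* IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP) =
 *
 *     IXGBE_ETQF_FILTER_EN      0x80000000   enable the filter
 *   | IXGBE_ETQF_TX_ANTISPOOF   0x20000000   use it for Tx anti-spoofing
 *   | IXGBE_ETH_P_LLDP          0x000088CC   LLDP EtherType
 *   ------------------------------------------------------
 *   = 0xA00088CC
 */

The VXLAN offload side is driven entirely by notifications: vxlan_get_rx_port(), called from ixgbe_open(), asks the vxlan module to replay its known UDP ports through the new .ndo_add_vxlan_port callback, so a tunnel configured before the interface came up still lands in IXGBE_VXLANCTRL; only one offloaded port is supported at a time, hence the "Hit Max num of UDP ports" message.
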
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 5fd4b5271f9a..79c00f57d3e7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -261,18 +261,9 @@ static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
261 struct ixgbe_adapter *adapter = 261 struct ixgbe_adapter *adapter =
262 container_of(ptp, struct ixgbe_adapter, ptp_caps); 262 container_of(ptp, struct ixgbe_adapter, ptp_caps);
263 unsigned long flags; 263 unsigned long flags;
264 u64 now;
265 264
266 spin_lock_irqsave(&adapter->tmreg_lock, flags); 265 spin_lock_irqsave(&adapter->tmreg_lock, flags);
267 266 timecounter_adjtime(&adapter->tc, delta);
268 now = timecounter_read(&adapter->tc);
269 now += delta;
270
271 /* reset the timecounter */
272 timecounter_init(&adapter->tc,
273 &adapter->cc,
274 now);
275
276 spin_unlock_irqrestore(&adapter->tmreg_lock, flags); 267 spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
277 268
278 ixgbe_ptp_setup_sdp(adapter); 269 ixgbe_ptp_setup_sdp(adapter);
@@ -802,7 +793,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
802 793
803 memset(&adapter->cc, 0, sizeof(adapter->cc)); 794 memset(&adapter->cc, 0, sizeof(adapter->cc));
804 adapter->cc.read = ixgbe_ptp_read; 795 adapter->cc.read = ixgbe_ptp_read;
805 adapter->cc.mask = CLOCKSOURCE_MASK(64); 796 adapter->cc.mask = CYCLECOUNTER_MASK(64);
806 adapter->cc.shift = shift; 797 adapter->cc.shift = shift;
807 adapter->cc.mult = 1; 798 adapter->cc.mult = 1;
808 799
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index c76ba90ecc6e..7f37fe7269a7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -101,9 +101,6 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
101 adapter->dcb_cfg.num_tcs.pfc_tcs = 1; 101 adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
102 } 102 }
103 103
104 /* We do not support RSS w/ SR-IOV */
105 adapter->ring_feature[RING_F_RSS].limit = 1;
106
107 /* Disable RSC when in SR-IOV mode */ 104 /* Disable RSC when in SR-IOV mode */
108 adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE | 105 adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
109 IXGBE_FLAG2_RSC_ENABLED); 106 IXGBE_FLAG2_RSC_ENABLED);
@@ -1097,14 +1094,12 @@ static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf,
1097 u16 vlan, u8 qos) 1094 u16 vlan, u8 qos)
1098{ 1095{
1099 struct ixgbe_hw *hw = &adapter->hw; 1096 struct ixgbe_hw *hw = &adapter->hw;
1100 int err = 0; 1097 int err;
1101 1098
1102 if (adapter->vfinfo[vf].pf_vlan) 1099 err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
1103 err = ixgbe_set_vf_vlan(adapter, false,
1104 adapter->vfinfo[vf].pf_vlan,
1105 vf);
1106 if (err) 1100 if (err)
1107 goto out; 1101 goto out;
1102
1108 ixgbe_set_vmvir(adapter, vlan, qos, vf); 1103 ixgbe_set_vmvir(adapter, vlan, qos, vf);
1109 ixgbe_set_vmolr(hw, vf, false); 1104 ixgbe_set_vmolr(hw, vf, false);
1110 if (adapter->vfinfo[vf].spoofchk_enabled) 1105 if (adapter->vfinfo[vf].spoofchk_enabled)
@@ -1143,6 +1138,11 @@ static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf)
1143 hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); 1138 hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
1144 if (adapter->vfinfo[vf].vlan_count) 1139 if (adapter->vfinfo[vf].vlan_count)
1145 adapter->vfinfo[vf].vlan_count--; 1140 adapter->vfinfo[vf].vlan_count--;
1141
1142 /* disable hide VLAN on X550 */
1143 if (hw->mac.type >= ixgbe_mac_X550)
1144 ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE);
1145
1146 adapter->vfinfo[vf].pf_vlan = 0; 1146 adapter->vfinfo[vf].pf_vlan = 0;
1147 adapter->vfinfo[vf].pf_qos = 0; 1147 adapter->vfinfo[vf].pf_qos = 0;
1148 1148
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index d101b25dc4b6..fc5ecee56ca8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -378,6 +378,8 @@ struct ixgbe_thermal_sensor_data {
378#define IXGBE_SPOOF_MACAS_MASK 0xFF 378#define IXGBE_SPOOF_MACAS_MASK 0xFF
379#define IXGBE_SPOOF_VLANAS_MASK 0xFF00 379#define IXGBE_SPOOF_VLANAS_MASK 0xFF00
380#define IXGBE_SPOOF_VLANAS_SHIFT 8 380#define IXGBE_SPOOF_VLANAS_SHIFT 8
381#define IXGBE_SPOOF_ETHERTYPEAS 0xFF000000
382#define IXGBE_SPOOF_ETHERTYPEAS_SHIFT 16
381#define IXGBE_PFVFSPOOF_REG_COUNT 8 383#define IXGBE_PFVFSPOOF_REG_COUNT 8
382 384
383#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */ 385#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
@@ -399,6 +401,7 @@ struct ixgbe_thermal_sensor_data {
399 401
400#define IXGBE_WUPL 0x05900 402#define IXGBE_WUPL 0x05900
401#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */ 403#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
404#define IXGBE_VXLANCTRL 0x0000507C /* Rx filter VXLAN UDPPORT Register */
402#define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */ 405#define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */
403#define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100)) /* Ext Flexible Host 406#define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100)) /* Ext Flexible Host
404 * Filter Table */ 407 * Filter Table */
@@ -1540,6 +1543,7 @@ enum {
1540#define IXGBE_MAX_ETQF_FILTERS 8 1543#define IXGBE_MAX_ETQF_FILTERS 8
1541#define IXGBE_ETQF_FCOE 0x08000000 /* bit 27 */ 1544#define IXGBE_ETQF_FCOE 0x08000000 /* bit 27 */
1542#define IXGBE_ETQF_BCN 0x10000000 /* bit 28 */ 1545#define IXGBE_ETQF_BCN 0x10000000 /* bit 28 */
1546#define IXGBE_ETQF_TX_ANTISPOOF 0x20000000 /* bit 29 */
1543#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */ 1547#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */
1544#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */ 1548#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */
1545#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */ 1549#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */
@@ -1565,6 +1569,9 @@ enum {
1565#define IXGBE_ETQF_FILTER_FCOE 2 1569#define IXGBE_ETQF_FILTER_FCOE 2
1566#define IXGBE_ETQF_FILTER_1588 3 1570#define IXGBE_ETQF_FILTER_1588 3
1567#define IXGBE_ETQF_FILTER_FIP 4 1571#define IXGBE_ETQF_FILTER_FIP 4
1572#define IXGBE_ETQF_FILTER_LLDP 5
1573#define IXGBE_ETQF_FILTER_LACP 6
1574
1568/* VLAN Control Bit Masks */ 1575/* VLAN Control Bit Masks */
1569#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */ 1576#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */
1570#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */ 1577#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */
@@ -2122,6 +2129,7 @@ enum {
2122#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ 2129#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
2123#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ 2130#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */
2124#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ 2131#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */
2132#define IXGBE_RXD_STAT_OUTERIPCS 0x100 /* Cloud IP xsum calculated */
2125#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */ 2133#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */
2126#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ 2134#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
2127#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ 2135#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
@@ -2139,6 +2147,7 @@ enum {
2139#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ 2147#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */
2140#define IXGBE_RXDADV_ERR_MASK 0xfff00000 /* RDESC.ERRORS mask */ 2148#define IXGBE_RXDADV_ERR_MASK 0xfff00000 /* RDESC.ERRORS mask */
2141#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */ 2149#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */
2150#define IXGBE_RXDADV_ERR_OUTERIPER 0x04000000 /* CRC IP Header error */
2142#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCoEFe/IPE */ 2151#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCoEFe/IPE */
2143#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */ 2152#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */
2144#define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */ 2153#define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */
@@ -2227,6 +2236,8 @@ enum {
2227#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ 2236#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
2228#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ 2237#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
2229#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ 2238#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
2239#define IXGBE_RXDADV_PKTTYPE_VXLAN 0x00000800 /* VXLAN hdr present */
2240#define IXGBE_RXDADV_PKTTYPE_TUNNEL 0x00010000 /* Tunnel type */
2230#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ 2241#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */
2231#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ 2242#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */
2232#define IXGBE_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */ 2243#define IXGBE_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */
@@ -3056,6 +3067,7 @@ struct ixgbe_mac_operations {
3056 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); 3067 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
3057 s32 (*get_thermal_sensor_data)(struct ixgbe_hw *); 3068 s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
3058 s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); 3069 s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
3070 void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int);
3059 3071
3060 /* DMA Coalescing */ 3072 /* DMA Coalescing */
3061 s32 (*dmac_config)(struct ixgbe_hw *hw); 3073 s32 (*dmac_config)(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index ba54ff07b438..49395420c9b3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -55,9 +55,6 @@ s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
55{ 55{
56 struct ixgbe_mac_info *mac = &hw->mac; 56 struct ixgbe_mac_info *mac = &hw->mac;
57 57
58 /* Call PHY identify routine to get the phy type */
59 ixgbe_identify_phy_generic(hw);
60
61 mac->mcft_size = IXGBE_X540_MC_TBL_SIZE; 58 mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
62 mac->vft_size = IXGBE_X540_VFT_TBL_SIZE; 59 mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
63 mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES; 60 mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index ffdd1231f419..50bf81908dd6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -80,7 +80,7 @@ static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
80 * Initializes the EEPROM parameters ixgbe_eeprom_info within the 80 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
81 * ixgbe_hw struct in order to set up EEPROM access. 81 * ixgbe_hw struct in order to set up EEPROM access.
82 **/ 82 **/
83s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw) 83static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
84{ 84{
85 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 85 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
86 u32 eec; 86 u32 eec;
@@ -110,8 +110,8 @@ s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
110 * @device_type: 3 bit device type 110 * @device_type: 3 bit device type
111 * @phy_data: Pointer to read data from the register 111 * @phy_data: Pointer to read data from the register
112 **/ 112 **/
113s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, 113static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
114 u32 device_type, u32 *data) 114 u32 device_type, u32 *data)
115{ 115{
116 u32 i, command, error; 116 u32 i, command, error;
117 117
@@ -158,7 +158,8 @@ s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
158 * 158 *
159 * Reads a 16 bit word from the EEPROM using the hostif. 159 * Reads a 16 bit word from the EEPROM using the hostif.
160 **/ 160 **/
161s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 *data) 161static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
162 u16 *data)
162{ 163{
163 s32 status; 164 s32 status;
164 struct ixgbe_hic_read_shadow_ram buffer; 165 struct ixgbe_hic_read_shadow_ram buffer;
@@ -193,8 +194,8 @@ s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
193 * 194 *
194 * Reads a 16 bit word(s) from the EEPROM using the hostif. 195 * Reads a 16 bit word(s) from the EEPROM using the hostif.
195 **/ 196 **/
196s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, 197static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
197 u16 offset, u16 words, u16 *data) 198 u16 offset, u16 words, u16 *data)
198{ 199{
199 struct ixgbe_hic_read_shadow_ram buffer; 200 struct ixgbe_hic_read_shadow_ram buffer;
200 u32 current_word = 0; 201 u32 current_word = 0;
@@ -331,7 +332,8 @@ static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
331 * 332 *
332 * Returns a negative error code on error, or the 16-bit checksum 333 * Returns a negative error code on error, or the 16-bit checksum
333 **/ 334 **/
334s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size) 335static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
336 u32 buffer_size)
335{ 337{
336 u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1]; 338 u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
337 u16 *local_buffer; 339 u16 *local_buffer;
@@ -407,7 +409,7 @@ s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size)
407 * 409 *
408 * Returns a negative error code on error, or the 16-bit checksum 410 * Returns a negative error code on error, or the 16-bit checksum
409 **/ 411 **/
410s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw) 412static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
411{ 413{
412 return ixgbe_calc_checksum_X550(hw, NULL, 0); 414 return ixgbe_calc_checksum_X550(hw, NULL, 0);
413} 415}
@@ -419,7 +421,7 @@ s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
419 * 421 *
420 * Reads a 16 bit word from the EEPROM using the hostif. 422 * Reads a 16 bit word from the EEPROM using the hostif.
421 **/ 423 **/
422s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data) 424static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
423{ 425{
424 s32 status = 0; 426 s32 status = 0;
425 427
@@ -440,7 +442,8 @@ s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
440 * Performs checksum calculation and validates the EEPROM checksum. If the 442 * Performs checksum calculation and validates the EEPROM checksum. If the
441 * caller does not need checksum_val, the value can be NULL. 443 * caller does not need checksum_val, the value can be NULL.
442 **/ 444 **/
443s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val) 445static s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
446 u16 *checksum_val)
444{ 447{
445 s32 status; 448 s32 status;
446 u16 checksum; 449 u16 checksum;
@@ -489,7 +492,8 @@ s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val)
489 * 492 *
490 * Write a 16 bit word to the EEPROM using the hostif. 493 * Write a 16 bit word to the EEPROM using the hostif.
491 **/ 494 **/
492s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 data) 495static s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
496 u16 data)
493{ 497{
494 s32 status; 498 s32 status;
495 struct ixgbe_hic_write_shadow_ram buffer; 499 struct ixgbe_hic_write_shadow_ram buffer;
@@ -517,7 +521,7 @@ s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
517 * 521 *
518 * Write a 16 bit word to the EEPROM using the hostif. 522 * Write a 16 bit word to the EEPROM using the hostif.
519 **/ 523 **/
520s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data) 524static s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
521{ 525{
522 s32 status = 0; 526 s32 status = 0;
523 527
@@ -537,7 +541,7 @@ s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
537 * 541 *
538 * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash. 542 * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
539 **/ 543 **/
540s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw) 544static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
541{ 545{
542 s32 status = 0; 546 s32 status = 0;
543 union ixgbe_hic_hdr2 buffer; 547 union ixgbe_hic_hdr2 buffer;
@@ -560,7 +564,7 @@ s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
560 * checksum and updates the EEPROM and instructs the hardware to update 564 * checksum and updates the EEPROM and instructs the hardware to update
561 * the flash. 565 * the flash.
562 **/ 566 **/
563s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw) 567static s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
564{ 568{
565 s32 status; 569 s32 status;
566 u16 checksum = 0; 570 u16 checksum = 0;
@@ -600,8 +604,9 @@ s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
600 * 604 *
601 * Write a 16 bit word(s) to the EEPROM using the hostif. 605 * Write a 16 bit word(s) to the EEPROM using the hostif.
602 **/ 606 **/
603s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw, 607static s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
604 u16 offset, u16 words, u16 *data) 608 u16 offset, u16 words,
609 u16 *data)
605{ 610{
606 s32 status = 0; 611 s32 status = 0;
607 u32 i = 0; 612 u32 i = 0;
@@ -630,7 +635,7 @@ s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
630/** ixgbe_init_mac_link_ops_X550em - init mac link function pointers 635/** ixgbe_init_mac_link_ops_X550em - init mac link function pointers
631 * @hw: pointer to hardware structure 636 * @hw: pointer to hardware structure
632 **/ 637 **/
633void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) 638static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
634{ 639{
635 struct ixgbe_mac_info *mac = &hw->mac; 640 struct ixgbe_mac_info *mac = &hw->mac;
636 641
@@ -647,7 +652,7 @@ void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
647/** ixgbe_setup_sfp_modules_X550em - Setup SFP module 652/** ixgbe_setup_sfp_modules_X550em - Setup SFP module
648 * @hw: pointer to hardware structure 653 * @hw: pointer to hardware structure
649 */ 654 */
650s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw) 655static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
651{ 656{
652 bool setup_linear; 657 bool setup_linear;
653 u16 reg_slice, edc_mode; 658 u16 reg_slice, edc_mode;
@@ -703,9 +708,9 @@ s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
703 * @speed: pointer to link speed 708 * @speed: pointer to link speed
704 * @autoneg: true when autoneg or autotry is enabled 709 * @autoneg: true when autoneg or autotry is enabled
705 **/ 710 **/
706s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, 711static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
707 ixgbe_link_speed *speed, 712 ixgbe_link_speed *speed,
708 bool *autoneg) 713 bool *autoneg)
709{ 714{
710 /* SFP */ 715 /* SFP */
711 if (hw->phy.media_type == ixgbe_media_type_fiber) { 716 if (hw->phy.media_type == ixgbe_media_type_fiber) {
@@ -740,8 +745,8 @@ s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
740 * @device_type: 3 bit device type 745 * @device_type: 3 bit device type
741 * @data: Data to write to the register 746 * @data: Data to write to the register
742 **/ 747 **/
743s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, 748static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
744 u32 device_type, u32 data) 749 u32 device_type, u32 data)
745{ 750{
746 u32 i, command, error; 751 u32 i, command, error;
747 752
@@ -904,7 +909,7 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
904 * 909 *
905 * Configures the integrated KX4 PHY. 910 * Configures the integrated KX4 PHY.
906 **/ 911 **/
907s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw) 912static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
908{ 913{
909 s32 status; 914 s32 status;
910 u32 reg_val; 915 u32 reg_val;
@@ -942,7 +947,7 @@ s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
942 * 947 *
943 * Configures the integrated KR PHY. 948 * Configures the integrated KR PHY.
944 **/ 949 **/
945s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) 950static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
946{ 951{
947 s32 status; 952 s32 status;
948 u32 reg_val; 953 u32 reg_val;
@@ -987,7 +992,7 @@ s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
987 * A return of a non-zero value indicates an error, and the base driver should 992 * A return of a non-zero value indicates an error, and the base driver should
988 * not report link up. 993 * not report link up.
989 **/ 994 **/
990s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw) 995static s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw)
991{ 996{
992 u32 status; 997 u32 status;
993 u16 lasi, autoneg_status, speed; 998 u16 lasi, autoneg_status, speed;
@@ -1049,7 +1054,7 @@ s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw)
1049 * set during init_shared_code because the PHY/SFP type was 1054 * set during init_shared_code because the PHY/SFP type was
1050 * not known. Perform the SFP init if necessary. 1055 * not known. Perform the SFP init if necessary.
1051 **/ 1056 **/
1052s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) 1057static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
1053{ 1058{
1054 struct ixgbe_phy_info *phy = &hw->phy; 1059 struct ixgbe_phy_info *phy = &hw->phy;
1055 s32 ret_val; 1060 s32 ret_val;
@@ -1102,7 +1107,7 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
1102 * Returns the media type (fiber, copper, backplane) 1107 * Returns the media type (fiber, copper, backplane)
1103 * 1108 *
1104 */ 1109 */
1105enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) 1110static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
1106{ 1111{
1107 enum ixgbe_media_type media_type; 1112 enum ixgbe_media_type media_type;
1108 1113
@@ -1129,7 +1134,7 @@ enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
1129/** ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY. 1134/** ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
1130 ** @hw: pointer to hardware structure 1135 ** @hw: pointer to hardware structure
1131 **/ 1136 **/
1132s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) 1137static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
1133{ 1138{
1134 u32 status; 1139 u32 status;
1135 u16 reg; 1140 u16 reg;
@@ -1202,7 +1207,7 @@ s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
1202 ** and clears all interrupts, perform a PHY reset, and perform a link (MAC) 1207 ** and clears all interrupts, perform a PHY reset, and perform a link (MAC)
1203 ** reset. 1208 ** reset.
1204 **/ 1209 **/
1205s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) 1210static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
1206{ 1211{
1207 ixgbe_link_speed link_speed; 1212 ixgbe_link_speed link_speed;
1208 s32 status; 1213 s32 status;
@@ -1295,6 +1300,28 @@ mac_reset_top:
1295 return status; 1300 return status;
1296} 1301}
1297 1302
1303/** ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype
1304 * anti-spoofing
1305 * @hw: pointer to hardware structure
1306 * @enable: enable or disable switch for Ethertype anti-spoofing
1307 * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
1308 **/
1309void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, bool enable,
1310 int vf)
1311{
1312 int vf_target_reg = vf >> 3;
1313 int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
1314 u32 pfvfspoof;
1315
1316 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
1317 if (enable)
1318 pfvfspoof |= (1 << vf_target_shift);
1319 else
1320 pfvfspoof &= ~(1 << vf_target_shift);
1321
1322 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
1323}
1324
1298#define X550_COMMON_MAC \ 1325#define X550_COMMON_MAC \
1299 .init_hw = &ixgbe_init_hw_generic, \ 1326 .init_hw = &ixgbe_init_hw_generic, \
1300 .start_hw = &ixgbe_start_hw_X540, \ 1327 .start_hw = &ixgbe_start_hw_X540, \
@@ -1329,6 +1356,8 @@ mac_reset_top:
1329 .init_uta_tables = &ixgbe_init_uta_tables_generic, \ 1356 .init_uta_tables = &ixgbe_init_uta_tables_generic, \
1330 .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \ 1357 .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \
1331 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \ 1358 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \
1359 .set_ethertype_anti_spoofing = \
1360 &ixgbe_set_ethertype_anti_spoofing_X550, \
1332 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, \ 1361 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, \
1333 .release_swfw_sync = &ixgbe_release_swfw_sync_X540, \ 1362 .release_swfw_sync = &ixgbe_release_swfw_sync_X540, \
1334 .disable_rx_buff = &ixgbe_disable_rx_buff_generic, \ 1363 .disable_rx_buff = &ixgbe_disable_rx_buff_generic, \
@@ -1345,7 +1374,6 @@ static struct ixgbe_mac_operations mac_ops_X550 = {
1345 .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, 1374 .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
1346 .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, 1375 .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
1347 .setup_link = &ixgbe_setup_mac_link_X540, 1376 .setup_link = &ixgbe_setup_mac_link_X540,
1348 .set_rxpba = &ixgbe_set_rxpba_generic,
1349 .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic, 1377 .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic,
1350 .setup_sfp = NULL, 1378 .setup_sfp = NULL,
1351}; 1379};
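
The per-VF bit addressing in ixgbe_set_ethertype_anti_spoofing_X550() is compact enough to misread; a short worked example of where the bit ends up, using the shift of 16 defined in ixgbe_type.h:

/* vf_target_reg   = vf >> 3                                 (eight VFs per register)
 * vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT  (shift = 16)
 *
 *   vf = 0   ->  PFVFSPOOF(0), bit 16
 *   vf = 7   ->  PFVFSPOOF(0), bit 23
 *   vf = 10  ->  PFVFSPOOF(1), bit 18
 */
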
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 8c44ab25f3fa..3a9b356dff01 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -43,6 +43,13 @@
43#define BP_EXTENDED_STATS 43#define BP_EXTENDED_STATS
44#endif 44#endif
45 45
46#define IXGBE_MAX_TXD_PWR 14
47#define IXGBE_MAX_DATA_PER_TXD BIT(IXGBE_MAX_TXD_PWR)
48
49/* Tx Descriptors needed, worst case */
50#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
51#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
52
46/* wrapper around a pointer to a socket buffer, 53/* wrapper around a pointer to a socket buffer,
47 * so a DMA handle can be stored along with the buffer */ 54 * so a DMA handle can be stored along with the buffer */
48struct ixgbevf_tx_buffer { 55struct ixgbevf_tx_buffer {
@@ -85,6 +92,18 @@ struct ixgbevf_rx_queue_stats {
85 u64 csum_err; 92 u64 csum_err;
86}; 93};
87 94
95enum ixgbevf_ring_state_t {
96 __IXGBEVF_TX_DETECT_HANG,
97 __IXGBEVF_HANG_CHECK_ARMED,
98};
99
100#define check_for_tx_hang(ring) \
101 test_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
102#define set_check_for_tx_hang(ring) \
103 set_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
104#define clear_check_for_tx_hang(ring) \
105 clear_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
106
88struct ixgbevf_ring { 107struct ixgbevf_ring {
89 struct ixgbevf_ring *next; 108 struct ixgbevf_ring *next;
90 struct net_device *netdev; 109 struct net_device *netdev;
@@ -101,7 +120,7 @@ struct ixgbevf_ring {
101 struct ixgbevf_tx_buffer *tx_buffer_info; 120 struct ixgbevf_tx_buffer *tx_buffer_info;
102 struct ixgbevf_rx_buffer *rx_buffer_info; 121 struct ixgbevf_rx_buffer *rx_buffer_info;
103 }; 122 };
104 123 unsigned long state;
105 struct ixgbevf_stats stats; 124 struct ixgbevf_stats stats;
106 struct u64_stats_sync syncp; 125 struct u64_stats_sync syncp;
107 union { 126 union {
@@ -124,6 +143,7 @@ struct ixgbevf_ring {
124 143
125#define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES 144#define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES
126#define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES 145#define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES
146#define IXGBEVF_MAX_RSS_QUEUES 2
127 147
128#define IXGBEVF_DEFAULT_TXD 1024 148#define IXGBEVF_DEFAULT_TXD 1024
129#define IXGBEVF_DEFAULT_RXD 512 149#define IXGBEVF_DEFAULT_RXD 512
@@ -347,8 +367,6 @@ struct ixgbevf_adapter {
347 /* this field must be first, see ixgbevf_process_skb_fields */ 367 /* this field must be first, see ixgbevf_process_skb_fields */
348 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; 368 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
349 369
350 struct timer_list watchdog_timer;
351 struct work_struct reset_task;
352 struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; 370 struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
353 371
354 /* Interrupt Throttle Rate */ 372 /* Interrupt Throttle Rate */
@@ -378,8 +396,7 @@ struct ixgbevf_adapter {
378 * thus the additional *_CAPABLE flags. 396 * thus the additional *_CAPABLE flags.
379 */ 397 */
380 u32 flags; 398 u32 flags;
381#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1) 399#define IXGBEVF_FLAG_RESET_REQUESTED (u32)(1)
382
383#define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED (u32)(1 << 2) 400#define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED (u32)(1 << 2)
384 401
385 struct msix_entry *msix_entries; 402 struct msix_entry *msix_entries;
@@ -415,9 +432,11 @@ struct ixgbevf_adapter {
415 u32 link_speed; 432 u32 link_speed;
416 bool link_up; 433 bool link_up;
417 434
418 spinlock_t mbx_lock; 435 struct timer_list service_timer;
436 struct work_struct service_task;
419 437
420 struct work_struct watchdog_task; 438 spinlock_t mbx_lock;
439 unsigned long last_reset;
421}; 440};
422 441
423enum ixbgevf_state_t { 442enum ixbgevf_state_t {
@@ -426,7 +445,8 @@ enum ixbgevf_state_t {
426 __IXGBEVF_DOWN, 445 __IXGBEVF_DOWN,
427 __IXGBEVF_DISABLED, 446 __IXGBEVF_DISABLED,
428 __IXGBEVF_REMOVING, 447 __IXGBEVF_REMOVING,
429 __IXGBEVF_WORK_INIT, 448 __IXGBEVF_SERVICE_SCHED,
449 __IXGBEVF_SERVICE_INITED,
430}; 450};
431 451
432enum ixgbevf_boards { 452enum ixgbevf_boards {
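
The ixgbevf header above replaces the watchdog timer plus reset work item with a single service_timer/service_task pair, guarded by the __IXGBEVF_SERVICE_SCHED and __IXGBEVF_SERVICE_INITED state bits. A minimal sketch of the pattern these fields support, with an illustrative callback name and interval (neither is taken from this diff):

/* Sketch only: a periodic timer re-arms itself and kicks the one service
 * work item; ixgbevf_service_event_schedule() is the helper added in
 * ixgbevf_main.c below.
 */
static void service_timer_sketch(unsigned long data)
{
	struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;

	mod_timer(&adapter->service_timer, jiffies + 2 * HZ);
	ixgbevf_service_event_schedule(adapter);
}

The service task then acts on flags such as IXGBEVF_FLAG_RESET_REQUESTED (set by the Tx timeout path in the next hunk) and clears __IXGBEVF_SERVICE_SCHED through ixgbevf_service_event_complete() when it finishes.
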
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 38c7a0be8197..4186981e562d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -98,6 +98,23 @@ static int debug = -1;
98module_param(debug, int, 0); 98module_param(debug, int, 0);
99MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 99MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
100 100
101static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter)
102{
103 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
104 !test_bit(__IXGBEVF_REMOVING, &adapter->state) &&
105 !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state))
106 schedule_work(&adapter->service_task);
107}
108
109static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)
110{
111 BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state));
112
113 /* flush memory to make sure state is correct before next watchdog */
114 smp_mb__before_atomic();
115 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
116}
117
101/* forward decls */ 118/* forward decls */
102static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter); 119static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
103static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector); 120static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
@@ -111,8 +128,8 @@ static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
111 return; 128 return;
112 hw->hw_addr = NULL; 129 hw->hw_addr = NULL;
113 dev_err(&adapter->pdev->dev, "Adapter removed\n"); 130 dev_err(&adapter->pdev->dev, "Adapter removed\n");
114 if (test_bit(__IXGBEVF_WORK_INIT, &adapter->state)) 131 if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
115 schedule_work(&adapter->watchdog_task); 132 ixgbevf_service_event_schedule(adapter);
116} 133}
117 134
118static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg) 135static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
@@ -199,14 +216,72 @@ static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
199 /* tx_buffer must be completely set up in the transmit path */ 216 /* tx_buffer must be completely set up in the transmit path */
200} 217}
201 218
202#define IXGBE_MAX_TXD_PWR 14 219static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
203#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) 220{
221 return ring->stats.packets;
222}
223
224static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring)
225{
226 struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev);
227 struct ixgbe_hw *hw = &adapter->hw;
228
229 u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx));
230 u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx));
231
232 if (head != tail)
233 return (head < tail) ?
234 tail - head : (tail + ring->count - head);
235
236 return 0;
237}
238
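ixgbevf_get_tx_pending() above derives the number of outstanding descriptors from the hardware head and tail pointers, allowing for the tail having wrapped past the end of the ring. A standalone sketch of just that arithmetic, where ring_count is a made-up descriptor count:

#include <assert.h>

static unsigned int tx_pending(unsigned int head, unsigned int tail,
                               unsigned int ring_count)
{
        if (head == tail)
                return 0;                               /* queue empty */
        return (head < tail) ? tail - head              /* no wrap */
                             : tail + ring_count - head;/* tail wrapped */
}

int main(void)
{
        assert(tx_pending(10, 10, 512) == 0);
        assert(tx_pending(10, 14, 512) == 4);
        assert(tx_pending(510, 2, 512) == 4);           /* wrapped case */
        return 0;
}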
239static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring)
240{
241 u32 tx_done = ixgbevf_get_tx_completed(tx_ring);
242 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
243 u32 tx_pending = ixgbevf_get_tx_pending(tx_ring);
244
245 clear_check_for_tx_hang(tx_ring);
246
247 /* Check for a hung queue, but be thorough. This verifies
248 * that a transmit has been completed since the previous
249 * check AND there is at least one packet pending. The
250 * ARMED bit is set to indicate a potential hang.
251 */
252 if ((tx_done_old == tx_done) && tx_pending) {
253 /* make sure it is true for two checks in a row */
254 return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED,
255 &tx_ring->state);
256 }
257 /* reset the countdown */
258 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state);
259
260 /* update completed stats and continue */
261 tx_ring->tx_stats.tx_done_old = tx_done;
262
263 return false;
264}
265
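The hang check above deliberately needs two strikes: a check that sees no completion progress while descriptors are pending only arms __IXGBEVF_HANG_CHECK_ARMED, and a hang is reported only if the next check still sees no progress. A rough userspace model of that state machine, with invented field names:

#include <stdbool.h>
#include <stdio.h>

struct txq {
        unsigned long long completed;   /* completions observed now */
        unsigned long long done_old;    /* completions at the last check */
        unsigned int pending;           /* descriptors still outstanding */
        bool hang_armed;                /* set on the first suspicious check */
};

static bool check_tx_hang(struct txq *q)
{
        if (q->done_old == q->completed && q->pending) {
                if (q->hang_armed)
                        return true;    /* second strike: report a hang */
                q->hang_armed = true;   /* first strike: arm and wait */
                return false;
        }

        /* progress was made: disarm and remember the new completion count */
        q->hang_armed = false;
        q->done_old = q->completed;
        return false;
}

int main(void)
{
        struct txq q = { .completed = 100, .done_old = 100, .pending = 3 };

        printf("first check:  %d\n", check_tx_hang(&q));        /* 0: armed */
        printf("second check: %d\n", check_tx_hang(&q));        /* 1: hung */
        return 0;
}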
266static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
267{
268 /* Do the reset outside of interrupt context */
269 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
270 adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
271 ixgbevf_service_event_schedule(adapter);
272 }
273}
204 274
205/* Tx Descriptors needed, worst case */ 275/**
206#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) 276 * ixgbevf_tx_timeout - Respond to a Tx Hang
207#define DESC_NEEDED (MAX_SKB_FRAGS + 4) 277 * @netdev: network interface device structure
278 **/
279static void ixgbevf_tx_timeout(struct net_device *netdev)
280{
281 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
208 282
209static void ixgbevf_tx_timeout(struct net_device *netdev); 283 ixgbevf_tx_timeout_reset(adapter);
284}
210 285
211/** 286/**
212 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes 287 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
@@ -311,6 +386,37 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
311 q_vector->tx.total_bytes += total_bytes; 386 q_vector->tx.total_bytes += total_bytes;
312 q_vector->tx.total_packets += total_packets; 387 q_vector->tx.total_packets += total_packets;
313 388
389 if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
390 struct ixgbe_hw *hw = &adapter->hw;
391 union ixgbe_adv_tx_desc *eop_desc;
392
393 eop_desc = tx_ring->tx_buffer_info[i].next_to_watch;
394
395 pr_err("Detected Tx Unit Hang\n"
396 " Tx Queue <%d>\n"
397 " TDH, TDT <%x>, <%x>\n"
398 " next_to_use <%x>\n"
399 " next_to_clean <%x>\n"
400 "tx_buffer_info[next_to_clean]\n"
401 " next_to_watch <%p>\n"
402 " eop_desc->wb.status <%x>\n"
403 " time_stamp <%lx>\n"
404 " jiffies <%lx>\n",
405 tx_ring->queue_index,
406 IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)),
407 IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)),
408 tx_ring->next_to_use, i,
409 eop_desc, (eop_desc ? eop_desc->wb.status : 0),
410 tx_ring->tx_buffer_info[i].time_stamp, jiffies);
411
412 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
413
414 /* schedule immediate reset if we believe we hung */
415 ixgbevf_tx_timeout_reset(adapter);
416
417 return true;
418 }
419
314#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 420#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
315 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && 421 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
316 (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { 422 (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
@@ -1158,9 +1264,7 @@ static irqreturn_t ixgbevf_msix_other(int irq, void *data)
1158 1264
1159 hw->mac.get_link_status = 1; 1265 hw->mac.get_link_status = 1;
1160 1266
1161 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && 1267 ixgbevf_service_event_schedule(adapter);
1162 !test_bit(__IXGBEVF_REMOVING, &adapter->state))
1163 mod_timer(&adapter->watchdog_timer, jiffies);
1164 1268
1165 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other); 1269 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
1166 1270
@@ -1479,6 +1583,8 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
1479 txdctl |= (1 << 8) | /* HTHRESH = 1 */ 1583 txdctl |= (1 << 8) | /* HTHRESH = 1 */
1480 32; /* PTHRESH = 32 */ 1584 32; /* PTHRESH = 32 */
1481 1585
1586 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
1587
1482 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl); 1588 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
1483 1589
1484 /* poll to verify queue is enabled */ 1590 /* poll to verify queue is enabled */
@@ -1584,6 +1690,39 @@ static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1584 reg_idx); 1690 reg_idx);
1585} 1691}
1586 1692
1693static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
1694{
1695 struct ixgbe_hw *hw = &adapter->hw;
1696 u32 vfmrqc = 0, vfreta = 0;
1697 u32 rss_key[10];
1698 u16 rss_i = adapter->num_rx_queues;
1699 int i, j;
1700
1701 /* Fill out hash function seeds */
1702 netdev_rss_key_fill(rss_key, sizeof(rss_key));
1703 for (i = 0; i < 10; i++)
1704 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
1705
1706 /* Fill out redirection table */
1707 for (i = 0, j = 0; i < 64; i++, j++) {
1708 if (j == rss_i)
1709 j = 0;
1710 vfreta = (vfreta << 8) | (j * 0x1);
1711 if ((i & 3) == 3)
1712 IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
1713 }
1714
1715 /* Perform hash on these packet types */
1716 vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 |
1717 IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP |
1718 IXGBE_VFMRQC_RSS_FIELD_IPV6 |
1719 IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP;
1720
1721 vfmrqc |= IXGBE_VFMRQC_RSSEN;
1722
1723 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc);
1724}
1725
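ixgbevf_setup_vfmrqc() above programs 64 one-byte redirection entries, four per 32-bit VFRETA register, cycling the queue index modulo the number of Rx queues. A host-side sketch of just the packing loop; reta[] and rss_queues are illustrative stand-ins, not register definitions:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t reta[16];              /* 64 entries / 4 per register */
        unsigned int rss_queues = 4;    /* assumed number of Rx queues */
        uint32_t word = 0;
        unsigned int i, j;

        for (i = 0, j = 0; i < 64; i++, j++) {
                if (j == rss_queues)
                        j = 0;          /* cycle indices 0..rss_queues-1 */
                word = (word << 8) | j;
                if ((i & 3) == 3)       /* every fourth entry: flush a word */
                        reta[i >> 2] = word;
        }

        /* 0x00010203: queue 0 in the top byte, queue 3 in the bottom byte */
        printf("first redirection word: 0x%08x\n", (unsigned int)reta[0]);
        return 0;
}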
1587static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter, 1726static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
1588 struct ixgbevf_ring *ring) 1727 struct ixgbevf_ring *ring)
1589{ 1728{
@@ -1640,6 +1779,8 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1640 struct net_device *netdev = adapter->netdev; 1779 struct net_device *netdev = adapter->netdev;
1641 1780
1642 ixgbevf_setup_psrtype(adapter); 1781 ixgbevf_setup_psrtype(adapter);
1782 if (hw->mac.type >= ixgbe_mac_X550_vf)
1783 ixgbevf_setup_vfmrqc(adapter);
1643 1784
1644 /* notify the PF of our intent to use this size of frame */ 1785 /* notify the PF of our intent to use this size of frame */
1645 ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN); 1786 ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
@@ -1794,7 +1935,8 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
1794 struct ixgbe_hw *hw = &adapter->hw; 1935 struct ixgbe_hw *hw = &adapter->hw;
1795 unsigned int def_q = 0; 1936 unsigned int def_q = 0;
1796 unsigned int num_tcs = 0; 1937 unsigned int num_tcs = 0;
1797 unsigned int num_rx_queues = 1; 1938 unsigned int num_rx_queues = adapter->num_rx_queues;
1939 unsigned int num_tx_queues = adapter->num_tx_queues;
1798 int err; 1940 int err;
1799 1941
1800 spin_lock_bh(&adapter->mbx_lock); 1942 spin_lock_bh(&adapter->mbx_lock);
@@ -1808,6 +1950,9 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
1808 return err; 1950 return err;
1809 1951
1810 if (num_tcs > 1) { 1952 if (num_tcs > 1) {
1953 /* we need only one Tx queue */
1954 num_tx_queues = 1;
1955
1811 /* update default Tx ring register index */ 1956 /* update default Tx ring register index */
1812 adapter->tx_ring[0]->reg_idx = def_q; 1957 adapter->tx_ring[0]->reg_idx = def_q;
1813 1958
@@ -1816,7 +1961,8 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
1816 } 1961 }
1817 1962
1818 /* if we have a bad config abort request queue reset */ 1963 /* if we have a bad config abort request queue reset */
1819 if (adapter->num_rx_queues != num_rx_queues) { 1964 if ((adapter->num_rx_queues != num_rx_queues) ||
1965 (adapter->num_tx_queues != num_tx_queues)) {
1820 /* force mailbox timeout to prevent further messages */ 1966 /* force mailbox timeout to prevent further messages */
1821 hw->mbx.timeout = 0; 1967 hw->mbx.timeout = 0;
1822 1968
@@ -1917,6 +2063,10 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1917 clear_bit(__IXGBEVF_DOWN, &adapter->state); 2063 clear_bit(__IXGBEVF_DOWN, &adapter->state);
1918 ixgbevf_napi_enable_all(adapter); 2064 ixgbevf_napi_enable_all(adapter);
1919 2065
2066 /* clear any pending interrupts, may auto mask */
2067 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2068 ixgbevf_irq_enable(adapter);
2069
1920 /* enable transmits */ 2070 /* enable transmits */
1921 netif_tx_start_all_queues(netdev); 2071 netif_tx_start_all_queues(netdev);
1922 2072
@@ -1924,21 +2074,14 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1924 ixgbevf_init_last_counter_stats(adapter); 2074 ixgbevf_init_last_counter_stats(adapter);
1925 2075
1926 hw->mac.get_link_status = 1; 2076 hw->mac.get_link_status = 1;
1927 mod_timer(&adapter->watchdog_timer, jiffies); 2077 mod_timer(&adapter->service_timer, jiffies);
1928} 2078}
1929 2079
1930void ixgbevf_up(struct ixgbevf_adapter *adapter) 2080void ixgbevf_up(struct ixgbevf_adapter *adapter)
1931{ 2081{
1932 struct ixgbe_hw *hw = &adapter->hw;
1933
1934 ixgbevf_configure(adapter); 2082 ixgbevf_configure(adapter);
1935 2083
1936 ixgbevf_up_complete(adapter); 2084 ixgbevf_up_complete(adapter);
1937
1938 /* clear any pending interrupts, may auto mask */
1939 IXGBE_READ_REG(hw, IXGBE_VTEICR);
1940
1941 ixgbevf_irq_enable(adapter);
1942} 2085}
1943 2086
1944/** 2087/**
@@ -2045,22 +2188,19 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
2045 for (i = 0; i < adapter->num_rx_queues; i++) 2188 for (i = 0; i < adapter->num_rx_queues; i++)
2046 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]); 2189 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
2047 2190
2048 netif_tx_disable(netdev); 2191 usleep_range(10000, 20000);
2049
2050 msleep(10);
2051 2192
2052 netif_tx_stop_all_queues(netdev); 2193 netif_tx_stop_all_queues(netdev);
2053 2194
2195 /* call carrier off first to avoid false dev_watchdog timeouts */
2196 netif_carrier_off(netdev);
2197 netif_tx_disable(netdev);
2198
2054 ixgbevf_irq_disable(adapter); 2199 ixgbevf_irq_disable(adapter);
2055 2200
2056 ixgbevf_napi_disable_all(adapter); 2201 ixgbevf_napi_disable_all(adapter);
2057 2202
2058 del_timer_sync(&adapter->watchdog_timer); 2203 del_timer_sync(&adapter->service_timer);
2059 /* can't call flush scheduled work here because it can deadlock
2060 * if linkwatch_event tries to acquire the rtnl_lock which we are
2061 * holding */
2062 while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
2063 msleep(1);
2064 2204
2065 /* disable transmits in the hardware now that interrupts are off */ 2205 /* disable transmits in the hardware now that interrupts are off */
2066 for (i = 0; i < adapter->num_tx_queues; i++) { 2206 for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -2070,8 +2210,6 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
2070 IXGBE_TXDCTL_SWFLSH); 2210 IXGBE_TXDCTL_SWFLSH);
2071 } 2211 }
2072 2212
2073 netif_carrier_off(netdev);
2074
2075 if (!pci_channel_offline(adapter->pdev)) 2213 if (!pci_channel_offline(adapter->pdev))
2076 ixgbevf_reset(adapter); 2214 ixgbevf_reset(adapter);
2077 2215
@@ -2110,6 +2248,8 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
2110 memcpy(netdev->perm_addr, adapter->hw.mac.addr, 2248 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
2111 netdev->addr_len); 2249 netdev->addr_len);
2112 } 2250 }
2251
2252 adapter->last_reset = jiffies;
2113} 2253}
2114 2254
2115static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter, 2255static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
@@ -2181,8 +2321,19 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
2181 return; 2321 return;
2182 2322
2183 /* we need as many queues as traffic classes */ 2323 /* we need as many queues as traffic classes */
2184 if (num_tcs > 1) 2324 if (num_tcs > 1) {
2185 adapter->num_rx_queues = num_tcs; 2325 adapter->num_rx_queues = num_tcs;
2326 } else {
2327 u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES);
2328
2329 switch (hw->api_version) {
2330 case ixgbe_mbox_api_11:
2331 adapter->num_rx_queues = rss;
2332 adapter->num_tx_queues = rss;
2333 default:
2334 break;
2335 }
2336 }
2186} 2337}
2187 2338
2188/** 2339/**
@@ -2552,7 +2703,8 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2552 struct ixgbe_hw *hw = &adapter->hw; 2703 struct ixgbe_hw *hw = &adapter->hw;
2553 int i; 2704 int i;
2554 2705
2555 if (!adapter->link_up) 2706 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2707 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2556 return; 2708 return;
2557 2709
2558 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc, 2710 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
@@ -2576,79 +2728,176 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2576} 2728}
2577 2729
2578/** 2730/**
2579 * ixgbevf_watchdog - Timer Call-back 2731 * ixgbevf_service_timer - Timer Call-back
2580 * @data: pointer to adapter cast into an unsigned long 2732 * @data: pointer to adapter cast into an unsigned long
2581 **/ 2733 **/
2582static void ixgbevf_watchdog(unsigned long data) 2734static void ixgbevf_service_timer(unsigned long data)
2583{ 2735{
2584 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data; 2736 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2737
2738 /* Reset the timer */
2739 mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);
2740
2741 ixgbevf_service_event_schedule(adapter);
2742}
2743
2744static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
2745{
2746 if (!(adapter->flags & IXGBEVF_FLAG_RESET_REQUESTED))
2747 return;
2748
2749 adapter->flags &= ~IXGBEVF_FLAG_RESET_REQUESTED;
2750
2751 /* If we're already down or resetting, just bail */
2752 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2753 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2754 return;
2755
2756 adapter->tx_timeout_count++;
2757
2758 ixgbevf_reinit_locked(adapter);
2759}
2760
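ixgbevf_reset_subtask() consumes the IXGBEVF_FLAG_RESET_REQUESTED flag that ixgbevf_tx_timeout_reset() and the link-check path set, so the actual reinit always runs in process context where sleeping is allowed. A single-threaded toy version of that request/consume hand-off; all names are placeholders:

#include <stdbool.h>
#include <stdio.h>

#define FLAG_RESET_REQUESTED 0x1u

struct adapter {
        unsigned int flags;
        bool down;
};

static void request_reset(struct adapter *a)    /* e.g. from a Tx timeout */
{
        if (!a->down)
                a->flags |= FLAG_RESET_REQUESTED;
        /* real driver: also kick the service task here */
}

static void reset_subtask(struct adapter *a)    /* runs from the worker */
{
        if (!(a->flags & FLAG_RESET_REQUESTED))
                return;
        a->flags &= ~FLAG_RESET_REQUESTED;

        if (a->down)
                return;                         /* bail if already down */

        puts("reinitializing adapter");         /* ixgbevf_reinit_locked() */
}

int main(void)
{
        struct adapter a = { 0 };

        request_reset(&a);
        reset_subtask(&a);      /* resets once */
        reset_subtask(&a);      /* flag already consumed: no-op */
        return 0;
}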
2761/* ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
2762 * @adapter - pointer to the device adapter structure
2763 *
2764 * This function serves two purposes. First it strobes the interrupt lines
2765 * in order to make certain interrupts are occurring. Secondly it sets the
2766 * bits needed to check for TX hangs. As a result we should immediately
2767 * determine if a hang has occurred.
2768 */
2769static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
2770{
2585 struct ixgbe_hw *hw = &adapter->hw; 2771 struct ixgbe_hw *hw = &adapter->hw;
2586 u32 eics = 0; 2772 u32 eics = 0;
2587 int i; 2773 int i;
2588 2774
2589 /* 2775 /* If we're down or resetting, just bail */
2590 * Do the watchdog outside of interrupt context due to the lovely 2776 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2591 * delays that some of the newer hardware requires 2777 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2592 */ 2778 return;
2593 2779
2594 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) 2780 /* Force detection of hung controller */
2595 goto watchdog_short_circuit; 2781 if (netif_carrier_ok(adapter->netdev)) {
2782 for (i = 0; i < adapter->num_tx_queues; i++)
2783 set_check_for_tx_hang(adapter->tx_ring[i]);
2784 }
2596 2785
2597 /* get one bit for every active tx/rx interrupt vector */ 2786 /* get one bit for every active tx/rx interrupt vector */
2598 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { 2787 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2599 struct ixgbevf_q_vector *qv = adapter->q_vector[i]; 2788 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
2789
2600 if (qv->rx.ring || qv->tx.ring) 2790 if (qv->rx.ring || qv->tx.ring)
2601 eics |= 1 << i; 2791 eics |= 1 << i;
2602 } 2792 }
2603 2793
2794 /* Cause software interrupt to ensure rings are cleaned */
2604 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics); 2795 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
2796}
2605 2797
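ixgbevf_check_hang_subtask() builds a mask with one bit per vector that owns at least one ring and writes it to VTEICS, forcing a software interrupt so any ring the hardware has stopped signalling still gets cleaned. A tiny sketch of the mask construction, where q_has_work[] stands in for qv->rx.ring || qv->tx.ring:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const int num_vectors = 4;
        const int q_has_work[4] = { 1, 1, 0, 1 };
        uint32_t eics = 0;
        int i;

        for (i = 0; i < num_vectors; i++)
                if (q_has_work[i])
                        eics |= 1u << i;        /* one bit per active vector */

        printf("EICS mask: 0x%x\n", eics);      /* 0xb: vectors 0, 1 and 3 */
        return 0;
}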
2606watchdog_short_circuit: 2798/**
2607 schedule_work(&adapter->watchdog_task); 2799 * ixgbevf_watchdog_update_link - update the link status
2800 * @adapter - pointer to the device adapter structure
2801 **/
2802static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
2803{
2804 struct ixgbe_hw *hw = &adapter->hw;
2805 u32 link_speed = adapter->link_speed;
2806 bool link_up = adapter->link_up;
2807 s32 err;
2808
2809 spin_lock_bh(&adapter->mbx_lock);
2810
2811 err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
2812
2813 spin_unlock_bh(&adapter->mbx_lock);
2814
2815 /* if check for link returns error we will need to reset */
2816 if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
2817 adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
2818 link_up = false;
2819 }
2820
2821 adapter->link_up = link_up;
2822 adapter->link_speed = link_speed;
2608} 2823}
2609 2824
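The link-check error path above only requests a reset once mailbox failures have persisted for ten seconds past last_reset, using the jiffies-wraparound-safe time_after() comparison. A small demonstration of that comparison with a locally defined equivalent of the macro; the HZ value is illustrative:

#include <stdio.h>

#define HZ 100UL

static int time_after(unsigned long a, unsigned long b)
{
        return (long)(b - a) < 0;       /* true if a is later than b */
}

int main(void)
{
        unsigned long last_reset = (unsigned long)-50;  /* about to wrap */
        unsigned long now = last_reset + 11 * HZ;       /* wrapped past zero */

        /* still reports "due" even though the counter wrapped in between */
        printf("reset due: %d\n", time_after(now, last_reset + 10 * HZ));
        return 0;
}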
2610/** 2825/**
2611 * ixgbevf_tx_timeout - Respond to a Tx Hang 2826 * ixgbevf_watchdog_link_is_up - update netif_carrier status and
2612 * @netdev: network interface device structure 2827 * print link up message
2828 * @adapter - pointer to the device adapter structure
2613 **/ 2829 **/
2614static void ixgbevf_tx_timeout(struct net_device *netdev) 2830static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter)
2615{ 2831{
2616 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2832 struct net_device *netdev = adapter->netdev;
2617 2833
2618 /* Do the reset outside of interrupt context */ 2834 /* only continue if link was previously down */
2619 schedule_work(&adapter->reset_task); 2835 if (netif_carrier_ok(netdev))
2836 return;
2837
2838 dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n",
2839 (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
2840 "10 Gbps" :
2841 (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ?
2842 "1 Gbps" :
2843 (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ?
2844 "100 Mbps" :
2845 "unknown speed");
2846
2847 netif_carrier_on(netdev);
2620} 2848}
2621 2849
2622static void ixgbevf_reset_task(struct work_struct *work) 2850/**
2851 * ixgbevf_watchdog_link_is_down - update netif_carrier status and
2852 * print link down message
2853 * @adapter - pointer to the adapter structure
2854 **/
2855static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
2623{ 2856{
2624 struct ixgbevf_adapter *adapter; 2857 struct net_device *netdev = adapter->netdev;
2625 adapter = container_of(work, struct ixgbevf_adapter, reset_task);
2626 2858
2627 /* If we're already down or resetting, just bail */ 2859 adapter->link_speed = 0;
2860
2861 /* only continue if link was up previously */
2862 if (!netif_carrier_ok(netdev))
2863 return;
2864
2865 dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
2866
2867 netif_carrier_off(netdev);
2868}
2869
2870/**
2871 * ixgbevf_watchdog_subtask - worker thread to bring link up
2872 * @work: pointer to work_struct containing our data
2873 **/
2874static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
2875{
2876 /* if interface is down do nothing */
2628 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || 2877 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2629 test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
2630 test_bit(__IXGBEVF_RESETTING, &adapter->state)) 2878 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2631 return; 2879 return;
2632 2880
2633 adapter->tx_timeout_count++; 2881 ixgbevf_watchdog_update_link(adapter);
2634 2882
2635 ixgbevf_reinit_locked(adapter); 2883 if (adapter->link_up)
2884 ixgbevf_watchdog_link_is_up(adapter);
2885 else
2886 ixgbevf_watchdog_link_is_down(adapter);
2887
2888 ixgbevf_update_stats(adapter);
2636} 2889}
2637 2890
2638/** 2891/**
2639 * ixgbevf_watchdog_task - worker thread to bring link up 2892 * ixgbevf_service_task - manages and runs subtasks
2640 * @work: pointer to work_struct containing our data 2893 * @work: pointer to work_struct containing our data
2641 **/ 2894 **/
2642static void ixgbevf_watchdog_task(struct work_struct *work) 2895static void ixgbevf_service_task(struct work_struct *work)
2643{ 2896{
2644 struct ixgbevf_adapter *adapter = container_of(work, 2897 struct ixgbevf_adapter *adapter = container_of(work,
2645 struct ixgbevf_adapter, 2898 struct ixgbevf_adapter,
2646 watchdog_task); 2899 service_task);
2647 struct net_device *netdev = adapter->netdev;
2648 struct ixgbe_hw *hw = &adapter->hw; 2900 struct ixgbe_hw *hw = &adapter->hw;
2649 u32 link_speed = adapter->link_speed;
2650 bool link_up = adapter->link_up;
2651 s32 need_reset;
2652 2901
2653 if (IXGBE_REMOVED(hw->hw_addr)) { 2902 if (IXGBE_REMOVED(hw->hw_addr)) {
2654 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) { 2903 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
@@ -2658,73 +2907,13 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
2658 } 2907 }
2659 return; 2908 return;
2660 } 2909 }
2661 ixgbevf_queue_reset_subtask(adapter);
2662
2663 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
2664
2665 /*
2666 * Always check the link on the watchdog because we have
2667 * no LSC interrupt
2668 */
2669 spin_lock_bh(&adapter->mbx_lock);
2670
2671 need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
2672
2673 spin_unlock_bh(&adapter->mbx_lock);
2674
2675 if (need_reset) {
2676 adapter->link_up = link_up;
2677 adapter->link_speed = link_speed;
2678 netif_carrier_off(netdev);
2679 netif_tx_stop_all_queues(netdev);
2680 schedule_work(&adapter->reset_task);
2681 goto pf_has_reset;
2682 }
2683 adapter->link_up = link_up;
2684 adapter->link_speed = link_speed;
2685
2686 if (link_up) {
2687 if (!netif_carrier_ok(netdev)) {
2688 char *link_speed_string;
2689 switch (link_speed) {
2690 case IXGBE_LINK_SPEED_10GB_FULL:
2691 link_speed_string = "10 Gbps";
2692 break;
2693 case IXGBE_LINK_SPEED_1GB_FULL:
2694 link_speed_string = "1 Gbps";
2695 break;
2696 case IXGBE_LINK_SPEED_100_FULL:
2697 link_speed_string = "100 Mbps";
2698 break;
2699 default:
2700 link_speed_string = "unknown speed";
2701 break;
2702 }
2703 dev_info(&adapter->pdev->dev,
2704 "NIC Link is Up, %s\n", link_speed_string);
2705 netif_carrier_on(netdev);
2706 netif_tx_wake_all_queues(netdev);
2707 }
2708 } else {
2709 adapter->link_up = false;
2710 adapter->link_speed = 0;
2711 if (netif_carrier_ok(netdev)) {
2712 dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
2713 netif_carrier_off(netdev);
2714 netif_tx_stop_all_queues(netdev);
2715 }
2716 }
2717 2910
2718 ixgbevf_update_stats(adapter); 2911 ixgbevf_queue_reset_subtask(adapter);
2719 2912 ixgbevf_reset_subtask(adapter);
2720pf_has_reset: 2913 ixgbevf_watchdog_subtask(adapter);
2721 /* Reset the timer */ 2914 ixgbevf_check_hang_subtask(adapter);
2722 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
2723 !test_bit(__IXGBEVF_REMOVING, &adapter->state))
2724 mod_timer(&adapter->watchdog_timer,
2725 round_jiffies(jiffies + (2 * HZ)));
2726 2915
2727 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK; 2916 ixgbevf_service_event_complete(adapter);
2728} 2917}
2729 2918
2730/** 2919/**
@@ -2944,10 +3133,6 @@ static int ixgbevf_open(struct net_device *netdev)
2944 if (!adapter->num_msix_vectors) 3133 if (!adapter->num_msix_vectors)
2945 return -ENOMEM; 3134 return -ENOMEM;
2946 3135
2947 /* disallow open during test */
2948 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
2949 return -EBUSY;
2950
2951 if (hw->adapter_stopped) { 3136 if (hw->adapter_stopped) {
2952 ixgbevf_reset(adapter); 3137 ixgbevf_reset(adapter);
2953 /* if adapter is still stopped then PF isn't up and 3138 /* if adapter is still stopped then PF isn't up and
@@ -2960,6 +3145,12 @@ static int ixgbevf_open(struct net_device *netdev)
2960 } 3145 }
2961 } 3146 }
2962 3147
3148 /* disallow open during test */
3149 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
3150 return -EBUSY;
3151
3152 netif_carrier_off(netdev);
3153
2963 /* allocate transmit descriptors */ 3154 /* allocate transmit descriptors */
2964 err = ixgbevf_setup_all_tx_resources(adapter); 3155 err = ixgbevf_setup_all_tx_resources(adapter);
2965 if (err) 3156 if (err)
@@ -2979,15 +3170,11 @@ static int ixgbevf_open(struct net_device *netdev)
2979 */ 3170 */
2980 ixgbevf_map_rings_to_vectors(adapter); 3171 ixgbevf_map_rings_to_vectors(adapter);
2981 3172
2982 ixgbevf_up_complete(adapter);
2983
2984 /* clear any pending interrupts, may auto mask */
2985 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2986 err = ixgbevf_request_irq(adapter); 3173 err = ixgbevf_request_irq(adapter);
2987 if (err) 3174 if (err)
2988 goto err_req_irq; 3175 goto err_req_irq;
2989 3176
2990 ixgbevf_irq_enable(adapter); 3177 ixgbevf_up_complete(adapter);
2991 3178
2992 return 0; 3179 return 0;
2993 3180
@@ -3452,8 +3639,8 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3452 first->bytecount = skb->len; 3639 first->bytecount = skb->len;
3453 first->gso_segs = 1; 3640 first->gso_segs = 1;
3454 3641
3455 if (vlan_tx_tag_present(skb)) { 3642 if (skb_vlan_tag_present(skb)) {
3456 tx_flags |= vlan_tx_tag_get(skb); 3643 tx_flags |= skb_vlan_tag_get(skb);
3457 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 3644 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3458 tx_flags |= IXGBE_TX_FLAGS_VLAN; 3645 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3459 } 3646 }
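The hunk above only renames the VLAN accessors, but the surrounding packing is worth spelling out: the 16-bit tag is shifted into the upper half of tx_flags and a separate bit marks the tag as present. An isolated sketch with placeholder flag values, not the hardware-defined ones:

#include <stdint.h>
#include <stdio.h>

#define TX_FLAGS_VLAN        0x0001u
#define TX_FLAGS_VLAN_SHIFT  16

int main(void)
{
        uint16_t vlan_tag = 0x0123;     /* what skb_vlan_tag_get() returns */
        uint32_t tx_flags = 0;

        tx_flags |= vlan_tag;           /* tag in the low 16 bits ... */
        tx_flags <<= TX_FLAGS_VLAN_SHIFT; /* ... moved to the high 16 bits */
        tx_flags |= TX_FLAGS_VLAN;      /* mark the tag as present */

        printf("tx_flags = 0x%08x\n", (unsigned int)tx_flags); /* 0x01230001 */
        return 0;
}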
@@ -3822,28 +4009,28 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3822 NETIF_F_HW_VLAN_CTAG_RX | 4009 NETIF_F_HW_VLAN_CTAG_RX |
3823 NETIF_F_HW_VLAN_CTAG_FILTER; 4010 NETIF_F_HW_VLAN_CTAG_FILTER;
3824 4011
3825 netdev->vlan_features |= NETIF_F_TSO; 4012 netdev->vlan_features |= NETIF_F_TSO |
3826 netdev->vlan_features |= NETIF_F_TSO6; 4013 NETIF_F_TSO6 |
3827 netdev->vlan_features |= NETIF_F_IP_CSUM; 4014 NETIF_F_IP_CSUM |
3828 netdev->vlan_features |= NETIF_F_IPV6_CSUM; 4015 NETIF_F_IPV6_CSUM |
3829 netdev->vlan_features |= NETIF_F_SG; 4016 NETIF_F_SG;
3830 4017
3831 if (pci_using_dac) 4018 if (pci_using_dac)
3832 netdev->features |= NETIF_F_HIGHDMA; 4019 netdev->features |= NETIF_F_HIGHDMA;
3833 4020
3834 netdev->priv_flags |= IFF_UNICAST_FLT; 4021 netdev->priv_flags |= IFF_UNICAST_FLT;
3835 4022
3836 init_timer(&adapter->watchdog_timer);
3837 adapter->watchdog_timer.function = ixgbevf_watchdog;
3838 adapter->watchdog_timer.data = (unsigned long)adapter;
3839
3840 if (IXGBE_REMOVED(hw->hw_addr)) { 4023 if (IXGBE_REMOVED(hw->hw_addr)) {
3841 err = -EIO; 4024 err = -EIO;
3842 goto err_sw_init; 4025 goto err_sw_init;
3843 } 4026 }
3844 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task); 4027
3845 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task); 4028 setup_timer(&adapter->service_timer, &ixgbevf_service_timer,
3846 set_bit(__IXGBEVF_WORK_INIT, &adapter->state); 4029 (unsigned long)adapter);
4030
4031 INIT_WORK(&adapter->service_task, ixgbevf_service_task);
4032 set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state);
4033 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
3847 4034
3848 err = ixgbevf_init_interrupt_scheme(adapter); 4035 err = ixgbevf_init_interrupt_scheme(adapter);
3849 if (err) 4036 if (err)
@@ -3917,11 +4104,7 @@ static void ixgbevf_remove(struct pci_dev *pdev)
3917 adapter = netdev_priv(netdev); 4104 adapter = netdev_priv(netdev);
3918 4105
3919 set_bit(__IXGBEVF_REMOVING, &adapter->state); 4106 set_bit(__IXGBEVF_REMOVING, &adapter->state);
3920 4107 cancel_work_sync(&adapter->service_task);
3921 del_timer_sync(&adapter->watchdog_timer);
3922
3923 cancel_work_sync(&adapter->reset_task);
3924 cancel_work_sync(&adapter->watchdog_task);
3925 4108
3926 if (netdev->reg_state == NETREG_REGISTERED) 4109 if (netdev->reg_state == NETREG_REGISTERED)
3927 unregister_netdev(netdev); 4110 unregister_netdev(netdev);
@@ -3955,7 +4138,7 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
3955 struct net_device *netdev = pci_get_drvdata(pdev); 4138 struct net_device *netdev = pci_get_drvdata(pdev);
3956 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 4139 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3957 4140
3958 if (!test_bit(__IXGBEVF_WORK_INIT, &adapter->state)) 4141 if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
3959 return PCI_ERS_RESULT_DISCONNECT; 4142 return PCI_ERS_RESULT_DISCONNECT;
3960 4143
3961 rtnl_lock(); 4144 rtnl_lock();
diff --git a/drivers/net/ethernet/intel/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h
index 09dd8f698bea..3e712fd6e695 100644
--- a/drivers/net/ethernet/intel/ixgbevf/regs.h
+++ b/drivers/net/ethernet/intel/ixgbevf/regs.h
@@ -69,6 +69,16 @@
69#define IXGBE_VFGOTC_LSB 0x02020 69#define IXGBE_VFGOTC_LSB 0x02020
70#define IXGBE_VFGOTC_MSB 0x02024 70#define IXGBE_VFGOTC_MSB 0x02024
71#define IXGBE_VFMPRC 0x01034 71#define IXGBE_VFMPRC 0x01034
72#define IXGBE_VFMRQC 0x3000
73#define IXGBE_VFRSSRK(x) (0x3100 + ((x) * 4))
74#define IXGBE_VFRETA(x) (0x3200 + ((x) * 4))
75
76/* VFMRQC bits */
77#define IXGBE_VFMRQC_RSSEN 0x00000001 /* RSS Enable */
78#define IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP 0x00010000
79#define IXGBE_VFMRQC_RSS_FIELD_IPV4 0x00020000
80#define IXGBE_VFMRQC_RSS_FIELD_IPV6 0x00100000
81#define IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP 0x00200000
72 82
73#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS)) 83#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS))
74 84
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 44ce7d88f554..6e9a792097d3 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2154,9 +2154,9 @@ jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
2154static inline void 2154static inline void
2155jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags) 2155jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags)
2156{ 2156{
2157 if (vlan_tx_tag_present(skb)) { 2157 if (skb_vlan_tag_present(skb)) {
2158 *flags |= TXFLAG_TAGON; 2158 *flags |= TXFLAG_TAGON;
2159 *vlan = cpu_to_le16(vlan_tx_tag_get(skb)); 2159 *vlan = cpu_to_le16(skb_vlan_tag_get(skb));
2160 } 2160 }
2161} 2161}
2162 2162
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 867a6a3ef81f..d9f4498832a1 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -1895,14 +1895,14 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
1895 ctrl = 0; 1895 ctrl = 0;
1896 1896
1897 /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */ 1897 /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
1898 if (vlan_tx_tag_present(skb)) { 1898 if (skb_vlan_tag_present(skb)) {
1899 if (!le) { 1899 if (!le) {
1900 le = get_tx_le(sky2, &slot); 1900 le = get_tx_le(sky2, &slot);
1901 le->addr = 0; 1901 le->addr = 0;
1902 le->opcode = OP_VLAN|HW_OWNER; 1902 le->opcode = OP_VLAN|HW_OWNER;
1903 } else 1903 } else
1904 le->opcode |= OP_VLAN; 1904 le->opcode |= OP_VLAN;
1905 le->length = cpu_to_be16(vlan_tx_tag_get(skb)); 1905 le->length = cpu_to_be16(skb_vlan_tag_get(skb));
1906 ctrl |= INS_VLAN; 1906 ctrl |= INS_VLAN;
1907 } 1907 }
1908 1908
@@ -2594,7 +2594,7 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
2594 sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending; 2594 sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
2595 prefetch(sky2->rx_ring + sky2->rx_next); 2595 prefetch(sky2->rx_ring + sky2->rx_next);
2596 2596
2597 if (vlan_tx_tag_present(re->skb)) 2597 if (skb_vlan_tag_present(re->skb))
2598 count -= VLAN_HLEN; /* Account for vlan tag */ 2598 count -= VLAN_HLEN; /* Account for vlan tag */
2599 2599
2600 /* This chip has hardware problems that generates bogus status. 2600 /* This chip has hardware problems that generates bogus status.
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index 963dd7e6d547..0c51c69f802f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -592,7 +592,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
592 buf->nbufs = 1; 592 buf->nbufs = 1;
593 buf->npages = 1; 593 buf->npages = 1;
594 buf->page_shift = get_order(size) + PAGE_SHIFT; 594 buf->page_shift = get_order(size) + PAGE_SHIFT;
595 buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev, 595 buf->direct.buf = dma_alloc_coherent(&dev->persist->pdev->dev,
596 size, &t, gfp); 596 size, &t, gfp);
597 if (!buf->direct.buf) 597 if (!buf->direct.buf)
598 return -ENOMEM; 598 return -ENOMEM;
@@ -619,7 +619,8 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
619 619
620 for (i = 0; i < buf->nbufs; ++i) { 620 for (i = 0; i < buf->nbufs; ++i) {
621 buf->page_list[i].buf = 621 buf->page_list[i].buf =
622 dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE, 622 dma_alloc_coherent(&dev->persist->pdev->dev,
623 PAGE_SIZE,
623 &t, gfp); 624 &t, gfp);
624 if (!buf->page_list[i].buf) 625 if (!buf->page_list[i].buf)
625 goto err_free; 626 goto err_free;
@@ -657,15 +658,17 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
657 int i; 658 int i;
658 659
659 if (buf->nbufs == 1) 660 if (buf->nbufs == 1)
660 dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf, 661 dma_free_coherent(&dev->persist->pdev->dev, size,
662 buf->direct.buf,
661 buf->direct.map); 663 buf->direct.map);
662 else { 664 else {
663 if (BITS_PER_LONG == 64 && buf->direct.buf) 665 if (BITS_PER_LONG == 64)
664 vunmap(buf->direct.buf); 666 vunmap(buf->direct.buf);
665 667
666 for (i = 0; i < buf->nbufs; ++i) 668 for (i = 0; i < buf->nbufs; ++i)
667 if (buf->page_list[i].buf) 669 if (buf->page_list[i].buf)
668 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, 670 dma_free_coherent(&dev->persist->pdev->dev,
671 PAGE_SIZE,
669 buf->page_list[i].buf, 672 buf->page_list[i].buf,
670 buf->page_list[i].map); 673 buf->page_list[i].map);
671 kfree(buf->page_list); 674 kfree(buf->page_list);
@@ -738,7 +741,7 @@ int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, gfp_t gfp
738 if (!mlx4_alloc_db_from_pgdir(pgdir, db, order)) 741 if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
739 goto out; 742 goto out;
740 743
741 pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev), gfp); 744 pgdir = mlx4_alloc_db_pgdir(&dev->persist->pdev->dev, gfp);
742 if (!pgdir) { 745 if (!pgdir) {
743 ret = -ENOMEM; 746 ret = -ENOMEM;
744 goto out; 747 goto out;
@@ -775,7 +778,7 @@ void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
775 set_bit(i, db->u.pgdir->bits[o]); 778 set_bit(i, db->u.pgdir->bits[o]);
776 779
777 if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) { 780 if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
778 dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, 781 dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
779 db->u.pgdir->db_page, db->u.pgdir->db_dma); 782 db->u.pgdir->db_page, db->u.pgdir->db_dma);
780 list_del(&db->u.pgdir->list); 783 list_del(&db->u.pgdir->list);
781 kfree(db->u.pgdir); 784 kfree(db->u.pgdir);
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 9c656fe4983d..715de8affcc9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -40,16 +40,177 @@ enum {
40 MLX4_CATAS_POLL_INTERVAL = 5 * HZ, 40 MLX4_CATAS_POLL_INTERVAL = 5 * HZ,
41}; 41};
42 42
43static DEFINE_SPINLOCK(catas_lock);
44 43
45static LIST_HEAD(catas_list);
46static struct work_struct catas_work;
47 44
48static int internal_err_reset = 1; 45int mlx4_internal_err_reset = 1;
49module_param(internal_err_reset, int, 0644); 46module_param_named(internal_err_reset, mlx4_internal_err_reset, int, 0644);
50MODULE_PARM_DESC(internal_err_reset, 47MODULE_PARM_DESC(internal_err_reset,
51 "Reset device on internal errors if non-zero" 48 "Reset device on internal errors if non-zero (default 1)");
52 " (default 1, in SRIOV mode default is 0)"); 49
50static int read_vendor_id(struct mlx4_dev *dev)
51{
52 u16 vendor_id = 0;
53 int ret;
54
55 ret = pci_read_config_word(dev->persist->pdev, 0, &vendor_id);
56 if (ret) {
57 mlx4_err(dev, "Failed to read vendor ID, ret=%d\n", ret);
58 return ret;
59 }
60
61 if (vendor_id == 0xffff) {
62 mlx4_err(dev, "PCI can't be accessed to read vendor id\n");
63 return -EINVAL;
64 }
65
66 return 0;
67}
68
69static int mlx4_reset_master(struct mlx4_dev *dev)
70{
71 int err = 0;
72
73 if (mlx4_is_master(dev))
74 mlx4_report_internal_err_comm_event(dev);
75
76 if (!pci_channel_offline(dev->persist->pdev)) {
77 err = read_vendor_id(dev);
78 /* If PCI can't be accessed to read vendor ID we assume that its
79 * link was disabled and chip was already reset.
80 */
81 if (err)
82 return 0;
83
84 err = mlx4_reset(dev);
85 if (err)
86 mlx4_err(dev, "Fail to reset HCA\n");
87 }
88
89 return err;
90}
91
92static int mlx4_reset_slave(struct mlx4_dev *dev)
93{
94#define COM_CHAN_RST_REQ_OFFSET 0x10
95#define COM_CHAN_RST_ACK_OFFSET 0x08
96
97 u32 comm_flags;
98 u32 rst_req;
99 u32 rst_ack;
100 unsigned long end;
101 struct mlx4_priv *priv = mlx4_priv(dev);
102
103 if (pci_channel_offline(dev->persist->pdev))
104 return 0;
105
106 comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm +
107 MLX4_COMM_CHAN_FLAGS));
108 if (comm_flags == 0xffffffff) {
109 mlx4_err(dev, "VF reset is not needed\n");
110 return 0;
111 }
112
113 if (!(dev->caps.vf_caps & MLX4_VF_CAP_FLAG_RESET)) {
114 mlx4_err(dev, "VF reset is not supported\n");
115 return -EOPNOTSUPP;
116 }
117
118 rst_req = (comm_flags & (u32)(1 << COM_CHAN_RST_REQ_OFFSET)) >>
119 COM_CHAN_RST_REQ_OFFSET;
120 rst_ack = (comm_flags & (u32)(1 << COM_CHAN_RST_ACK_OFFSET)) >>
121 COM_CHAN_RST_ACK_OFFSET;
122 if (rst_req != rst_ack) {
123 mlx4_err(dev, "Communication channel isn't sync, fail to send reset\n");
124 return -EIO;
125 }
126
127 rst_req ^= 1;
128 mlx4_warn(dev, "VF is sending reset request to Firmware\n");
129 comm_flags = rst_req << COM_CHAN_RST_REQ_OFFSET;
130 __raw_writel((__force u32)cpu_to_be32(comm_flags),
131 (__iomem char *)priv->mfunc.comm + MLX4_COMM_CHAN_FLAGS);
132 /* Make sure that our comm channel write doesn't
133 * get mixed in with writes from another CPU.
134 */
135 mmiowb();
136
137 end = msecs_to_jiffies(MLX4_COMM_TIME) + jiffies;
138 while (time_before(jiffies, end)) {
139 comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm +
140 MLX4_COMM_CHAN_FLAGS));
141 rst_ack = (comm_flags & (u32)(1 << COM_CHAN_RST_ACK_OFFSET)) >>
142 COM_CHAN_RST_ACK_OFFSET;
143
144 /* Reading rst_req again since the communication channel can
145 * be reset at any time by the PF and all its bits will be
146 * set to zero.
147 */
148 rst_req = (comm_flags & (u32)(1 << COM_CHAN_RST_REQ_OFFSET)) >>
149 COM_CHAN_RST_REQ_OFFSET;
150
151 if (rst_ack == rst_req) {
152 mlx4_warn(dev, "VF Reset succeed\n");
153 return 0;
154 }
155 cond_resched();
156 }
157 mlx4_err(dev, "Fail to send reset over the communication channel\n");
158 return -ETIMEDOUT;
159}
160
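mlx4_reset_slave() requests a firmware reset by toggling the request bit in the comm-channel flags word and then polling until the ack bit has been toggled to match; if the two bits already disagree the channel is out of sync and the request is refused. A simplified, register-free model of that handshake, where firmware_ack() plays the part of the PF/firmware:

#include <stdint.h>
#include <stdio.h>

#define RST_REQ_SHIFT 0x10
#define RST_ACK_SHIFT 0x08

static uint32_t comm_flags;     /* stands in for the comm-channel register */

static void firmware_ack(void)  /* pretend the PF/firmware answered */
{
        uint32_t req = (comm_flags >> RST_REQ_SHIFT) & 1;

        comm_flags = (comm_flags & ~(1u << RST_ACK_SHIFT)) |
                     (req << RST_ACK_SHIFT);
}

int main(void)
{
        uint32_t req = (comm_flags >> RST_REQ_SHIFT) & 1;
        uint32_t ack = (comm_flags >> RST_ACK_SHIFT) & 1;

        if (req != ack) {
                puts("channel out of sync, cannot request reset");
                return 1;
        }

        req ^= 1;                               /* toggle the request bit */
        comm_flags = req << RST_REQ_SHIFT;

        firmware_ack();                         /* real code: poll with timeout */

        ack = (comm_flags >> RST_ACK_SHIFT) & 1;
        puts(ack == req ? "VF reset acknowledged" : "reset timed out");
        return 0;
}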
161static int mlx4_comm_internal_err(u32 slave_read)
162{
163 return (u32)COMM_CHAN_EVENT_INTERNAL_ERR ==
164 (slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0;
165}
166
167void mlx4_enter_error_state(struct mlx4_dev_persistent *persist)
168{
169 int err;
170 struct mlx4_dev *dev;
171
172 if (!mlx4_internal_err_reset)
173 return;
174
175 mutex_lock(&persist->device_state_mutex);
176 if (persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
177 goto out;
178
179 dev = persist->dev;
180 mlx4_err(dev, "device is going to be reset\n");
181 if (mlx4_is_slave(dev))
182 err = mlx4_reset_slave(dev);
183 else
184 err = mlx4_reset_master(dev);
185 BUG_ON(err != 0);
186
187 dev->persist->state |= MLX4_DEVICE_STATE_INTERNAL_ERROR;
188 mlx4_err(dev, "device was reset successfully\n");
189 mutex_unlock(&persist->device_state_mutex);
190
191 /* At that step HW was already reset, now notify clients */
192 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, 0);
193 mlx4_cmd_wake_completions(dev);
194 return;
195
196out:
197 mutex_unlock(&persist->device_state_mutex);
198}
199
200static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist)
201{
202 int err = 0;
203
204 mlx4_enter_error_state(persist);
205 mutex_lock(&persist->interface_state_mutex);
206 if (persist->interface_state & MLX4_INTERFACE_STATE_UP &&
207 !(persist->interface_state & MLX4_INTERFACE_STATE_DELETION)) {
208 err = mlx4_restart_one(persist->pdev);
209 mlx4_info(persist->dev, "mlx4_restart_one was ended, ret=%d\n",
210 err);
211 }
212 mutex_unlock(&persist->interface_state_mutex);
213}
53 214
54static void dump_err_buf(struct mlx4_dev *dev) 215static void dump_err_buf(struct mlx4_dev *dev)
55{ 216{
@@ -67,58 +228,40 @@ static void poll_catas(unsigned long dev_ptr)
67{ 228{
68 struct mlx4_dev *dev = (struct mlx4_dev *) dev_ptr; 229 struct mlx4_dev *dev = (struct mlx4_dev *) dev_ptr;
69 struct mlx4_priv *priv = mlx4_priv(dev); 230 struct mlx4_priv *priv = mlx4_priv(dev);
231 u32 slave_read;
70 232
71 if (readl(priv->catas_err.map)) { 233 if (mlx4_is_slave(dev)) {
72 /* If the device is off-line, we cannot try to recover it */ 234 slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
73 if (pci_channel_offline(dev->pdev)) 235 if (mlx4_comm_internal_err(slave_read)) {
74 mod_timer(&priv->catas_err.timer, 236 mlx4_warn(dev, "Internal error detected on the communication channel\n");
75 round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL)); 237 goto internal_err;
76 else {
77 dump_err_buf(dev);
78 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, 0);
79
80 if (internal_err_reset) {
81 spin_lock(&catas_lock);
82 list_add(&priv->catas_err.list, &catas_list);
83 spin_unlock(&catas_lock);
84
85 queue_work(mlx4_wq, &catas_work);
86 }
87 } 238 }
88 } else 239 } else if (readl(priv->catas_err.map)) {
89 mod_timer(&priv->catas_err.timer, 240 dump_err_buf(dev);
90 round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL)); 241 goto internal_err;
242 }
243
244 if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
245 mlx4_warn(dev, "Internal error mark was detected on device\n");
246 goto internal_err;
247 }
248
249 mod_timer(&priv->catas_err.timer,
250 round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL));
251 return;
252
253internal_err:
254 if (mlx4_internal_err_reset)
255 queue_work(dev->persist->catas_wq, &dev->persist->catas_work);
91} 256}
92 257
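The reworked poller above now has three triggers: an internal-error indication on the VF comm channel, a non-zero catastrophic error buffer on the PF, or an already-latched internal-error state; any of them queues the per-device recovery work, otherwise the timer is simply re-armed. A compressed userspace outline of that flow, with all helpers as stand-ins:

#include <stdbool.h>
#include <stdio.h>

static bool slave_channel_reports_error(void) { return false; }
static bool catas_buffer_nonzero(void)        { return true;  }
static bool already_in_error_state(void)      { return false; }

static void queue_recovery_work(void) { puts("recovery work queued"); }
static void rearm_poll_timer(void)    { puts("timer re-armed");       }

static void poll_catas(bool is_slave)
{
        if (is_slave) {
                if (slave_channel_reports_error())
                        goto internal_err;
        } else if (catas_buffer_nonzero()) {
                goto internal_err;
        }

        if (already_in_error_state())
                goto internal_err;

        rearm_poll_timer();
        return;

internal_err:
        queue_recovery_work();
}

int main(void)
{
        poll_catas(false);      /* PF path: catas buffer set -> recovery */
        poll_catas(true);       /* VF path: channel healthy -> timer re-armed */
        return 0;
}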
93static void catas_reset(struct work_struct *work) 258static void catas_reset(struct work_struct *work)
94{ 259{
95 struct mlx4_priv *priv, *tmppriv; 260 struct mlx4_dev_persistent *persist =
96 struct mlx4_dev *dev; 261 container_of(work, struct mlx4_dev_persistent,
262 catas_work);
97 263
98 LIST_HEAD(tlist); 264 mlx4_handle_error_state(persist);
99 int ret;
100
101 spin_lock_irq(&catas_lock);
102 list_splice_init(&catas_list, &tlist);
103 spin_unlock_irq(&catas_lock);
104
105 list_for_each_entry_safe(priv, tmppriv, &tlist, catas_err.list) {
106 struct pci_dev *pdev = priv->dev.pdev;
107
108 /* If the device is off-line, we cannot reset it */
109 if (pci_channel_offline(pdev))
110 continue;
111
112 ret = mlx4_restart_one(priv->dev.pdev);
113 /* 'priv' now is not valid */
114 if (ret)
115 pr_err("mlx4 %s: Reset failed (%d)\n",
116 pci_name(pdev), ret);
117 else {
118 dev = pci_get_drvdata(pdev);
119 mlx4_dbg(dev, "Reset succeeded\n");
120 }
121 }
122} 265}
123 266
124void mlx4_start_catas_poll(struct mlx4_dev *dev) 267void mlx4_start_catas_poll(struct mlx4_dev *dev)
@@ -126,22 +269,21 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev)
126 struct mlx4_priv *priv = mlx4_priv(dev); 269 struct mlx4_priv *priv = mlx4_priv(dev);
127 phys_addr_t addr; 270 phys_addr_t addr;
128 271
129 /*If we are in SRIOV the default of the module param must be 0*/
130 if (mlx4_is_mfunc(dev))
131 internal_err_reset = 0;
132
133 INIT_LIST_HEAD(&priv->catas_err.list); 272 INIT_LIST_HEAD(&priv->catas_err.list);
134 init_timer(&priv->catas_err.timer); 273 init_timer(&priv->catas_err.timer);
135 priv->catas_err.map = NULL; 274 priv->catas_err.map = NULL;
136 275
137 addr = pci_resource_start(dev->pdev, priv->fw.catas_bar) + 276 if (!mlx4_is_slave(dev)) {
138 priv->fw.catas_offset; 277 addr = pci_resource_start(dev->persist->pdev,
278 priv->fw.catas_bar) +
279 priv->fw.catas_offset;
139 280
140 priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4); 281 priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
141 if (!priv->catas_err.map) { 282 if (!priv->catas_err.map) {
142 mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n", 283 mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n",
143 (unsigned long long) addr); 284 (unsigned long long)addr);
144 return; 285 return;
286 }
145 } 287 }
146 288
147 priv->catas_err.timer.data = (unsigned long) dev; 289 priv->catas_err.timer.data = (unsigned long) dev;
@@ -157,15 +299,29 @@ void mlx4_stop_catas_poll(struct mlx4_dev *dev)
157 299
158 del_timer_sync(&priv->catas_err.timer); 300 del_timer_sync(&priv->catas_err.timer);
159 301
160 if (priv->catas_err.map) 302 if (priv->catas_err.map) {
161 iounmap(priv->catas_err.map); 303 iounmap(priv->catas_err.map);
304 priv->catas_err.map = NULL;
305 }
162 306
163 spin_lock_irq(&catas_lock); 307 if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION)
164 list_del(&priv->catas_err.list); 308 flush_workqueue(dev->persist->catas_wq);
165 spin_unlock_irq(&catas_lock);
166} 309}
167 310
168void __init mlx4_catas_init(void) 311int mlx4_catas_init(struct mlx4_dev *dev)
169{ 312{
170 INIT_WORK(&catas_work, catas_reset); 313 INIT_WORK(&dev->persist->catas_work, catas_reset);
314 dev->persist->catas_wq = create_singlethread_workqueue("mlx4_health");
315 if (!dev->persist->catas_wq)
316 return -ENOMEM;
317
318 return 0;
319}
320
321void mlx4_catas_end(struct mlx4_dev *dev)
322{
323 if (dev->persist->catas_wq) {
324 destroy_workqueue(dev->persist->catas_wq);
325 dev->persist->catas_wq = NULL;
326 }
171} 327}
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 5c93d1451c44..a681d7c0bb9f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -42,6 +42,7 @@
42#include <linux/mlx4/device.h> 42#include <linux/mlx4/device.h>
43#include <linux/semaphore.h> 43#include <linux/semaphore.h>
44#include <rdma/ib_smi.h> 44#include <rdma/ib_smi.h>
45#include <linux/delay.h>
45 46
46#include <asm/io.h> 47#include <asm/io.h>
47 48
@@ -182,6 +183,72 @@ static u8 mlx4_errno_to_status(int errno)
182 } 183 }
183} 184}
184 185
186static int mlx4_internal_err_ret_value(struct mlx4_dev *dev, u16 op,
187 u8 op_modifier)
188{
189 switch (op) {
190 case MLX4_CMD_UNMAP_ICM:
191 case MLX4_CMD_UNMAP_ICM_AUX:
192 case MLX4_CMD_UNMAP_FA:
193 case MLX4_CMD_2RST_QP:
194 case MLX4_CMD_HW2SW_EQ:
195 case MLX4_CMD_HW2SW_CQ:
196 case MLX4_CMD_HW2SW_SRQ:
197 case MLX4_CMD_HW2SW_MPT:
198 case MLX4_CMD_CLOSE_HCA:
199 case MLX4_QP_FLOW_STEERING_DETACH:
200 case MLX4_CMD_FREE_RES:
201 case MLX4_CMD_CLOSE_PORT:
202 return CMD_STAT_OK;
203
204 case MLX4_CMD_QP_ATTACH:
205 /* On Detach case return success */
206 if (op_modifier == 0)
207 return CMD_STAT_OK;
208 return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
209
210 default:
211 return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
212 }
213}
214
215static int mlx4_closing_cmd_fatal_error(u16 op, u8 fw_status)
216{
217 /* Any error during the closing commands below is considered fatal */
218 if (op == MLX4_CMD_CLOSE_HCA ||
219 op == MLX4_CMD_HW2SW_EQ ||
220 op == MLX4_CMD_HW2SW_CQ ||
221 op == MLX4_CMD_2RST_QP ||
222 op == MLX4_CMD_HW2SW_SRQ ||
223 op == MLX4_CMD_SYNC_TPT ||
224 op == MLX4_CMD_UNMAP_ICM ||
225 op == MLX4_CMD_UNMAP_ICM_AUX ||
226 op == MLX4_CMD_UNMAP_FA)
227 return 1;
228 /* Error on MLX4_CMD_HW2SW_MPT is fatal except when fw status equals
229 * CMD_STAT_REG_BOUND.
230 * This status indicates that memory region has memory windows bound to it
231 * which may result from invalid user space usage and is not fatal.
232 */
233 if (op == MLX4_CMD_HW2SW_MPT && fw_status != CMD_STAT_REG_BOUND)
234 return 1;
235 return 0;
236}
237
238static int mlx4_cmd_reset_flow(struct mlx4_dev *dev, u16 op, u8 op_modifier,
239 int err)
240{
241 /* Only if reset flow is really active return code is based on
242 * command, otherwise current error code is returned.
243 */
244 if (mlx4_internal_err_reset) {
245 mlx4_enter_error_state(dev->persist);
246 err = mlx4_internal_err_ret_value(dev, op, op_modifier);
247 }
248
249 return err;
250}
251
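mlx4_internal_err_ret_value() encodes the recovery policy above: while the device sits in internal-error state, teardown-style commands pretend to succeed so cleanup can run to completion, and everything else surfaces an error to its caller. A toy version with invented opcodes:

#include <stdio.h>

enum { CMD_CLOSE_PORT = 1, CMD_FREE_RES = 2, CMD_QUERY_PORT = 3 };

static int internal_err_ret_value(int op)
{
        switch (op) {
        case CMD_CLOSE_PORT:
        case CMD_FREE_RES:
                return 0;       /* let teardown continue as if it worked */
        default:
                return -5;      /* -EIO: the caller must handle the failure */
        }
}

int main(void)
{
        printf("CLOSE_PORT -> %d\n", internal_err_ret_value(CMD_CLOSE_PORT));
        printf("QUERY_PORT -> %d\n", internal_err_ret_value(CMD_QUERY_PORT));
        return 0;
}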
185static int comm_pending(struct mlx4_dev *dev) 252static int comm_pending(struct mlx4_dev *dev)
186{ 253{
187 struct mlx4_priv *priv = mlx4_priv(dev); 254 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -190,16 +257,30 @@ static int comm_pending(struct mlx4_dev *dev)
190 return (swab32(status) >> 31) != priv->cmd.comm_toggle; 257 return (swab32(status) >> 31) != priv->cmd.comm_toggle;
191} 258}
192 259
193static void mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param) 260static int mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
194{ 261{
195 struct mlx4_priv *priv = mlx4_priv(dev); 262 struct mlx4_priv *priv = mlx4_priv(dev);
196 u32 val; 263 u32 val;
197 264
265 /* To avoid writing to unknown addresses after the device state was
266 * changed to internal error and the function was rest,
 266 * changed to internal error and the function was reset,
267 * check the INTERNAL_ERROR flag which is updated under
268 * device_state_mutex lock.
269 */
270 mutex_lock(&dev->persist->device_state_mutex);
271
272 if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
273 mutex_unlock(&dev->persist->device_state_mutex);
274 return -EIO;
275 }
276
198 priv->cmd.comm_toggle ^= 1; 277 priv->cmd.comm_toggle ^= 1;
199 val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31); 278 val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
200 __raw_writel((__force u32) cpu_to_be32(val), 279 __raw_writel((__force u32) cpu_to_be32(val),
201 &priv->mfunc.comm->slave_write); 280 &priv->mfunc.comm->slave_write);
202 mmiowb(); 281 mmiowb();
282 mutex_unlock(&dev->persist->device_state_mutex);
283 return 0;
203} 284}
204 285
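mlx4_comm_cmd_post() now checks the INTERNAL_ERROR flag under device_state_mutex, the same lock mlx4_enter_error_state() holds while resetting the chip, so a command can never be written to a device that has already been torn down. A pthread-based model of that guard; the error codes and helpers are placeholders:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static int internal_error;              /* MLX4_DEVICE_STATE_INTERNAL_ERROR */

static int comm_cmd_post(unsigned int val)
{
        int ret = 0;

        pthread_mutex_lock(&state_lock);
        if (internal_error)
                ret = -5;               /* -EIO: refuse to touch the device */
        else
                printf("posting 0x%x\n", val);  /* the real writel() goes here */
        pthread_mutex_unlock(&state_lock);

        return ret;
}

static void enter_error_state(void)
{
        pthread_mutex_lock(&state_lock);
        internal_error = 1;             /* chip is reset while holding the lock */
        pthread_mutex_unlock(&state_lock);
}

int main(void)
{
        comm_cmd_post(0x1);             /* goes through */
        enter_error_state();
        printf("ret = %d\n", comm_cmd_post(0x2));       /* -5, write suppressed */
        return 0;
}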
205static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param, 286static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
@@ -219,7 +300,13 @@ static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
219 300
220 /* Write command */ 301 /* Write command */
221 down(&priv->cmd.poll_sem); 302 down(&priv->cmd.poll_sem);
222 mlx4_comm_cmd_post(dev, cmd, param); 303 if (mlx4_comm_cmd_post(dev, cmd, param)) {
304 /* Only in case the device state is INTERNAL_ERROR,
305 * mlx4_comm_cmd_post returns with an error
306 */
307 err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
308 goto out;
309 }
223 310
224 end = msecs_to_jiffies(timeout) + jiffies; 311 end = msecs_to_jiffies(timeout) + jiffies;
225 while (comm_pending(dev) && time_before(jiffies, end)) 312 while (comm_pending(dev) && time_before(jiffies, end))
@@ -231,18 +318,23 @@ static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
231 * is MLX4_DELAY_RESET_SLAVE*/ 318 * is MLX4_DELAY_RESET_SLAVE*/
232 if ((MLX4_COMM_CMD_RESET == cmd)) { 319 if ((MLX4_COMM_CMD_RESET == cmd)) {
233 err = MLX4_DELAY_RESET_SLAVE; 320 err = MLX4_DELAY_RESET_SLAVE;
321 goto out;
234 } else { 322 } else {
235 mlx4_warn(dev, "Communication channel timed out\n"); 323 mlx4_warn(dev, "Communication channel command 0x%x timed out\n",
236 err = -ETIMEDOUT; 324 cmd);
325 err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
237 } 326 }
238 } 327 }
239 328
329 if (err)
330 mlx4_enter_error_state(dev->persist);
331out:
240 up(&priv->cmd.poll_sem); 332 up(&priv->cmd.poll_sem);
241 return err; 333 return err;
242} 334}
243 335
244static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op, 336static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 vhcr_cmd,
245 u16 param, unsigned long timeout) 337 u16 param, u16 op, unsigned long timeout)
246{ 338{
247 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; 339 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
248 struct mlx4_cmd_context *context; 340 struct mlx4_cmd_context *context;
@@ -258,34 +350,49 @@ static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
258 cmd->free_head = context->next; 350 cmd->free_head = context->next;
259 spin_unlock(&cmd->context_lock); 351 spin_unlock(&cmd->context_lock);
260 352
261 init_completion(&context->done); 353 reinit_completion(&context->done);
262 354
263 mlx4_comm_cmd_post(dev, op, param); 355 if (mlx4_comm_cmd_post(dev, vhcr_cmd, param)) {
356 /* Only in case the device state is INTERNAL_ERROR,
357 * mlx4_comm_cmd_post returns with an error
358 */
359 err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
360 goto out;
361 }
264 362
265 if (!wait_for_completion_timeout(&context->done, 363 if (!wait_for_completion_timeout(&context->done,
266 msecs_to_jiffies(timeout))) { 364 msecs_to_jiffies(timeout))) {
267 mlx4_warn(dev, "communication channel command 0x%x timed out\n", 365 mlx4_warn(dev, "communication channel command 0x%x (op=0x%x) timed out\n",
268 op); 366 vhcr_cmd, op);
269 err = -EBUSY; 367 goto out_reset;
270 goto out;
271 } 368 }
272 369
273 err = context->result; 370 err = context->result;
274 if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) { 371 if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
275 mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n", 372 mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
276 op, context->fw_status); 373 vhcr_cmd, context->fw_status);
277 goto out; 374 if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
375 goto out_reset;
278 } 376 }
279 377
280out:
281 /* wait for comm channel ready 378 /* wait for comm channel ready
282 * this is necessary for prevention the race 379 * this is necessary for prevention the race
283 * when switching between event to polling mode 380 * when switching between event to polling mode
381 * Skipping this section in case the device is in FATAL_ERROR state,
382 * In this state, no commands are sent via the comm channel until
383 * the device has returned from reset.
284 */ 384 */
285 end = msecs_to_jiffies(timeout) + jiffies; 385 if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
286 while (comm_pending(dev) && time_before(jiffies, end)) 386 end = msecs_to_jiffies(timeout) + jiffies;
287 cond_resched(); 387 while (comm_pending(dev) && time_before(jiffies, end))
388 cond_resched();
389 }
390 goto out;
288 391
392out_reset:
393 err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
394 mlx4_enter_error_state(dev->persist);
395out:
289 spin_lock(&cmd->context_lock); 396 spin_lock(&cmd->context_lock);
290 context->next = cmd->free_head; 397 context->next = cmd->free_head;
291 cmd->free_head = context - cmd->context; 398 cmd->free_head = context - cmd->context;
@@ -296,10 +403,13 @@ out:
296} 403}
297 404
298int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param, 405int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
299 unsigned long timeout) 406 u16 op, unsigned long timeout)
300{ 407{
408 if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
409 return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
410
301 if (mlx4_priv(dev)->cmd.use_events) 411 if (mlx4_priv(dev)->cmd.use_events)
302 return mlx4_comm_cmd_wait(dev, cmd, param, timeout); 412 return mlx4_comm_cmd_wait(dev, cmd, param, op, timeout);
303 return mlx4_comm_cmd_poll(dev, cmd, param, timeout); 413 return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
304} 414}
305 415
@@ -307,7 +417,7 @@ static int cmd_pending(struct mlx4_dev *dev)
307{ 417{
308 u32 status; 418 u32 status;
309 419
310 if (pci_channel_offline(dev->pdev)) 420 if (pci_channel_offline(dev->persist->pdev))
311 return -EIO; 421 return -EIO;
312 422
313 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET); 423 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
@@ -323,17 +433,21 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
323{ 433{
324 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; 434 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
325 u32 __iomem *hcr = cmd->hcr; 435 u32 __iomem *hcr = cmd->hcr;
326 int ret = -EAGAIN; 436 int ret = -EIO;
327 unsigned long end; 437 unsigned long end;
328 438
329 mutex_lock(&cmd->hcr_mutex); 439 mutex_lock(&dev->persist->device_state_mutex);
330 440 /* To avoid writing to unknown addresses after the device state was
331 if (pci_channel_offline(dev->pdev)) { 441 * changed to internal error and the chip was reset,
442 * check the INTERNAL_ERROR flag which is updated under
443 * device_state_mutex lock.
444 */
445 if (pci_channel_offline(dev->persist->pdev) ||
446 (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
332 /* 447 /*
333 * Device is going through error recovery 448 * Device is going through error recovery
334 * and cannot accept commands. 449 * and cannot accept commands.
335 */ 450 */
336 ret = -EIO;
337 goto out; 451 goto out;
338 } 452 }
339 453
@@ -342,12 +456,11 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
342 end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS); 456 end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);
343 457
344 while (cmd_pending(dev)) { 458 while (cmd_pending(dev)) {
345 if (pci_channel_offline(dev->pdev)) { 459 if (pci_channel_offline(dev->persist->pdev)) {
346 /* 460 /*
347 * Device is going through error recovery 461 * Device is going through error recovery
348 * and cannot accept commands. 462 * and cannot accept commands.
349 */ 463 */
350 ret = -EIO;
351 goto out; 464 goto out;
352 } 465 }
353 466
@@ -391,7 +504,11 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
391 ret = 0; 504 ret = 0;
392 505
393out: 506out:
394 mutex_unlock(&cmd->hcr_mutex); 507 if (ret)
508 mlx4_warn(dev, "Could not post command 0x%x: ret=%d, in_param=0x%llx, in_mod=0x%x, op_mod=0x%x\n",
509 op, ret, in_param, in_modifier, op_modifier);
510 mutex_unlock(&dev->persist->device_state_mutex);
511
395 return ret; 512 return ret;
396} 513}
397 514
@@ -428,8 +545,11 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
428 } 545 }
429 ret = mlx4_status_to_errno(vhcr->status); 546 ret = mlx4_status_to_errno(vhcr->status);
430 } 547 }
548 if (ret &&
549 dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
550 ret = mlx4_internal_err_ret_value(dev, op, op_modifier);
431 } else { 551 } else {
432 ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0, 552 ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0, op,
433 MLX4_COMM_TIME + timeout); 553 MLX4_COMM_TIME + timeout);
434 if (!ret) { 554 if (!ret) {
435 if (out_is_imm) { 555 if (out_is_imm) {
@@ -443,9 +563,14 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
443 } 563 }
444 } 564 }
445 ret = mlx4_status_to_errno(vhcr->status); 565 ret = mlx4_status_to_errno(vhcr->status);
446 } else 566 } else {
447 mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n", 567 if (dev->persist->state &
448 op); 568 MLX4_DEVICE_STATE_INTERNAL_ERROR)
569 ret = mlx4_internal_err_ret_value(dev, op,
570 op_modifier);
571 else
572 mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n", op);
573 }
449 } 574 }
450 575
451 mutex_unlock(&priv->cmd.slave_cmd_mutex); 576 mutex_unlock(&priv->cmd.slave_cmd_mutex);
@@ -464,12 +589,12 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
464 589
465 down(&priv->cmd.poll_sem); 590 down(&priv->cmd.poll_sem);
466 591
467 if (pci_channel_offline(dev->pdev)) { 592 if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
468 /* 593 /*
469 * Device is going through error recovery 594 * Device is going through error recovery
470 * and cannot accept commands. 595 * and cannot accept commands.
471 */ 596 */
472 err = -EIO; 597 err = mlx4_internal_err_ret_value(dev, op, op_modifier);
473 goto out; 598 goto out;
474 } 599 }
475 600
@@ -483,16 +608,21 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
483 err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0, 608 err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
484 in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0); 609 in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
485 if (err) 610 if (err)
486 goto out; 611 goto out_reset;
487 612
488 end = msecs_to_jiffies(timeout) + jiffies; 613 end = msecs_to_jiffies(timeout) + jiffies;
489 while (cmd_pending(dev) && time_before(jiffies, end)) { 614 while (cmd_pending(dev) && time_before(jiffies, end)) {
490 if (pci_channel_offline(dev->pdev)) { 615 if (pci_channel_offline(dev->persist->pdev)) {
491 /* 616 /*
492 * Device is going through error recovery 617 * Device is going through error recovery
493 * and cannot accept commands. 618 * and cannot accept commands.
494 */ 619 */
495 err = -EIO; 620 err = -EIO;
621 goto out_reset;
622 }
623
624 if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
625 err = mlx4_internal_err_ret_value(dev, op, op_modifier);
496 goto out; 626 goto out;
497 } 627 }
498 628
@@ -502,8 +632,8 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
502 if (cmd_pending(dev)) { 632 if (cmd_pending(dev)) {
503 mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n", 633 mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
504 op); 634 op);
505 err = -ETIMEDOUT; 635 err = -EIO;
506 goto out; 636 goto out_reset;
507 } 637 }
508 638
509 if (out_is_imm) 639 if (out_is_imm)
@@ -515,10 +645,17 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
515 stat = be32_to_cpu((__force __be32) 645 stat = be32_to_cpu((__force __be32)
516 __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24; 646 __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
517 err = mlx4_status_to_errno(stat); 647 err = mlx4_status_to_errno(stat);
518 if (err) 648 if (err) {
519 mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n", 649 mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
520 op, stat); 650 op, stat);
651 if (mlx4_closing_cmd_fatal_error(op, stat))
652 goto out_reset;
653 goto out;
654 }
521 655
656out_reset:
657 if (err)
658 err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
522out: 659out:
523 up(&priv->cmd.poll_sem); 660 up(&priv->cmd.poll_sem);
524 return err; 661 return err;
@@ -565,17 +702,19 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
565 goto out; 702 goto out;
566 } 703 }
567 704
568 init_completion(&context->done); 705 reinit_completion(&context->done);
569 706
570 mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0, 707 err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
571 in_modifier, op_modifier, op, context->token, 1); 708 in_modifier, op_modifier, op, context->token, 1);
709 if (err)
710 goto out_reset;
572 711
573 if (!wait_for_completion_timeout(&context->done, 712 if (!wait_for_completion_timeout(&context->done,
574 msecs_to_jiffies(timeout))) { 713 msecs_to_jiffies(timeout))) {
575 mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n", 714 mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
576 op); 715 op);
577 err = -EBUSY; 716 err = -EIO;
578 goto out; 717 goto out_reset;
579 } 718 }
580 719
581 err = context->result; 720 err = context->result;
@@ -592,12 +731,20 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
592 else 731 else
593 mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n", 732 mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
594 op, context->fw_status); 733 op, context->fw_status);
734 if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
735 err = mlx4_internal_err_ret_value(dev, op, op_modifier);
736 else if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
737 goto out_reset;
738
595 goto out; 739 goto out;
596 } 740 }
597 741
598 if (out_is_imm) 742 if (out_is_imm)
599 *out_param = context->out_param; 743 *out_param = context->out_param;
600 744
745out_reset:
746 if (err)
747 err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
601out: 748out:
602 spin_lock(&cmd->context_lock); 749 spin_lock(&cmd->context_lock);
603 context->next = cmd->free_head; 750 context->next = cmd->free_head;
@@ -612,10 +759,13 @@ int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
612 int out_is_imm, u32 in_modifier, u8 op_modifier, 759 int out_is_imm, u32 in_modifier, u8 op_modifier,
613 u16 op, unsigned long timeout, int native) 760 u16 op, unsigned long timeout, int native)
614{ 761{
615 if (pci_channel_offline(dev->pdev)) 762 if (pci_channel_offline(dev->persist->pdev))
616 return -EIO; 763 return mlx4_cmd_reset_flow(dev, op, op_modifier, -EIO);
617 764
618 if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) { 765 if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
766 if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
767 return mlx4_internal_err_ret_value(dev, op,
768 op_modifier);
619 if (mlx4_priv(dev)->cmd.use_events) 769 if (mlx4_priv(dev)->cmd.use_events)
620 return mlx4_cmd_wait(dev, in_param, out_param, 770 return mlx4_cmd_wait(dev, in_param, out_param,
621 out_is_imm, in_modifier, 771 out_is_imm, in_modifier,
@@ -631,7 +781,7 @@ int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
631EXPORT_SYMBOL_GPL(__mlx4_cmd); 781EXPORT_SYMBOL_GPL(__mlx4_cmd);
632 782
633 783
634static int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev) 784int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
635{ 785{
636 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL, 786 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
637 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 787 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
@@ -751,7 +901,9 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
751 index = be32_to_cpu(smp->attr_mod); 901 index = be32_to_cpu(smp->attr_mod);
752 if (port < 1 || port > dev->caps.num_ports) 902 if (port < 1 || port > dev->caps.num_ports)
753 return -EINVAL; 903 return -EINVAL;
754 table = kcalloc(dev->caps.pkey_table_len[port], sizeof *table, GFP_KERNEL); 904 table = kcalloc((dev->caps.pkey_table_len[port] / 32) + 1,
905 sizeof(*table) * 32, GFP_KERNEL);
906
755 if (!table) 907 if (!table)
756 return -ENOMEM; 908 return -ENOMEM;
757 /* need to get the full pkey table because the paravirtualized 909 /* need to get the full pkey table because the paravirtualized
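A note on the kcalloc() change in the hunk above: the buffer for the paravirtualized pkey table is now sized in whole 32-entry chunks rather than to the exact table length, presumably so that writing a full final chunk can never run past the end of the allocation. The arithmetic below is a minimal standalone sketch with made-up numbers, not driver code; only the 32-entry chunk size is taken from the hunk.

/* Standalone sketch (not driver code): how the new allocation size
 * relates to the pkey table length.  Values are illustrative.
 */
#include <stdio.h>

int main(void)
{
	unsigned int pkey_table_len = 128;  /* example table length */
	unsigned int chunk = 32;            /* entries per chunk, per the hunk */
	unsigned int nchunks = pkey_table_len / chunk + 1;

	/* 128 entries -> 5 chunks -> room for 160 entries, so even a
	 * full final chunk stays inside the allocation.
	 */
	printf("%u entries -> %u chunks -> %u entries allocated\n",
	       pkey_table_len, nchunks, nchunks * chunk);
	return 0;
}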
@@ -1071,7 +1223,7 @@ static struct mlx4_cmd_info cmd_info[] = {
1071 { 1223 {
1072 .opcode = MLX4_CMD_HW2SW_EQ, 1224 .opcode = MLX4_CMD_HW2SW_EQ,
1073 .has_inbox = false, 1225 .has_inbox = false,
1074 .has_outbox = true, 1226 .has_outbox = false,
1075 .out_is_imm = false, 1227 .out_is_imm = false,
1076 .encode_slave_id = true, 1228 .encode_slave_id = true,
1077 .verify = NULL, 1229 .verify = NULL,
@@ -1431,6 +1583,15 @@ static struct mlx4_cmd_info cmd_info[] = {
1431 .verify = NULL, 1583 .verify = NULL,
1432 .wrapper = mlx4_CMD_EPERM_wrapper 1584 .wrapper = mlx4_CMD_EPERM_wrapper
1433 }, 1585 },
1586 {
1587 .opcode = MLX4_CMD_VIRT_PORT_MAP,
1588 .has_inbox = false,
1589 .has_outbox = false,
1590 .out_is_imm = false,
1591 .encode_slave_id = false,
1592 .verify = NULL,
1593 .wrapper = mlx4_CMD_EPERM_wrapper
1594 },
1434}; 1595};
1435 1596
1436static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave, 1597static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
@@ -1460,8 +1621,10 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
1460 ALIGN(sizeof(struct mlx4_vhcr_cmd), 1621 ALIGN(sizeof(struct mlx4_vhcr_cmd),
1461 MLX4_ACCESS_MEM_ALIGN), 1); 1622 MLX4_ACCESS_MEM_ALIGN), 1);
1462 if (ret) { 1623 if (ret) {
1463 mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n", 1624 if (!(dev->persist->state &
1464 __func__, ret); 1625 MLX4_DEVICE_STATE_INTERNAL_ERROR))
1626 mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
1627 __func__, ret);
1465 kfree(vhcr); 1628 kfree(vhcr);
1466 return ret; 1629 return ret;
1467 } 1630 }
@@ -1500,11 +1663,14 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
1500 goto out_status; 1663 goto out_status;
1501 } 1664 }
1502 1665
1503 if (mlx4_ACCESS_MEM(dev, inbox->dma, slave, 1666 ret = mlx4_ACCESS_MEM(dev, inbox->dma, slave,
1504 vhcr->in_param, 1667 vhcr->in_param,
1505 MLX4_MAILBOX_SIZE, 1)) { 1668 MLX4_MAILBOX_SIZE, 1);
1506 mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n", 1669 if (ret) {
1507 __func__, cmd->opcode); 1670 if (!(dev->persist->state &
1671 MLX4_DEVICE_STATE_INTERNAL_ERROR))
1672 mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
1673 __func__, cmd->opcode);
1508 vhcr_cmd->status = CMD_STAT_INTERNAL_ERR; 1674 vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
1509 goto out_status; 1675 goto out_status;
1510 } 1676 }
@@ -1552,8 +1718,9 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
1552 } 1718 }
1553 1719
1554 if (err) { 1720 if (err) {
1555 mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n", 1721 if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR))
1556 vhcr->op, slave, vhcr->errno, err); 1722 mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
1723 vhcr->op, slave, vhcr->errno, err);
1557 vhcr_cmd->status = mlx4_errno_to_status(err); 1724 vhcr_cmd->status = mlx4_errno_to_status(err);
1558 goto out_status; 1725 goto out_status;
1559 } 1726 }
@@ -1568,7 +1735,9 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
1568 /* If we failed to write back the outbox after the 1735 /* If we failed to write back the outbox after the
1569 *command was successfully executed, we must fail this 1736 *command was successfully executed, we must fail this
1570 * slave, as it is now in undefined state */ 1737 * slave, as it is now in undefined state */
1571 mlx4_err(dev, "%s:Failed writing outbox\n", __func__); 1738 if (!(dev->persist->state &
1739 MLX4_DEVICE_STATE_INTERNAL_ERROR))
1740 mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
1572 goto out; 1741 goto out;
1573 } 1742 }
1574 } 1743 }
@@ -1847,8 +2016,11 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
1847 break; 2016 break;
1848 case MLX4_COMM_CMD_VHCR_POST: 2017 case MLX4_COMM_CMD_VHCR_POST:
1849 if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) && 2018 if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
1850 (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST)) 2019 (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST)) {
2020 mlx4_warn(dev, "slave:%d is out of sync, cmd=0x%x, last command=0x%x, reset is needed\n",
2021 slave, cmd, slave_state[slave].last_cmd);
1851 goto reset_slave; 2022 goto reset_slave;
2023 }
1852 2024
1853 mutex_lock(&priv->cmd.slave_cmd_mutex); 2025 mutex_lock(&priv->cmd.slave_cmd_mutex);
1854 if (mlx4_master_process_vhcr(dev, slave, NULL)) { 2026 if (mlx4_master_process_vhcr(dev, slave, NULL)) {
@@ -1882,7 +2054,18 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
1882 2054
1883reset_slave: 2055reset_slave:
1884 /* cleanup any slave resources */ 2056 /* cleanup any slave resources */
1885 mlx4_delete_all_resources_for_slave(dev, slave); 2057 if (dev->persist->interface_state & MLX4_INTERFACE_STATE_UP)
2058 mlx4_delete_all_resources_for_slave(dev, slave);
2059
2060 if (cmd != MLX4_COMM_CMD_RESET) {
2061 mlx4_warn(dev, "Turn on internal error to force reset, slave=%d, cmd=0x%x\n",
2062 slave, cmd);
2063 /* Turn on internal error so the slave resets itself immediately,
2064 * otherwise it might take until the command times out
2065 */
2066 reply |= ((u32)COMM_CHAN_EVENT_INTERNAL_ERR);
2067 }
2068
1886 spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags); 2069 spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
1887 if (!slave_state[slave].is_slave_going_down) 2070 if (!slave_state[slave].is_slave_going_down)
1888 slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET; 2071 slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
@@ -1958,17 +2141,28 @@ void mlx4_master_comm_channel(struct work_struct *work)
1958static int sync_toggles(struct mlx4_dev *dev) 2141static int sync_toggles(struct mlx4_dev *dev)
1959{ 2142{
1960 struct mlx4_priv *priv = mlx4_priv(dev); 2143 struct mlx4_priv *priv = mlx4_priv(dev);
1961 int wr_toggle; 2144 u32 wr_toggle;
1962 int rd_toggle; 2145 u32 rd_toggle;
1963 unsigned long end; 2146 unsigned long end;
1964 2147
1965 wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)) >> 31; 2148 wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write));
1966 end = jiffies + msecs_to_jiffies(5000); 2149 if (wr_toggle == 0xffffffff)
2150 end = jiffies + msecs_to_jiffies(30000);
2151 else
2152 end = jiffies + msecs_to_jiffies(5000);
1967 2153
1968 while (time_before(jiffies, end)) { 2154 while (time_before(jiffies, end)) {
1969 rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)) >> 31; 2155 rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read));
1970 if (rd_toggle == wr_toggle) { 2156 if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) {
1971 priv->cmd.comm_toggle = rd_toggle; 2157 /* PCI might be offline */
2158 msleep(100);
2159 wr_toggle = swab32(readl(&priv->mfunc.comm->
2160 slave_write));
2161 continue;
2162 }
2163
2164 if (rd_toggle >> 31 == wr_toggle >> 31) {
2165 priv->cmd.comm_toggle = rd_toggle >> 31;
1972 return 0; 2166 return 0;
1973 } 2167 }
1974 2168
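The reworked sync_toggles() above now reads the full 32-bit slave_write/slave_read words and compares only bit 31, treating an all-ones readback as a sign that the device may have dropped off the bus. Below is a minimal standalone model of that comparison; the names and types are stand-ins, only the bit-31 test and the 0xffffffff convention come from the hunk.

/* Standalone model of the toggle comparison, not driver code. */
#include <stdint.h>
#include <stdbool.h>

#define PCI_OFFLINE_PATTERN 0xffffffffu

static bool toggles_in_sync(uint32_t slave_write, uint32_t slave_read,
			    uint32_t *comm_toggle)
{
	/* An all-ones readback usually means the device is unreachable;
	 * the caller should back off and re-read rather than trust bit 31.
	 */
	if (slave_write == PCI_OFFLINE_PATTERN ||
	    slave_read == PCI_OFFLINE_PATTERN)
		return false;

	if ((slave_read >> 31) == (slave_write >> 31)) {
		*comm_toggle = slave_read >> 31;
		return true;
	}
	return false;
}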
@@ -1997,11 +2191,12 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
1997 2191
1998 if (mlx4_is_master(dev)) 2192 if (mlx4_is_master(dev))
1999 priv->mfunc.comm = 2193 priv->mfunc.comm =
2000 ioremap(pci_resource_start(dev->pdev, priv->fw.comm_bar) + 2194 ioremap(pci_resource_start(dev->persist->pdev,
2195 priv->fw.comm_bar) +
2001 priv->fw.comm_base, MLX4_COMM_PAGESIZE); 2196 priv->fw.comm_base, MLX4_COMM_PAGESIZE);
2002 else 2197 else
2003 priv->mfunc.comm = 2198 priv->mfunc.comm =
2004 ioremap(pci_resource_start(dev->pdev, 2) + 2199 ioremap(pci_resource_start(dev->persist->pdev, 2) +
2005 MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE); 2200 MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
2006 if (!priv->mfunc.comm) { 2201 if (!priv->mfunc.comm) {
2007 mlx4_err(dev, "Couldn't map communication vector\n"); 2202 mlx4_err(dev, "Couldn't map communication vector\n");
@@ -2073,13 +2268,6 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
2073 if (mlx4_init_resource_tracker(dev)) 2268 if (mlx4_init_resource_tracker(dev))
2074 goto err_thread; 2269 goto err_thread;
2075 2270
2076 err = mlx4_ARM_COMM_CHANNEL(dev);
2077 if (err) {
2078 mlx4_err(dev, " Failed to arm comm channel eq: %x\n",
2079 err);
2080 goto err_resource;
2081 }
2082
2083 } else { 2271 } else {
2084 err = sync_toggles(dev); 2272 err = sync_toggles(dev);
2085 if (err) { 2273 if (err) {
@@ -2089,8 +2277,6 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
2089 } 2277 }
2090 return 0; 2278 return 0;
2091 2279
2092err_resource:
2093 mlx4_free_resource_tracker(dev, RES_TR_FREE_ALL);
2094err_thread: 2280err_thread:
2095 flush_workqueue(priv->mfunc.master.comm_wq); 2281 flush_workqueue(priv->mfunc.master.comm_wq);
2096 destroy_workqueue(priv->mfunc.master.comm_wq); 2282 destroy_workqueue(priv->mfunc.master.comm_wq);
@@ -2107,9 +2293,9 @@ err_comm_admin:
2107err_comm: 2293err_comm:
2108 iounmap(priv->mfunc.comm); 2294 iounmap(priv->mfunc.comm);
2109err_vhcr: 2295err_vhcr:
2110 dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, 2296 dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
2111 priv->mfunc.vhcr, 2297 priv->mfunc.vhcr,
2112 priv->mfunc.vhcr_dma); 2298 priv->mfunc.vhcr_dma);
2113 priv->mfunc.vhcr = NULL; 2299 priv->mfunc.vhcr = NULL;
2114 return -ENOMEM; 2300 return -ENOMEM;
2115} 2301}
@@ -2120,7 +2306,6 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
2120 int flags = 0; 2306 int flags = 0;
2121 2307
2122 if (!priv->cmd.initialized) { 2308 if (!priv->cmd.initialized) {
2123 mutex_init(&priv->cmd.hcr_mutex);
2124 mutex_init(&priv->cmd.slave_cmd_mutex); 2309 mutex_init(&priv->cmd.slave_cmd_mutex);
2125 sema_init(&priv->cmd.poll_sem, 1); 2310 sema_init(&priv->cmd.poll_sem, 1);
2126 priv->cmd.use_events = 0; 2311 priv->cmd.use_events = 0;
@@ -2130,8 +2315,8 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
2130 } 2315 }
2131 2316
2132 if (!mlx4_is_slave(dev) && !priv->cmd.hcr) { 2317 if (!mlx4_is_slave(dev) && !priv->cmd.hcr) {
2133 priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + 2318 priv->cmd.hcr = ioremap(pci_resource_start(dev->persist->pdev,
2134 MLX4_HCR_BASE, MLX4_HCR_SIZE); 2319 0) + MLX4_HCR_BASE, MLX4_HCR_SIZE);
2135 if (!priv->cmd.hcr) { 2320 if (!priv->cmd.hcr) {
2136 mlx4_err(dev, "Couldn't map command register\n"); 2321 mlx4_err(dev, "Couldn't map command register\n");
2137 goto err; 2322 goto err;
@@ -2140,7 +2325,8 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
2140 } 2325 }
2141 2326
2142 if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) { 2327 if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) {
2143 priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE, 2328 priv->mfunc.vhcr = dma_alloc_coherent(&dev->persist->pdev->dev,
2329 PAGE_SIZE,
2144 &priv->mfunc.vhcr_dma, 2330 &priv->mfunc.vhcr_dma,
2145 GFP_KERNEL); 2331 GFP_KERNEL);
2146 if (!priv->mfunc.vhcr) 2332 if (!priv->mfunc.vhcr)
@@ -2150,7 +2336,8 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
2150 } 2336 }
2151 2337
2152 if (!priv->cmd.pool) { 2338 if (!priv->cmd.pool) {
2153 priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev, 2339 priv->cmd.pool = pci_pool_create("mlx4_cmd",
2340 dev->persist->pdev,
2154 MLX4_MAILBOX_SIZE, 2341 MLX4_MAILBOX_SIZE,
2155 MLX4_MAILBOX_SIZE, 0); 2342 MLX4_MAILBOX_SIZE, 0);
2156 if (!priv->cmd.pool) 2343 if (!priv->cmd.pool)
@@ -2166,6 +2353,27 @@ err:
2166 return -ENOMEM; 2353 return -ENOMEM;
2167} 2354}
2168 2355
2356void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev)
2357{
2358 struct mlx4_priv *priv = mlx4_priv(dev);
2359 int slave;
2360 u32 slave_read;
2361
2362 /* Report an internal error event to all
2363 * communication channels.
2364 */
2365 for (slave = 0; slave < dev->num_slaves; slave++) {
2366 slave_read = swab32(readl(&priv->mfunc.comm[slave].slave_read));
2367 slave_read |= (u32)COMM_CHAN_EVENT_INTERNAL_ERR;
2368 __raw_writel((__force u32)cpu_to_be32(slave_read),
2369 &priv->mfunc.comm[slave].slave_read);
2370 /* Make sure that our comm channel write doesn't
2371 * get mixed in with writes from another CPU.
2372 */
2373 mmiowb();
2374 }
2375}
2376
2169void mlx4_multi_func_cleanup(struct mlx4_dev *dev) 2377void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
2170{ 2378{
2171 struct mlx4_priv *priv = mlx4_priv(dev); 2379 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2181,6 +2389,7 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
2181 kfree(priv->mfunc.master.slave_state); 2389 kfree(priv->mfunc.master.slave_state);
2182 kfree(priv->mfunc.master.vf_admin); 2390 kfree(priv->mfunc.master.vf_admin);
2183 kfree(priv->mfunc.master.vf_oper); 2391 kfree(priv->mfunc.master.vf_oper);
2392 dev->num_slaves = 0;
2184 } 2393 }
2185 2394
2186 iounmap(priv->mfunc.comm); 2395 iounmap(priv->mfunc.comm);
@@ -2202,7 +2411,7 @@ void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
2202 } 2411 }
2203 if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr && 2412 if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr &&
2204 (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) { 2413 (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) {
2205 dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, 2414 dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
2206 priv->mfunc.vhcr, priv->mfunc.vhcr_dma); 2415 priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
2207 priv->mfunc.vhcr = NULL; 2416 priv->mfunc.vhcr = NULL;
2208 } 2417 }
@@ -2229,6 +2438,11 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
2229 for (i = 0; i < priv->cmd.max_cmds; ++i) { 2438 for (i = 0; i < priv->cmd.max_cmds; ++i) {
2230 priv->cmd.context[i].token = i; 2439 priv->cmd.context[i].token = i;
2231 priv->cmd.context[i].next = i + 1; 2440 priv->cmd.context[i].next = i + 1;
2441 /* To support fatal error flow, initialize all
2442 * cmd contexts to allow simulating completions
2443 * with complete() at any time.
2444 */
2445 init_completion(&priv->cmd.context[i].done);
2232 } 2446 }
2233 2447
2234 priv->cmd.context[priv->cmd.max_cmds - 1].next = -1; 2448 priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
@@ -2306,8 +2520,9 @@ u32 mlx4_comm_get_version(void)
2306 2520
2307static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf) 2521static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
2308{ 2522{
2309 if ((vf < 0) || (vf >= dev->num_vfs)) { 2523 if ((vf < 0) || (vf >= dev->persist->num_vfs)) {
2310 mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n", vf, dev->num_vfs); 2524 mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n",
2525 vf, dev->persist->num_vfs);
2311 return -EINVAL; 2526 return -EINVAL;
2312 } 2527 }
2313 2528
@@ -2316,7 +2531,7 @@ static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
2316 2531
2317int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave) 2532int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
2318{ 2533{
2319 if (slave < 1 || slave > dev->num_vfs) { 2534 if (slave < 1 || slave > dev->persist->num_vfs) {
2320 mlx4_err(dev, 2535 mlx4_err(dev,
2321 "Bad slave number:%d (number of activated slaves: %lu)\n", 2536 "Bad slave number:%d (number of activated slaves: %lu)\n",
2322 slave, dev->num_slaves); 2537 slave, dev->num_slaves);
@@ -2325,6 +2540,25 @@ int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
2325 return slave - 1; 2540 return slave - 1;
2326} 2541}
2327 2542
2543void mlx4_cmd_wake_completions(struct mlx4_dev *dev)
2544{
2545 struct mlx4_priv *priv = mlx4_priv(dev);
2546 struct mlx4_cmd_context *context;
2547 int i;
2548
2549 spin_lock(&priv->cmd.context_lock);
2550 if (priv->cmd.context) {
2551 for (i = 0; i < priv->cmd.max_cmds; ++i) {
2552 context = &priv->cmd.context[i];
2553 context->fw_status = CMD_STAT_INTERNAL_ERR;
2554 context->result =
2555 mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
2556 complete(&context->done);
2557 }
2558 }
2559 spin_unlock(&priv->cmd.context_lock);
2560}
2561
2328struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave) 2562struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave)
2329{ 2563{
2330 struct mlx4_active_ports actv_ports; 2564 struct mlx4_active_ports actv_ports;
@@ -2388,7 +2622,7 @@ struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
2388 if (port <= 0 || port > dev->caps.num_ports) 2622 if (port <= 0 || port > dev->caps.num_ports)
2389 return slaves_pport; 2623 return slaves_pport;
2390 2624
2391 for (i = 0; i < dev->num_vfs + 1; i++) { 2625 for (i = 0; i < dev->persist->num_vfs + 1; i++) {
2392 struct mlx4_active_ports actv_ports = 2626 struct mlx4_active_ports actv_ports =
2393 mlx4_get_active_ports(dev, i); 2627 mlx4_get_active_ports(dev, i);
2394 if (test_bit(port - 1, actv_ports.ports)) 2628 if (test_bit(port - 1, actv_ports.ports))
@@ -2408,7 +2642,7 @@ struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
2408 2642
2409 bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX); 2643 bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
2410 2644
2411 for (i = 0; i < dev->num_vfs + 1; i++) { 2645 for (i = 0; i < dev->persist->num_vfs + 1; i++) {
2412 struct mlx4_active_ports actv_ports = 2646 struct mlx4_active_ports actv_ports =
2413 mlx4_get_active_ports(dev, i); 2647 mlx4_get_active_ports(dev, i);
2414 if (bitmap_equal(crit_ports->ports, actv_ports.ports, 2648 if (bitmap_equal(crit_ports->ports, actv_ports.ports,
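Taken together, the cmd.c changes above implement one pattern: once the device is flagged as being in internal error, new commands fail fast with CMD_STAT_INTERNAL_ERR, waiters already parked on command completions are woken with that status, and the comm channel is used to tell slaves to reset themselves. The toy model below restates the pattern in plain C; the structs, helpers and the -5 errno stand-in are invented for the example and are not the driver's types.

/* Toy model, not driver code: fail-fast submission plus waking all
 * outstanding waiters once an internal error has been flagged.
 */
#include <stdbool.h>
#include <stdio.h>

#define CMD_STAT_INTERNAL_ERR	0x03
#define MAX_CMDS		16

struct cmd_context {
	int  fw_status;
	int  result;
	bool done;		/* stands in for a struct completion */
};

struct toy_dev {
	bool internal_error;	/* models MLX4_DEVICE_STATE_INTERNAL_ERROR */
	struct cmd_context ctx[MAX_CMDS];
};

static int status_to_errno(int status)
{
	return status == CMD_STAT_INTERNAL_ERR ? -5 /* EIO-like */ : 0;
}

/* Rough analogue of mlx4_cmd_wake_completions(): mark every context as
 * failed and complete it so sleepers return without a hardware event.
 */
static void wake_completions(struct toy_dev *dev)
{
	for (int i = 0; i < MAX_CMDS; i++) {
		dev->ctx[i].fw_status = CMD_STAT_INTERNAL_ERR;
		dev->ctx[i].result = status_to_errno(CMD_STAT_INTERNAL_ERR);
		dev->ctx[i].done = true;
	}
}

/* Rough analogue of the early bail-out added to __mlx4_cmd(). */
static int submit_cmd(struct toy_dev *dev)
{
	if (dev->internal_error)
		return status_to_errno(CMD_STAT_INTERNAL_ERR);
	/* ...post to the HCR and wait for completion as usual... */
	return 0;
}

int main(void)
{
	struct toy_dev dev = { .internal_error = true };

	wake_completions(&dev);
	printf("submit while broken: %d, ctx0 fw_status 0x%x\n",
	       submit_cmd(&dev), (unsigned)dev.ctx[0].fw_status);
	return 0;
}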
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 999014413b1a..90b5309cdb5c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -32,6 +32,7 @@
32 */ 32 */
33 33
34#include <linux/mlx4/device.h> 34#include <linux/mlx4/device.h>
35#include <linux/clocksource.h>
35 36
36#include "mlx4_en.h" 37#include "mlx4_en.h"
37 38
@@ -147,12 +148,9 @@ static int mlx4_en_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
147 struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev, 148 struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
148 ptp_clock_info); 149 ptp_clock_info);
149 unsigned long flags; 150 unsigned long flags;
150 s64 now;
151 151
152 write_lock_irqsave(&mdev->clock_lock, flags); 152 write_lock_irqsave(&mdev->clock_lock, flags);
153 now = timecounter_read(&mdev->clock); 153 timecounter_adjtime(&mdev->clock, delta);
154 now += delta;
155 timecounter_init(&mdev->clock, &mdev->cycles, now);
156 write_unlock_irqrestore(&mdev->clock_lock, flags); 154 write_unlock_irqrestore(&mdev->clock_lock, flags);
157 155
158 return 0; 156 return 0;
@@ -243,7 +241,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
243{ 241{
244 struct mlx4_dev *dev = mdev->dev; 242 struct mlx4_dev *dev = mdev->dev;
245 unsigned long flags; 243 unsigned long flags;
246 u64 ns; 244 u64 ns, zero = 0;
247 245
248 rwlock_init(&mdev->clock_lock); 246 rwlock_init(&mdev->clock_lock);
249 247
@@ -268,7 +266,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
268 /* Calculate period in seconds to call the overflow watchdog - to make 266 /* Calculate period in seconds to call the overflow watchdog - to make
269 * sure counter is checked at least once every wrap around. 267 * sure counter is checked at least once every wrap around.
270 */ 268 */
271 ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask); 269 ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask, zero, &zero);
272 do_div(ns, NSEC_PER_SEC / 2 / HZ); 270 do_div(ns, NSEC_PER_SEC / 2 / HZ);
273 mdev->overflow_period = ns; 271 mdev->overflow_period = ns;
274 272
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 82322b1c8411..22da4d0d0f05 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -70,10 +70,10 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
70 /* Allocate HW buffers on provided NUMA node. 70 /* Allocate HW buffers on provided NUMA node.
71 * dev->numa_node is used in mtt range allocation flow. 71 * dev->numa_node is used in mtt range allocation flow.
72 */ 72 */
73 set_dev_node(&mdev->dev->pdev->dev, node); 73 set_dev_node(&mdev->dev->persist->pdev->dev, node);
74 err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres, 74 err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
75 cq->buf_size, 2 * PAGE_SIZE); 75 cq->buf_size, 2 * PAGE_SIZE);
76 set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node); 76 set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
77 if (err) 77 if (err)
78 goto err_cq; 78 goto err_cq;
79 79
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 90e0f045a6bc..a7b58ba8492b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -92,7 +92,7 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
92 (u16) (mdev->dev->caps.fw_ver >> 32), 92 (u16) (mdev->dev->caps.fw_ver >> 32),
93 (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff), 93 (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
94 (u16) (mdev->dev->caps.fw_ver & 0xffff)); 94 (u16) (mdev->dev->caps.fw_ver & 0xffff));
95 strlcpy(drvinfo->bus_info, pci_name(mdev->dev->pdev), 95 strlcpy(drvinfo->bus_info, pci_name(mdev->dev->persist->pdev),
96 sizeof(drvinfo->bus_info)); 96 sizeof(drvinfo->bus_info));
97 drvinfo->n_stats = 0; 97 drvinfo->n_stats = 0;
98 drvinfo->regdump_len = 0; 98 drvinfo->regdump_len = 0;
@@ -770,22 +770,20 @@ static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
770 return 0; 770 return 0;
771 } 771 }
772 772
773 proto_admin = cpu_to_be32(ptys_adv); 773 proto_admin = cmd->autoneg == AUTONEG_ENABLE ?
774 if (speed >= 0 && speed != priv->port_state.link_speed) 774 cpu_to_be32(ptys_adv) :
775 /* If speed was set then speed decides :-) */ 775 speed_set_ptys_admin(priv, speed,
776 proto_admin = speed_set_ptys_admin(priv, speed, 776 ptys_reg.eth_proto_cap);
777 ptys_reg.eth_proto_cap);
778 777
779 proto_admin &= ptys_reg.eth_proto_cap; 778 proto_admin &= ptys_reg.eth_proto_cap;
780
781 if (proto_admin == ptys_reg.eth_proto_admin)
782 return 0; /* Nothing to change */
783
784 if (!proto_admin) { 779 if (!proto_admin) {
785 en_warn(priv, "Not supported link mode(s) requested, check supported link modes.\n"); 780 en_warn(priv, "Not supported link mode(s) requested, check supported link modes.\n");
786 return -EINVAL; /* nothing to change due to bad input */ 781 return -EINVAL; /* nothing to change due to bad input */
787 } 782 }
788 783
784 if (proto_admin == ptys_reg.eth_proto_admin)
785 return 0; /* Nothing to change */
786
789 en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n", 787 en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n",
790 be32_to_cpu(proto_admin)); 788 be32_to_cpu(proto_admin));
791 789
@@ -798,9 +796,9 @@ static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
798 return ret; 796 return ret;
799 } 797 }
800 798
801 en_warn(priv, "Port link mode changed, restarting port...\n");
802 mutex_lock(&priv->mdev->state_lock); 799 mutex_lock(&priv->mdev->state_lock);
803 if (priv->port_up) { 800 if (priv->port_up) {
801 en_warn(priv, "Port link mode changed, restarting port...\n");
804 mlx4_en_stop_port(dev, 1); 802 mlx4_en_stop_port(dev, 1);
805 if (mlx4_en_start_port(dev)) 803 if (mlx4_en_start_port(dev))
806 en_err(priv, "Failed restarting port %d\n", priv->port); 804 en_err(priv, "Failed restarting port %d\n", priv->port);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 9f16f754137b..58d5a07d0ff4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -214,6 +214,8 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
214 iounmap(mdev->uar_map); 214 iounmap(mdev->uar_map);
215 mlx4_uar_free(dev, &mdev->priv_uar); 215 mlx4_uar_free(dev, &mdev->priv_uar);
216 mlx4_pd_free(dev, mdev->priv_pdn); 216 mlx4_pd_free(dev, mdev->priv_pdn);
217 if (mdev->nb.notifier_call)
218 unregister_netdevice_notifier(&mdev->nb);
217 kfree(mdev); 219 kfree(mdev);
218} 220}
219 221
@@ -241,8 +243,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
241 spin_lock_init(&mdev->uar_lock); 243 spin_lock_init(&mdev->uar_lock);
242 244
243 mdev->dev = dev; 245 mdev->dev = dev;
244 mdev->dma_device = &(dev->pdev->dev); 246 mdev->dma_device = &dev->persist->pdev->dev;
245 mdev->pdev = dev->pdev; 247 mdev->pdev = dev->persist->pdev;
246 mdev->device_up = false; 248 mdev->device_up = false;
247 249
248 mdev->LSO_support = !!(dev->caps.flags & (1 << 15)); 250 mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
@@ -298,6 +300,12 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
298 if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i])) 300 if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
299 mdev->pndev[i] = NULL; 301 mdev->pndev[i] = NULL;
300 } 302 }
303 /* register notifier */
304 mdev->nb.notifier_call = mlx4_en_netdev_event;
305 if (register_netdevice_notifier(&mdev->nb)) {
306 mdev->nb.notifier_call = NULL;
307 mlx4_err(mdev, "Failed to create notifier\n");
308 }
301 309
302 return mdev; 310 return mdev;
303 311
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index ac6a8f1eea6c..2a210c4efb89 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2062,6 +2062,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
2062 /* Detach the netdev so tasks would not attempt to access it */ 2062 /* Detach the netdev so tasks would not attempt to access it */
2063 mutex_lock(&mdev->state_lock); 2063 mutex_lock(&mdev->state_lock);
2064 mdev->pndev[priv->port] = NULL; 2064 mdev->pndev[priv->port] = NULL;
2065 mdev->upper[priv->port] = NULL;
2065 mutex_unlock(&mdev->state_lock); 2066 mutex_unlock(&mdev->state_lock);
2066 2067
2067 mlx4_en_free_resources(priv); 2068 mlx4_en_free_resources(priv);
@@ -2201,6 +2202,10 @@ static int mlx4_en_set_features(struct net_device *netdev,
2201 return ret; 2202 return ret;
2202 } 2203 }
2203 2204
2205 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX))
2206 en_info(priv, "Turn %s TX vlan strip offload\n",
2207 (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");
2208
2204 if (features & NETIF_F_LOOPBACK) 2209 if (features & NETIF_F_LOOPBACK)
2205 priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK); 2210 priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
2206 else 2211 else
@@ -2441,6 +2446,180 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
2441#endif 2446#endif
2442}; 2447};
2443 2448
2449struct mlx4_en_bond {
2450 struct work_struct work;
2451 struct mlx4_en_priv *priv;
2452 int is_bonded;
2453 struct mlx4_port_map port_map;
2454};
2455
2456static void mlx4_en_bond_work(struct work_struct *work)
2457{
2458 struct mlx4_en_bond *bond = container_of(work,
2459 struct mlx4_en_bond,
2460 work);
2461 int err = 0;
2462 struct mlx4_dev *dev = bond->priv->mdev->dev;
2463
2464 if (bond->is_bonded) {
2465 if (!mlx4_is_bonded(dev)) {
2466 err = mlx4_bond(dev);
2467 if (err)
2468 en_err(bond->priv, "Failed to bond device\n");
2469 }
2470 if (!err) {
2471 err = mlx4_port_map_set(dev, &bond->port_map);
2472 if (err)
2473 en_err(bond->priv, "Failed to set port map [%d][%d]: %d\n",
2474 bond->port_map.port1,
2475 bond->port_map.port2,
2476 err);
2477 }
2478 } else if (mlx4_is_bonded(dev)) {
2479 err = mlx4_unbond(dev);
2480 if (err)
2481 en_err(bond->priv, "Failed to unbond device\n");
2482 }
2483 dev_put(bond->priv->dev);
2484 kfree(bond);
2485}
2486
2487static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded,
2488 u8 v2p_p1, u8 v2p_p2)
2489{
2490 struct mlx4_en_bond *bond = NULL;
2491
2492 bond = kzalloc(sizeof(*bond), GFP_ATOMIC);
2493 if (!bond)
2494 return -ENOMEM;
2495
2496 INIT_WORK(&bond->work, mlx4_en_bond_work);
2497 bond->priv = priv;
2498 bond->is_bonded = is_bonded;
2499 bond->port_map.port1 = v2p_p1;
2500 bond->port_map.port2 = v2p_p2;
2501 dev_hold(priv->dev);
2502 queue_work(priv->mdev->workqueue, &bond->work);
2503 return 0;
2504}
2505
2506int mlx4_en_netdev_event(struct notifier_block *this,
2507 unsigned long event, void *ptr)
2508{
2509 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
2510 u8 port = 0;
2511 struct mlx4_en_dev *mdev;
2512 struct mlx4_dev *dev;
2513 int i, num_eth_ports = 0;
2514 bool do_bond = true;
2515 struct mlx4_en_priv *priv;
2516 u8 v2p_port1 = 0;
2517 u8 v2p_port2 = 0;
2518
2519 if (!net_eq(dev_net(ndev), &init_net))
2520 return NOTIFY_DONE;
2521
2522 mdev = container_of(this, struct mlx4_en_dev, nb);
2523 dev = mdev->dev;
2524
2525 /* Go into bonded mode only when the two network devices on the two
2526 * ports of the same mlx4 device are slaves of the same bonding master
2527 */
2528 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
2529 ++num_eth_ports;
2530 if (!port && (mdev->pndev[i] == ndev))
2531 port = i;
2532 mdev->upper[i] = mdev->pndev[i] ?
2533 netdev_master_upper_dev_get(mdev->pndev[i]) : NULL;
2534 /* condition not met: network device is a slave */
2535 if (!mdev->upper[i])
2536 do_bond = false;
2537 if (num_eth_ports < 2)
2538 continue;
2539 /* condition not met: same master */
2540 if (mdev->upper[i] != mdev->upper[i-1])
2541 do_bond = false;
2542 }
2543 /* condition not met: 2 slaves */
2544 do_bond = (num_eth_ports == 2) ? do_bond : false;
2545
2546 /* handle only events that come with enough info */
2547 if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port)
2548 return NOTIFY_DONE;
2549
2550 priv = netdev_priv(ndev);
2551 if (do_bond) {
2552 struct netdev_notifier_bonding_info *notifier_info = ptr;
2553 struct netdev_bonding_info *bonding_info =
2554 &notifier_info->bonding_info;
2555
2556 /* required mode 1, 2 or 4 */
2557 if ((bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) &&
2558 (bonding_info->master.bond_mode != BOND_MODE_XOR) &&
2559 (bonding_info->master.bond_mode != BOND_MODE_8023AD))
2560 do_bond = false;
2561
2562 /* require exactly 2 slaves */
2563 if (bonding_info->master.num_slaves != 2)
2564 do_bond = false;
2565
2566 /* calc v2p */
2567 if (do_bond) {
2568 if (bonding_info->master.bond_mode ==
2569 BOND_MODE_ACTIVEBACKUP) {
2570 /* in active-backup mode virtual ports are
2571 * mapped to the physical port of the active
2572 * slave */
2573 if (bonding_info->slave.state ==
2574 BOND_STATE_BACKUP) {
2575 if (port == 1) {
2576 v2p_port1 = 2;
2577 v2p_port2 = 2;
2578 } else {
2579 v2p_port1 = 1;
2580 v2p_port2 = 1;
2581 }
2582 } else { /* BOND_STATE_ACTIVE */
2583 if (port == 1) {
2584 v2p_port1 = 1;
2585 v2p_port2 = 1;
2586 } else {
2587 v2p_port1 = 2;
2588 v2p_port2 = 2;
2589 }
2590 }
2591 } else { /* Active-Active */
2592 /* in active-active mode a virtual port is
2593 * mapped to the native physical port if and only
2594 * if the physical port is up */
2595 __s8 link = bonding_info->slave.link;
2596
2597 if (port == 1)
2598 v2p_port2 = 2;
2599 else
2600 v2p_port1 = 1;
2601 if ((link == BOND_LINK_UP) ||
2602 (link == BOND_LINK_FAIL)) {
2603 if (port == 1)
2604 v2p_port1 = 1;
2605 else
2606 v2p_port2 = 2;
2607 } else { /* BOND_LINK_DOWN || BOND_LINK_BACK */
2608 if (port == 1)
2609 v2p_port1 = 2;
2610 else
2611 v2p_port2 = 1;
2612 }
2613 }
2614 }
2615 }
2616
2617 mlx4_en_queue_bond_work(priv, do_bond,
2618 v2p_port1, v2p_port2);
2619
2620 return NOTIFY_DONE;
2621}
2622
2444int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, 2623int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2445 struct mlx4_en_port_profile *prof) 2624 struct mlx4_en_port_profile *prof)
2446{ 2625{
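The port-to-port mapping computed inside mlx4_en_netdev_event() above can be read as a small pure function: in active-backup mode both virtual ports follow the physical port of the active slave, while in the XOR/802.3ad (active-active) modes the peer port keeps its native mapping and the reporting port maps to itself only while its link is usable. A standalone restatement follows, as shown below; the enums are placeholders for the kernel's BOND_* constants and everything outside the branch logic is illustrative.

#include <stdio.h>

enum { MODE_ACTIVE_BACKUP, MODE_XOR_OR_LACP };
enum { SLAVE_ACTIVE, SLAVE_BACKUP };
enum { LINK_UP, LINK_FAIL, LINK_DOWN, LINK_BACK };

static void calc_v2p(int mode, int port, int slave_state, int link,
		     unsigned char *v2p_port1, unsigned char *v2p_port2)
{
	if (mode == MODE_ACTIVE_BACKUP) {
		/* Both virtual ports follow the active slave's port. */
		int active = (slave_state == SLAVE_BACKUP) ?
			     (port == 1 ? 2 : 1) : port;
		*v2p_port1 = active;
		*v2p_port2 = active;
		return;
	}

	/* Active-active: the peer port keeps its native mapping... */
	if (port == 1)
		*v2p_port2 = 2;
	else
		*v2p_port1 = 1;

	/* ...and the reporting port maps to itself only while its link
	 * is usable, otherwise it is remapped to the other physical port.
	 */
	if (link == LINK_UP || link == LINK_FAIL) {
		if (port == 1)
			*v2p_port1 = 1;
		else
			*v2p_port2 = 2;
	} else {				/* LINK_DOWN / LINK_BACK */
		if (port == 1)
			*v2p_port1 = 2;
		else
			*v2p_port2 = 1;
	}
}

int main(void)
{
	unsigned char p1 = 0, p2 = 0;

	/* Port 2 reports a downed link in an active-active mode. */
	calc_v2p(MODE_XOR_OR_LACP, 2, SLAVE_ACTIVE, LINK_DOWN, &p1, &p2);
	printf("v2p: port1 -> %u, port2 -> %u\n", p1, p2);	/* 1, 1 */
	return 0;
}

With these inputs the map becomes {port1 -> 1, port2 -> 1}, so traffic for virtual port 2 is steered through physical port 1 until the link recovers.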
@@ -2458,7 +2637,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2458 netif_set_real_num_tx_queues(dev, prof->tx_ring_num); 2637 netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
2459 netif_set_real_num_rx_queues(dev, prof->rx_ring_num); 2638 netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
2460 2639
2461 SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev); 2640 SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev);
2462 dev->dev_port = port - 1; 2641 dev->dev_port = port - 1;
2463 2642
2464 /* 2643 /*
@@ -2623,6 +2802,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2623 } 2802 }
2624 2803
2625 mdev->pndev[port] = dev; 2804 mdev->pndev[port] = dev;
2805 mdev->upper[port] = NULL;
2626 2806
2627 netif_carrier_off(dev); 2807 netif_carrier_off(dev);
2628 mlx4_en_set_default_moderation(priv); 2808 mlx4_en_set_default_moderation(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index f1a5500ff72d..34f2fdf4fe5d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -50,10 +50,14 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
50 context->mtu_msgmax = 0xff; 50 context->mtu_msgmax = 0xff;
51 if (!is_tx && !rss) 51 if (!is_tx && !rss)
52 context->rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4); 52 context->rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
53 if (is_tx) 53 if (is_tx) {
54 context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4); 54 context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
55 else 55 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP)
56 context->params2 |= MLX4_QP_BIT_FPP;
57
58 } else {
56 context->sq_size_stride = ilog2(TXBB_SIZE) - 4; 59 context->sq_size_stride = ilog2(TXBB_SIZE) - 4;
60 }
57 context->usr_page = cpu_to_be32(mdev->priv_uar.index); 61 context->usr_page = cpu_to_be32(mdev->priv_uar.index);
58 context->local_qpn = cpu_to_be32(qpn); 62 context->local_qpn = cpu_to_be32(qpn);
59 context->pri_path.ackto = 1 & 0x07; 63 context->pri_path.ackto = 1 & 0x07;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index a0474eb94aa3..698d60de1255 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -162,6 +162,10 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
162 if (mlx4_alloc_pages(priv, &ring->page_alloc[i], 162 if (mlx4_alloc_pages(priv, &ring->page_alloc[i],
163 frag_info, GFP_KERNEL | __GFP_COLD)) 163 frag_info, GFP_KERNEL | __GFP_COLD))
164 goto out; 164 goto out;
165
166 en_dbg(DRV, priv, " frag %d allocator: - size:%d frags:%d\n",
167 i, ring->page_alloc[i].page_size,
168 atomic_read(&ring->page_alloc[i].page->_count));
165 } 169 }
166 return 0; 170 return 0;
167 171
@@ -387,10 +391,10 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
387 ring->rx_info, tmp); 391 ring->rx_info, tmp);
388 392
389 /* Allocate HW buffers on provided NUMA node */ 393 /* Allocate HW buffers on provided NUMA node */
390 set_dev_node(&mdev->dev->pdev->dev, node); 394 set_dev_node(&mdev->dev->persist->pdev->dev, node);
391 err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, 395 err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
392 ring->buf_size, 2 * PAGE_SIZE); 396 ring->buf_size, 2 * PAGE_SIZE);
393 set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node); 397 set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
394 if (err) 398 if (err)
395 goto err_info; 399 goto err_info;
396 400
@@ -1059,8 +1063,9 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
1059 (eff_mtu > buf_size + frag_sizes[i]) ? 1063 (eff_mtu > buf_size + frag_sizes[i]) ?
1060 frag_sizes[i] : eff_mtu - buf_size; 1064 frag_sizes[i] : eff_mtu - buf_size;
1061 priv->frag_info[i].frag_prefix_size = buf_size; 1065 priv->frag_info[i].frag_prefix_size = buf_size;
1062 priv->frag_info[i].frag_stride = ALIGN(frag_sizes[i], 1066 priv->frag_info[i].frag_stride =
1063 SMP_CACHE_BYTES); 1067 ALIGN(priv->frag_info[i].frag_size,
1068 SMP_CACHE_BYTES);
1064 buf_size += priv->frag_info[i].frag_size; 1069 buf_size += priv->frag_info[i].frag_size;
1065 i++; 1070 i++;
1066 } 1071 }
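The en_rx.c hunk changes frag_stride to be derived from the possibly truncated frag_size of the last fragment instead of the nominal frag_sizes[i] entry. The standalone arithmetic below shows the effect with made-up values; the real eff_mtu, frag_sizes[] table and SMP_CACHE_BYTES depend on the MTU and the platform.

/* Worked example, not driver code. */
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	const int cache = 64;			/* example SMP_CACHE_BYTES */
	const int frag_sizes[] = { 1536, 4096, 4096, 4096 };
	int eff_mtu = 9022, buf_size = 0;

	for (int i = 0; buf_size < eff_mtu; i++) {
		int size = (eff_mtu > buf_size + frag_sizes[i]) ?
			   frag_sizes[i] : eff_mtu - buf_size;
		/* old: stride aligned the nominal frag_sizes[i];
		 * new: stride aligns the possibly truncated size.
		 */
		printf("frag %d: size %d, old stride %d, new stride %d\n",
		       i, size, ALIGN_UP(frag_sizes[i], cache),
		       ALIGN_UP(size, cache));
		buf_size += size;
	}
	return 0;
}

With these numbers the final fragment's stride shrinks from 4096 to 3392 bytes, so the ring wastes less space per received packet.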
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index e3357bf523df..55f9f5c5344e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -91,10 +91,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
91 ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE); 91 ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);
92 92
93 /* Allocate HW buffers on provided NUMA node */ 93 /* Allocate HW buffers on provided NUMA node */
94 set_dev_node(&mdev->dev->pdev->dev, node); 94 set_dev_node(&mdev->dev->persist->pdev->dev, node);
95 err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size, 95 err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
96 2 * PAGE_SIZE); 96 2 * PAGE_SIZE);
97 set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node); 97 set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
98 if (err) { 98 if (err) {
99 en_err(priv, "Failed allocating hwq resources\n"); 99 en_err(priv, "Failed allocating hwq resources\n");
100 goto err_bounce; 100 goto err_bounce;
@@ -682,8 +682,8 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
682 if (dev->num_tc) 682 if (dev->num_tc)
683 return skb_tx_hash(dev, skb); 683 return skb_tx_hash(dev, skb);
684 684
685 if (vlan_tx_tag_present(skb)) 685 if (skb_vlan_tag_present(skb))
686 up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT; 686 up = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT;
687 687
688 return fallback(dev, skb) % rings_p_up + up * rings_p_up; 688 return fallback(dev, skb) % rings_p_up + up * rings_p_up;
689} 689}
@@ -742,8 +742,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
742 goto tx_drop; 742 goto tx_drop;
743 } 743 }
744 744
745 if (vlan_tx_tag_present(skb)) 745 if (skb_vlan_tag_present(skb))
746 vlan_tag = vlan_tx_tag_get(skb); 746 vlan_tag = skb_vlan_tag_get(skb);
747 747
748 748
749 netdev_txq_bql_enqueue_prefetchw(ring->tx_queue); 749 netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);
@@ -930,7 +930,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
930 real_size = (real_size / 16) & 0x3f; 930 real_size = (real_size / 16) & 0x3f;
931 931
932 if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && 932 if (ring->bf_enabled && desc_size <= MAX_BF && !bounce &&
933 !vlan_tx_tag_present(skb) && send_doorbell) { 933 !skb_vlan_tag_present(skb) && send_doorbell) {
934 tx_desc->ctrl.bf_qpn = ring->doorbell_qpn | 934 tx_desc->ctrl.bf_qpn = ring->doorbell_qpn |
935 cpu_to_be32(real_size); 935 cpu_to_be32(real_size);
936 936
@@ -952,7 +952,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
952 } else { 952 } else {
953 tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag); 953 tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
954 tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * 954 tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
955 !!vlan_tx_tag_present(skb); 955 !!skb_vlan_tag_present(skb);
956 tx_desc->ctrl.fence_size = real_size; 956 tx_desc->ctrl.fence_size = real_size;
957 957
958 /* Ensure new descriptor hits memory 958 /* Ensure new descriptor hits memory
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 3d275fbaf0eb..264bc15c1ff2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -88,6 +88,8 @@ static u64 get_async_ev_mask(struct mlx4_dev *dev)
88 u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK; 88 u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK;
89 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV) 89 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
90 async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT); 90 async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT);
91 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
92 async_ev_mask |= (1ull << MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT);
91 93
92 return async_ev_mask; 94 return async_ev_mask;
93} 95}
@@ -237,7 +239,7 @@ int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port)
237 struct mlx4_eqe eqe; 239 struct mlx4_eqe eqe;
238 240
239 /* don't send if we don't have that slave */ 241
240 if (dev->num_vfs < slave) 242 if (dev->persist->num_vfs < slave)
241 return 0; 243 return 0;
242 memset(&eqe, 0, sizeof eqe); 244 memset(&eqe, 0, sizeof eqe);
243 245
@@ -255,7 +257,7 @@ int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port,
255 struct mlx4_eqe eqe; 257 struct mlx4_eqe eqe;
256 258
257 /* don't send if we don't have that slave */ 259
258 if (dev->num_vfs < slave) 260 if (dev->persist->num_vfs < slave)
259 return 0; 261 return 0;
260 memset(&eqe, 0, sizeof eqe); 262 memset(&eqe, 0, sizeof eqe);
261 263
@@ -310,7 +312,7 @@ static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event)
310 struct mlx4_slaves_pport slaves_pport = mlx4_phys_to_slaves_pport(dev, 312 struct mlx4_slaves_pport slaves_pport = mlx4_phys_to_slaves_pport(dev,
311 port); 313 port);
312 314
313 for (i = 0; i < dev->num_vfs + 1; i++) 315 for (i = 0; i < dev->persist->num_vfs + 1; i++)
314 if (test_bit(i, slaves_pport.slaves)) 316 if (test_bit(i, slaves_pport.slaves))
315 set_and_calc_slave_port_state(dev, i, port, 317 set_and_calc_slave_port_state(dev, i, port,
316 event, &gen_event); 318 event, &gen_event);
@@ -429,8 +431,14 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
429 if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) { 431 if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
430 mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n", 432 mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n",
431 i); 433 i);
432 434 /* In case of 'Reset flow' FLR can be generated for
433 mlx4_delete_all_resources_for_slave(dev, i); 435 * a slave before mlx4_load_one is done.
436 * Make sure the interface is up before trying to delete
437 * slave resources that weren't allocated yet.
438 */
439 if (dev->persist->interface_state &
440 MLX4_INTERFACE_STATE_UP)
441 mlx4_delete_all_resources_for_slave(dev, i);
434 /*return the slave to running mode*/ 442 /*return the slave to running mode*/
435 spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags); 443 spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
436 slave_state[i].last_cmd = MLX4_COMM_CMD_RESET; 444 slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
@@ -560,7 +568,8 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
560 mlx4_priv(dev)->sense.do_sense_port[port] = 1; 568 mlx4_priv(dev)->sense.do_sense_port[port] = 1;
561 if (!mlx4_is_master(dev)) 569 if (!mlx4_is_master(dev))
562 break; 570 break;
563 for (i = 0; i < dev->num_vfs + 1; i++) { 571 for (i = 0; i < dev->persist->num_vfs + 1;
572 i++) {
564 if (!test_bit(i, slaves_port.slaves)) 573 if (!test_bit(i, slaves_port.slaves))
565 continue; 574 continue;
566 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) { 575 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
@@ -596,7 +605,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
596 if (!mlx4_is_master(dev)) 605 if (!mlx4_is_master(dev))
597 break; 606 break;
598 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) 607 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
599 for (i = 0; i < dev->num_vfs + 1; i++) { 608 for (i = 0;
609 i < dev->persist->num_vfs + 1;
610 i++) {
600 if (!test_bit(i, slaves_port.slaves)) 611 if (!test_bit(i, slaves_port.slaves))
601 continue; 612 continue;
602 if (i == mlx4_master_func_num(dev)) 613 if (i == mlx4_master_func_num(dev))
@@ -727,6 +738,26 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
727 (unsigned long) eqe); 738 (unsigned long) eqe);
728 break; 739 break;
729 740
741 case MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT:
742 switch (eqe->subtype) {
743 case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE:
744 mlx4_warn(dev, "Bad cable detected on port %u\n",
745 eqe->event.bad_cable.port);
746 break;
747 case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE:
748 mlx4_warn(dev, "Unsupported cable detected\n");
749 break;
750 default:
751 mlx4_dbg(dev,
752 "Unhandled recoverable error event detected: %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, ownership=%s\n",
753 eqe->type, eqe->subtype, eq->eqn,
754 eq->cons_index, eqe->owner, eq->nent,
755 !!(eqe->owner & 0x80) ^
756 !!(eq->cons_index & eq->nent) ? "HW" : "SW");
757 break;
758 }
759 break;
760
730 case MLX4_EVENT_TYPE_EEC_CATAS_ERROR: 761 case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
731 case MLX4_EVENT_TYPE_ECC_DETECT: 762 case MLX4_EVENT_TYPE_ECC_DETECT:
732 default: 763 default:
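The debug print added for unhandled recoverable-error events mirrors the ownership test used when polling the EQ: the event queue is a power-of-two ring, hardware flips the owner bit on each pass, and software tracks which pass it is on via cons_index. A tiny standalone restatement follows; the field names mimic the driver, the rest is illustrative.

/* Sketch of the EQE ownership test, not driver code. */
#include <stdint.h>
#include <stdbool.h>

struct toy_eqe { uint8_t owner; };

static bool eqe_owned_by_hw(const struct toy_eqe *eqe,
			    uint32_t cons_index, uint32_t nent)
{
	/* nent is a power of two, so (cons_index & nent) toggles once
	 * per full pass over the ring; the entry is software-owned only
	 * when the owner bit matches the current pass.
	 */
	return !!(eqe->owner & 0x80) ^ !!(cons_index & nent);
}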
@@ -837,12 +868,10 @@ static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
837 MLX4_CMD_WRAPPED); 868 MLX4_CMD_WRAPPED);
838} 869}
839 870
840static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 871static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, int eq_num)
841 int eq_num)
842{ 872{
843 return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 873 return mlx4_cmd(dev, 0, eq_num, 1, MLX4_CMD_HW2SW_EQ,
844 0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A, 874 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
845 MLX4_CMD_WRAPPED);
846} 875}
847 876
848static int mlx4_num_eq_uar(struct mlx4_dev *dev) 877static int mlx4_num_eq_uar(struct mlx4_dev *dev)
@@ -865,7 +894,7 @@ static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
865 894
866 if (!priv->eq_table.uar_map[index]) { 895 if (!priv->eq_table.uar_map[index]) {
867 priv->eq_table.uar_map[index] = 896 priv->eq_table.uar_map[index] =
868 ioremap(pci_resource_start(dev->pdev, 2) + 897 ioremap(pci_resource_start(dev->persist->pdev, 2) +
869 ((eq->eqn / 4) << PAGE_SHIFT), 898 ((eq->eqn / 4) << PAGE_SHIFT),
870 PAGE_SIZE); 899 PAGE_SIZE);
871 if (!priv->eq_table.uar_map[index]) { 900 if (!priv->eq_table.uar_map[index]) {
@@ -928,8 +957,10 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
928 eq_context = mailbox->buf; 957 eq_context = mailbox->buf;
929 958
930 for (i = 0; i < npages; ++i) { 959 for (i = 0; i < npages; ++i) {
931 eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev, 960 eq->page_list[i].buf = dma_alloc_coherent(&dev->persist->
932 PAGE_SIZE, &t, GFP_KERNEL); 961 pdev->dev,
962 PAGE_SIZE, &t,
963 GFP_KERNEL);
933 if (!eq->page_list[i].buf) 964 if (!eq->page_list[i].buf)
934 goto err_out_free_pages; 965 goto err_out_free_pages;
935 966
@@ -995,7 +1026,7 @@ err_out_free_eq:
995err_out_free_pages: 1026err_out_free_pages:
996 for (i = 0; i < npages; ++i) 1027 for (i = 0; i < npages; ++i)
997 if (eq->page_list[i].buf) 1028 if (eq->page_list[i].buf)
998 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, 1029 dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
999 eq->page_list[i].buf, 1030 eq->page_list[i].buf,
1000 eq->page_list[i].map); 1031 eq->page_list[i].map);
1001 1032
@@ -1013,7 +1044,6 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
1013 struct mlx4_eq *eq) 1044 struct mlx4_eq *eq)
1014{ 1045{
1015 struct mlx4_priv *priv = mlx4_priv(dev); 1046 struct mlx4_priv *priv = mlx4_priv(dev);
1016 struct mlx4_cmd_mailbox *mailbox;
1017 int err; 1047 int err;
1018 int i; 1048 int i;
1019 /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with 1049 /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with
@@ -1021,36 +1051,21 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
1021 */ 1051 */
1022 int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE; 1052 int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE;
1023 1053
1024 mailbox = mlx4_alloc_cmd_mailbox(dev); 1054 err = mlx4_HW2SW_EQ(dev, eq->eqn);
1025 if (IS_ERR(mailbox))
1026 return;
1027
1028 err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
1029 if (err) 1055 if (err)
1030 mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err); 1056 mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);
1031 1057
1032 if (0) {
1033 mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
1034 for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
1035 if (i % 4 == 0)
1036 pr_cont("[%02x] ", i * 4);
1037 pr_cont(" %08x", be32_to_cpup(mailbox->buf + i * 4));
1038 if ((i + 1) % 4 == 0)
1039 pr_cont("\n");
1040 }
1041 }
1042 synchronize_irq(eq->irq); 1058 synchronize_irq(eq->irq);
1043 tasklet_disable(&eq->tasklet_ctx.task); 1059 tasklet_disable(&eq->tasklet_ctx.task);
1044 1060
1045 mlx4_mtt_cleanup(dev, &eq->mtt); 1061 mlx4_mtt_cleanup(dev, &eq->mtt);
1046 for (i = 0; i < npages; ++i) 1062 for (i = 0; i < npages; ++i)
1047 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, 1063 dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
1048 eq->page_list[i].buf, 1064 eq->page_list[i].buf,
1049 eq->page_list[i].map); 1065 eq->page_list[i].map);
1050 1066
1051 kfree(eq->page_list); 1067 kfree(eq->page_list);
1052 mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR); 1068 mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);
1053 mlx4_free_cmd_mailbox(dev, mailbox);
1054} 1069}
1055 1070
1056static void mlx4_free_irqs(struct mlx4_dev *dev) 1071static void mlx4_free_irqs(struct mlx4_dev *dev)
@@ -1060,7 +1075,7 @@ static void mlx4_free_irqs(struct mlx4_dev *dev)
1060 int i, vec; 1075 int i, vec;
1061 1076
1062 if (eq_table->have_irq) 1077 if (eq_table->have_irq)
1063 free_irq(dev->pdev->irq, dev); 1078 free_irq(dev->persist->pdev->irq, dev);
1064 1079
1065 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) 1080 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
1066 if (eq_table->eq[i].have_irq) { 1081 if (eq_table->eq[i].have_irq) {
@@ -1089,7 +1104,8 @@ static int mlx4_map_clr_int(struct mlx4_dev *dev)
1089{ 1104{
1090 struct mlx4_priv *priv = mlx4_priv(dev); 1105 struct mlx4_priv *priv = mlx4_priv(dev);
1091 1106
1092 priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) + 1107 priv->clr_base = ioremap(pci_resource_start(dev->persist->pdev,
1108 priv->fw.clr_int_bar) +
1093 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE); 1109 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
1094 if (!priv->clr_base) { 1110 if (!priv->clr_base) {
1095 mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n"); 1111 mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n");
@@ -1212,13 +1228,13 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
1212 i * MLX4_IRQNAME_SIZE, 1228 i * MLX4_IRQNAME_SIZE,
1213 MLX4_IRQNAME_SIZE, 1229 MLX4_IRQNAME_SIZE,
1214 "mlx4-comp-%d@pci:%s", i, 1230 "mlx4-comp-%d@pci:%s", i,
1215 pci_name(dev->pdev)); 1231 pci_name(dev->persist->pdev));
1216 } else { 1232 } else {
1217 snprintf(priv->eq_table.irq_names + 1233 snprintf(priv->eq_table.irq_names +
1218 i * MLX4_IRQNAME_SIZE, 1234 i * MLX4_IRQNAME_SIZE,
1219 MLX4_IRQNAME_SIZE, 1235 MLX4_IRQNAME_SIZE,
1220 "mlx4-async@pci:%s", 1236 "mlx4-async@pci:%s",
1221 pci_name(dev->pdev)); 1237 pci_name(dev->persist->pdev));
1222 } 1238 }
1223 1239
1224 eq_name = priv->eq_table.irq_names + 1240 eq_name = priv->eq_table.irq_names +
@@ -1235,8 +1251,8 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
1235 snprintf(priv->eq_table.irq_names, 1251 snprintf(priv->eq_table.irq_names,
1236 MLX4_IRQNAME_SIZE, 1252 MLX4_IRQNAME_SIZE,
1237 DRV_NAME "@pci:%s", 1253 DRV_NAME "@pci:%s",
1238 pci_name(dev->pdev)); 1254 pci_name(dev->persist->pdev));
1239 err = request_irq(dev->pdev->irq, mlx4_interrupt, 1255 err = request_irq(dev->persist->pdev->irq, mlx4_interrupt,
1240 IRQF_SHARED, priv->eq_table.irq_names, dev); 1256 IRQF_SHARED, priv->eq_table.irq_names, dev);
1241 if (err) 1257 if (err)
1242 goto err_out_async; 1258 goto err_out_async;
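The ownership test packed into the new recoverable-error debug message above ("HW"/"SW") is terse, so a minimal illustrative sketch may help; the helper name below is hypothetical and not part of the driver. The hardware toggles the EQE owner bit (0x80) on every pass over the event queue ring, and software's pass parity is (cons_index & nent), so the two sides disagree exactly while the entry is still owned by hardware:

/* Illustrative sketch only; mirrors the expression used in mlx4_eq_int().
 * eqe_owned_by_hw() is a hypothetical helper, not a driver function.
 */
static inline int eqe_owned_by_hw(u8 owner, u32 cons_index, u32 nent)
{
        /* The owner bit 0x80 flips each time HW wraps the ring;
         * (cons_index & nent) flips each time SW wraps it.
         * While the parities differ the EQE still belongs to HW;
         * once they match, SW may consume the entry.
         */
        return !!(owner & 0x80) ^ !!(cons_index & nent);
}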
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 982861d1df44..5a21e5dc94cb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -84,13 +84,10 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
84 [ 1] = "UC transport", 84 [ 1] = "UC transport",
85 [ 2] = "UD transport", 85 [ 2] = "UD transport",
86 [ 3] = "XRC transport", 86 [ 3] = "XRC transport",
87 [ 4] = "reliable multicast",
88 [ 5] = "FCoIB support",
89 [ 6] = "SRQ support", 87 [ 6] = "SRQ support",
90 [ 7] = "IPoIB checksum offload", 88 [ 7] = "IPoIB checksum offload",
91 [ 8] = "P_Key violation counter", 89 [ 8] = "P_Key violation counter",
92 [ 9] = "Q_Key violation counter", 90 [ 9] = "Q_Key violation counter",
93 [10] = "VMM",
94 [12] = "Dual Port Different Protocol (DPDP) support", 91 [12] = "Dual Port Different Protocol (DPDP) support",
95 [15] = "Big LSO headers", 92 [15] = "Big LSO headers",
96 [16] = "MW support", 93 [16] = "MW support",
@@ -99,12 +96,11 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
99 [19] = "Raw multicast support", 96 [19] = "Raw multicast support",
100 [20] = "Address vector port checking support", 97 [20] = "Address vector port checking support",
101 [21] = "UD multicast support", 98 [21] = "UD multicast support",
102 [24] = "Demand paging support",
103 [25] = "Router support",
104 [30] = "IBoE support", 99 [30] = "IBoE support",
105 [32] = "Unicast loopback support", 100 [32] = "Unicast loopback support",
106 [34] = "FCS header control", 101 [34] = "FCS header control",
107 [38] = "Wake On LAN support", 102 [37] = "Wake On LAN (port1) support",
103 [38] = "Wake On LAN (port2) support",
108 [40] = "UDP RSS support", 104 [40] = "UDP RSS support",
109 [41] = "Unicast VEP steering support", 105 [41] = "Unicast VEP steering support",
110 [42] = "Multicast VEP steering support", 106 [42] = "Multicast VEP steering support",
@@ -145,7 +141,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
145 [16] = "CONFIG DEV support", 141 [16] = "CONFIG DEV support",
146 [17] = "Asymmetric EQs support", 142 [17] = "Asymmetric EQs support",
147 [18] = "More than 80 VFs support", 143 [18] = "More than 80 VFs support",
148 [19] = "Performance optimized for limited rule configuration flow steering support" 144 [19] = "Performance optimized for limited rule configuration flow steering support",
145 [20] = "Recoverable error events support",
146 [21] = "Port Remap support"
149 }; 147 };
150 int i; 148 int i;
151 149
@@ -259,6 +257,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
259#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP 0x28 257#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP 0x28
260#define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c 258#define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c
261#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0x30 259#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0x30
260#define QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET 0x48
262 261
263#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x50 262#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x50
264#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x54 263#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x54
@@ -273,6 +272,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
273#define QUERY_FUNC_CAP_FLAG_RDMA 0x40 272#define QUERY_FUNC_CAP_FLAG_RDMA 0x40
274#define QUERY_FUNC_CAP_FLAG_ETH 0x80 273#define QUERY_FUNC_CAP_FLAG_ETH 0x80
275#define QUERY_FUNC_CAP_FLAG_QUOTAS 0x10 274#define QUERY_FUNC_CAP_FLAG_QUOTAS 0x10
275#define QUERY_FUNC_CAP_FLAG_RESD_LKEY 0x08
276#define QUERY_FUNC_CAP_FLAG_VALID_MAILBOX 0x04 276#define QUERY_FUNC_CAP_FLAG_VALID_MAILBOX 0x04
277 277
278#define QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG (1UL << 31) 278#define QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG (1UL << 31)
@@ -344,9 +344,12 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
344 } else if (vhcr->op_modifier == 0) { 344 } else if (vhcr->op_modifier == 0) {
345 struct mlx4_active_ports actv_ports = 345 struct mlx4_active_ports actv_ports =
346 mlx4_get_active_ports(dev, slave); 346 mlx4_get_active_ports(dev, slave);
347 /* enable rdma and ethernet interfaces, and new quota locations */ 347 /* enable rdma and ethernet interfaces, new quota locations,
348 * and reserved lkey
349 */
348 field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA | 350 field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
349 QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX); 351 QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX |
352 QUERY_FUNC_CAP_FLAG_RESD_LKEY);
350 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET); 353 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
351 354
352 field = min( 355 field = min(
@@ -411,6 +414,9 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
411 size = QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG | 414 size = QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG |
412 QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG; 415 QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG;
413 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET); 416 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);
417
418 size = dev->caps.reserved_lkey + ((slave << 8) & 0xFF00);
419 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
414 } else 420 } else
415 err = -EINVAL; 421 err = -EINVAL;
416 422
@@ -503,6 +509,13 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
503 MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); 509 MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
504 func_cap->reserved_eq = size & 0xFFFFFF; 510 func_cap->reserved_eq = size & 0xFFFFFF;
505 511
512 if (func_cap->flags & QUERY_FUNC_CAP_FLAG_RESD_LKEY) {
513 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
514 func_cap->reserved_lkey = size;
515 } else {
516 func_cap->reserved_lkey = 0;
517 }
518
506 func_cap->extra_flags = 0; 519 func_cap->extra_flags = 0;
507 520
508 /* Mailbox data from 0x6c and onward should only be treated if 521 /* Mailbox data from 0x6c and onward should only be treated if
@@ -851,6 +864,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
851 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE; 864 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
852 MLX4_GET(dev_cap->bmme_flags, outbox, 865 MLX4_GET(dev_cap->bmme_flags, outbox,
853 QUERY_DEV_CAP_BMME_FLAGS_OFFSET); 866 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
867 if (dev_cap->bmme_flags & MLX4_FLAG_PORT_REMAP)
868 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_REMAP;
854 MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET); 869 MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
855 if (field & 0x20) 870 if (field & 0x20)
856 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV; 871 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
@@ -859,6 +874,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
859 MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET); 874 MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
860 if (field32 & (1 << 0)) 875 if (field32 & (1 << 0))
861 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP; 876 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
877 if (field32 & (1 << 7))
878 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT;
862 MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC); 879 MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
863 if (field & 1<<6) 880 if (field & 1<<6)
864 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN; 881 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
@@ -1106,9 +1123,10 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
1106 field &= 0x7f; 1123 field &= 0x7f;
1107 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET); 1124 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);
1108 1125
1109 /* For guests, disable mw type 2 */ 1126 /* For guests, disable mw type 2 and port remap */
1110 MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); 1127 MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
1111 bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN; 1128 bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
1129 bmme_flags &= ~MLX4_FLAG_PORT_REMAP;
1112 MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); 1130 MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
1113 1131
1114 /* turn off device-managed steering capability if not enabled */ 1132 /* turn off device-managed steering capability if not enabled */
@@ -1562,6 +1580,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
1562#define INIT_HCA_VXLAN_OFFSET 0x0c 1580#define INIT_HCA_VXLAN_OFFSET 0x0c
1563#define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e 1581#define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e
1564#define INIT_HCA_FLAGS_OFFSET 0x014 1582#define INIT_HCA_FLAGS_OFFSET 0x014
1583#define INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET 0x018
1565#define INIT_HCA_QPC_OFFSET 0x020 1584#define INIT_HCA_QPC_OFFSET 0x020
1566#define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10) 1585#define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10)
1567#define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17) 1586#define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17)
@@ -1668,6 +1687,9 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
1668 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; 1687 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
1669 } 1688 }
1670 1689
1690 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
1691 *(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1 << 31);
1692
1671 /* QPC/EEC/CQC/EQC/RDMARC attributes */ 1693 /* QPC/EEC/CQC/EQC/RDMARC attributes */
1672 1694
1673 MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET); 1695 MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
@@ -1752,8 +1774,8 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
1752 MLX4_PUT(inbox, parser_params, INIT_HCA_VXLAN_OFFSET); 1774 MLX4_PUT(inbox, parser_params, INIT_HCA_VXLAN_OFFSET);
1753 } 1775 }
1754 1776
1755 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000, 1777 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA,
1756 MLX4_CMD_NATIVE); 1778 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
1757 1779
1758 if (err) 1780 if (err)
1759 mlx4_err(dev, "INIT_HCA returns %d\n", err); 1781 mlx4_err(dev, "INIT_HCA returns %d\n", err);
@@ -1879,6 +1901,36 @@ out:
1879 return err; 1901 return err;
1880} 1902}
1881 1903
1904static int mlx4_hca_core_clock_update(struct mlx4_dev *dev)
1905{
1906 struct mlx4_cmd_mailbox *mailbox;
1907 __be32 *outbox;
1908 int err;
1909
1910 mailbox = mlx4_alloc_cmd_mailbox(dev);
1911 if (IS_ERR(mailbox)) {
1912 mlx4_warn(dev, "hca_core_clock mailbox allocation failed\n");
1913 return PTR_ERR(mailbox);
1914 }
1915 outbox = mailbox->buf;
1916
1917 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
1918 MLX4_CMD_QUERY_HCA,
1919 MLX4_CMD_TIME_CLASS_B,
1920 !mlx4_is_slave(dev));
1921 if (err) {
1922 mlx4_warn(dev, "hca_core_clock update failed\n");
1923 goto out;
1924 }
1925
1926 MLX4_GET(dev->caps.hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);
1927
1928out:
1929 mlx4_free_cmd_mailbox(dev, mailbox);
1930
1931 return err;
1932}
1933
1882/* for IB-type ports only in SRIOV mode. Checks that both proxy QP0 1934/* for IB-type ports only in SRIOV mode. Checks that both proxy QP0
1883 * and real QP0 are active, so that the paravirtualized QP0 is ready 1935 * and real QP0 are active, so that the paravirtualized QP0 is ready
1884 * to operate */ 1936 * to operate */
@@ -1983,6 +2035,9 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
1983 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, 2035 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
1984 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 2036 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
1985 2037
2038 if (!err)
2039 mlx4_hca_core_clock_update(dev);
2040
1986 return err; 2041 return err;
1987} 2042}
1988EXPORT_SYMBOL_GPL(mlx4_INIT_PORT); 2043EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
@@ -2007,7 +2062,7 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
2007 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) { 2062 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
2008 if (priv->mfunc.master.init_port_ref[port] == 1) { 2063 if (priv->mfunc.master.init_port_ref[port] == 1) {
2009 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 2064 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
2010 1000, MLX4_CMD_NATIVE); 2065 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2011 if (err) 2066 if (err)
2012 return err; 2067 return err;
2013 } 2068 }
@@ -2018,7 +2073,7 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
2018 if (!priv->mfunc.master.qp0_state[port].qp0_active && 2073 if (!priv->mfunc.master.qp0_state[port].qp0_active &&
2019 priv->mfunc.master.qp0_state[port].port_active) { 2074 priv->mfunc.master.qp0_state[port].port_active) {
2020 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 2075 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
2021 1000, MLX4_CMD_NATIVE); 2076 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2022 if (err) 2077 if (err)
2023 return err; 2078 return err;
2024 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port); 2079 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
@@ -2033,15 +2088,15 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
2033 2088
2034int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port) 2089int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
2035{ 2090{
2036 return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000, 2091 return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
2037 MLX4_CMD_WRAPPED); 2092 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
2038} 2093}
2039EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT); 2094EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
2040 2095
2041int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic) 2096int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
2042{ 2097{
2043 return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000, 2098 return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA,
2044 MLX4_CMD_NATIVE); 2099 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
2045} 2100}
2046 2101
2047struct mlx4_config_dev { 2102struct mlx4_config_dev {
@@ -2049,13 +2104,16 @@ struct mlx4_config_dev {
2049 __be32 rsvd1[3]; 2104 __be32 rsvd1[3];
2050 __be16 vxlan_udp_dport; 2105 __be16 vxlan_udp_dport;
2051 __be16 rsvd2; 2106 __be16 rsvd2;
2052 __be32 rsvd3[27]; 2107 __be32 rsvd3;
2053 __be16 rsvd4; 2108 __be32 roce_flags;
2054 u8 rsvd5; 2109 __be32 rsvd4[25];
2110 __be16 rsvd5;
2111 u8 rsvd6;
2055 u8 rx_checksum_val; 2112 u8 rx_checksum_val;
2056}; 2113};
2057 2114
2058#define MLX4_VXLAN_UDP_DPORT (1 << 0) 2115#define MLX4_VXLAN_UDP_DPORT (1 << 0)
2116#define MLX4_DISABLE_RX_PORT BIT(18)
2059 2117
2060static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev) 2118static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
2061{ 2119{
@@ -2111,7 +2169,7 @@ static const u8 config_dev_csum_flags[] = {
2111int mlx4_config_dev_retrieval(struct mlx4_dev *dev, 2169int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
2112 struct mlx4_config_dev_params *params) 2170 struct mlx4_config_dev_params *params)
2113{ 2171{
2114 struct mlx4_config_dev config_dev; 2172 struct mlx4_config_dev config_dev = {0};
2115 int err; 2173 int err;
2116 u8 csum_mask; 2174 u8 csum_mask;
2117 2175
@@ -2158,6 +2216,45 @@ int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port)
2158} 2216}
2159EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port); 2217EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port);
2160 2218
2219#define CONFIG_DISABLE_RX_PORT BIT(15)
2220int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis)
2221{
2222 struct mlx4_config_dev config_dev;
2223
2224 memset(&config_dev, 0, sizeof(config_dev));
2225 config_dev.update_flags = cpu_to_be32(MLX4_DISABLE_RX_PORT);
2226 if (dis)
2227 config_dev.roce_flags =
2228 cpu_to_be32(CONFIG_DISABLE_RX_PORT);
2229
2230 return mlx4_CONFIG_DEV_set(dev, &config_dev);
2231}
2232
2233int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2)
2234{
2235 struct mlx4_cmd_mailbox *mailbox;
2236 struct {
2237 __be32 v_port1;
2238 __be32 v_port2;
2239 } *v2p;
2240 int err;
2241
2242 mailbox = mlx4_alloc_cmd_mailbox(dev);
2243 if (IS_ERR(mailbox))
2244 return -ENOMEM;
2245
2246 v2p = mailbox->buf;
2247 v2p->v_port1 = cpu_to_be32(port1);
2248 v2p->v_port2 = cpu_to_be32(port2);
2249
2250 err = mlx4_cmd(dev, mailbox->dma, 0,
2251 MLX4_SET_PORT_VIRT2PHY, MLX4_CMD_VIRT_PORT_MAP,
2252 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
2253
2254 mlx4_free_cmd_mailbox(dev, mailbox);
2255 return err;
2256}
2257
2161 2258
2162int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages) 2259int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
2163{ 2260{
@@ -2180,7 +2277,8 @@ int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
2180int mlx4_NOP(struct mlx4_dev *dev) 2277int mlx4_NOP(struct mlx4_dev *dev)
2181{ 2278{
2182 /* Input modifier of 0x1f means "finish as soon as possible." */ 2279 /* Input modifier of 0x1f means "finish as soon as possible." */
2183 return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE); 2280 return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, MLX4_CMD_TIME_CLASS_A,
2281 MLX4_CMD_NATIVE);
2184} 2282}
2185 2283
2186int mlx4_get_phys_port_id(struct mlx4_dev *dev) 2284int mlx4_get_phys_port_id(struct mlx4_dev *dev)
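The new QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET (0x48) and the other offsets in this file are consumed through the driver's MLX4_GET()/MLX4_PUT() macros, which move a big-endian field at a byte offset within the command mailbox. A minimal stand-alone sketch of the same idea for a 32-bit field (the helper name is hypothetical, not the driver's macro):

#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>

/* Hypothetical helper: read a 32-bit big-endian field located at a byte
 * offset inside a firmware mailbox and return it in CPU byte order.
 * MLX4_GET() does the equivalent for 1-, 2-, 4- and 8-byte fields.
 */
static inline u32 mbox_get_u32(const void *outbox, unsigned int offset)
{
        __be32 raw;

        memcpy(&raw, (const u8 *)outbox + offset, sizeof(raw));
        return be32_to_cpu(raw);
}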
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 62562b60fa87..f44f7f6017ed 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -147,6 +147,7 @@ struct mlx4_func_cap {
147 u32 qp0_proxy_qpn; 147 u32 qp0_proxy_qpn;
148 u32 qp1_tunnel_qpn; 148 u32 qp1_tunnel_qpn;
149 u32 qp1_proxy_qpn; 149 u32 qp1_proxy_qpn;
150 u32 reserved_lkey;
150 u8 physical_port; 151 u8 physical_port;
151 u8 port_flags; 152 u8 port_flags;
152 u8 flags1; 153 u8 flags1;
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 97c9b1db1d27..2a9dd460a95f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -56,7 +56,7 @@ static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chu
56 int i; 56 int i;
57 57
58 if (chunk->nsg > 0) 58 if (chunk->nsg > 0)
59 pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages, 59 pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages,
60 PCI_DMA_BIDIRECTIONAL); 60 PCI_DMA_BIDIRECTIONAL);
61 61
62 for (i = 0; i < chunk->npages; ++i) 62 for (i = 0; i < chunk->npages; ++i)
@@ -69,7 +69,8 @@ static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *
69 int i; 69 int i;
70 70
71 for (i = 0; i < chunk->npages; ++i) 71 for (i = 0; i < chunk->npages; ++i)
72 dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length, 72 dma_free_coherent(&dev->persist->pdev->dev,
73 chunk->mem[i].length,
73 lowmem_page_address(sg_page(&chunk->mem[i])), 74 lowmem_page_address(sg_page(&chunk->mem[i])),
74 sg_dma_address(&chunk->mem[i])); 75 sg_dma_address(&chunk->mem[i]));
75} 76}
@@ -173,7 +174,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
173 --cur_order; 174 --cur_order;
174 175
175 if (coherent) 176 if (coherent)
176 ret = mlx4_alloc_icm_coherent(&dev->pdev->dev, 177 ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
177 &chunk->mem[chunk->npages], 178 &chunk->mem[chunk->npages],
178 cur_order, gfp_mask); 179 cur_order, gfp_mask);
179 else 180 else
@@ -193,7 +194,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
193 if (coherent) 194 if (coherent)
194 ++chunk->nsg; 195 ++chunk->nsg;
195 else if (chunk->npages == MLX4_ICM_CHUNK_LEN) { 196 else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
196 chunk->nsg = pci_map_sg(dev->pdev, chunk->mem, 197 chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
197 chunk->npages, 198 chunk->npages,
198 PCI_DMA_BIDIRECTIONAL); 199 PCI_DMA_BIDIRECTIONAL);
199 200
@@ -208,7 +209,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
208 } 209 }
209 210
210 if (!coherent && chunk) { 211 if (!coherent && chunk) {
211 chunk->nsg = pci_map_sg(dev->pdev, chunk->mem, 212 chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
212 chunk->npages, 213 chunk->npages,
213 PCI_DMA_BIDIRECTIONAL); 214 PCI_DMA_BIDIRECTIONAL);
214 215
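The mechanical dev->pdev to dev->persist->pdev substitutions in icm.c, like those in eq.c and fw.c above, come from splitting the device structure into a part that is rebuilt on every internal reset and a persistent part that lives for the whole PCI probe/remove lifetime. A rough sketch of that split, with abbreviated field names (the real definitions live in the mlx4 headers and are not reproduced here):

/* Illustrative shapes only; see the mlx4 headers for the real
 * struct mlx4_dev and struct mlx4_dev_persistent definitions.
 */
struct persistent_part_sketch {
        struct pci_dev *pdev;   /* valid across mlx4_unload_one()/load_one() */
        struct mlx4_dev *dev;   /* back-pointer to the resettable device */
        int num_vfs;            /* SR-IOV setup must survive a reset */
};

struct device_part_sketch {
        struct persistent_part_sketch *persist;
        u64 flags;              /* re-derived on every load */
};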
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index 116895ac8b35..6fce58718837 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -33,11 +33,13 @@
33 33
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/export.h> 35#include <linux/export.h>
36#include <linux/errno.h>
36 37
37#include "mlx4.h" 38#include "mlx4.h"
38 39
39struct mlx4_device_context { 40struct mlx4_device_context {
40 struct list_head list; 41 struct list_head list;
42 struct list_head bond_list;
41 struct mlx4_interface *intf; 43 struct mlx4_interface *intf;
42 void *context; 44 void *context;
43}; 45};
@@ -115,6 +117,58 @@ void mlx4_unregister_interface(struct mlx4_interface *intf)
115} 117}
116EXPORT_SYMBOL_GPL(mlx4_unregister_interface); 118EXPORT_SYMBOL_GPL(mlx4_unregister_interface);
117 119
120int mlx4_do_bond(struct mlx4_dev *dev, bool enable)
121{
122 struct mlx4_priv *priv = mlx4_priv(dev);
123 struct mlx4_device_context *dev_ctx = NULL, *temp_dev_ctx;
124 unsigned long flags;
125 int ret;
126 LIST_HEAD(bond_list);
127
128 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
129 return -ENOTSUPP;
130
131 ret = mlx4_disable_rx_port_check(dev, enable);
132 if (ret) {
133 mlx4_err(dev, "Failed to %s rx port check\n",
134 enable ? "enable" : "disable");
135 return ret;
136 }
137 if (enable) {
138 dev->flags |= MLX4_FLAG_BONDED;
139 } else {
140 ret = mlx4_virt2phy_port_map(dev, 1, 2);
141 if (ret) {
142 mlx4_err(dev, "Failed to reset port map\n");
143 return ret;
144 }
145 dev->flags &= ~MLX4_FLAG_BONDED;
146 }
147
148 spin_lock_irqsave(&priv->ctx_lock, flags);
149 list_for_each_entry_safe(dev_ctx, temp_dev_ctx, &priv->ctx_list, list) {
150 if (dev_ctx->intf->flags & MLX4_INTFF_BONDING) {
151 list_add_tail(&dev_ctx->bond_list, &bond_list);
152 list_del(&dev_ctx->list);
153 }
154 }
155 spin_unlock_irqrestore(&priv->ctx_lock, flags);
156
157 list_for_each_entry(dev_ctx, &bond_list, bond_list) {
158 dev_ctx->intf->remove(dev, dev_ctx->context);
159 dev_ctx->context = dev_ctx->intf->add(dev);
160
161 spin_lock_irqsave(&priv->ctx_lock, flags);
162 list_add_tail(&dev_ctx->list, &priv->ctx_list);
163 spin_unlock_irqrestore(&priv->ctx_lock, flags);
164
165 mlx4_dbg(dev, "Interface for protocol %d restarted when bonded mode is %s\n",
166 dev_ctx->intf->protocol, enable ?
167 "enabled" : "disabled");
168 }
169 return 0;
170}
171
118void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, 172void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
119 unsigned long param) 173 unsigned long param)
120{ 174{
@@ -138,13 +192,13 @@ int mlx4_register_device(struct mlx4_dev *dev)
138 192
139 mutex_lock(&intf_mutex); 193 mutex_lock(&intf_mutex);
140 194
195 dev->persist->interface_state |= MLX4_INTERFACE_STATE_UP;
141 list_add_tail(&priv->dev_list, &dev_list); 196 list_add_tail(&priv->dev_list, &dev_list);
142 list_for_each_entry(intf, &intf_list, list) 197 list_for_each_entry(intf, &intf_list, list)
143 mlx4_add_device(intf, priv); 198 mlx4_add_device(intf, priv);
144 199
145 mutex_unlock(&intf_mutex); 200 mutex_unlock(&intf_mutex);
146 if (!mlx4_is_slave(dev)) 201 mlx4_start_catas_poll(dev);
147 mlx4_start_catas_poll(dev);
148 202
149 return 0; 203 return 0;
150} 204}
@@ -154,14 +208,14 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
154 struct mlx4_priv *priv = mlx4_priv(dev); 208 struct mlx4_priv *priv = mlx4_priv(dev);
155 struct mlx4_interface *intf; 209 struct mlx4_interface *intf;
156 210
157 if (!mlx4_is_slave(dev)) 211 mlx4_stop_catas_poll(dev);
158 mlx4_stop_catas_poll(dev);
159 mutex_lock(&intf_mutex); 212 mutex_lock(&intf_mutex);
160 213
161 list_for_each_entry(intf, &intf_list, list) 214 list_for_each_entry(intf, &intf_list, list)
162 mlx4_remove_device(intf, priv); 215 mlx4_remove_device(intf, priv);
163 216
164 list_del(&priv->dev_list); 217 list_del(&priv->dev_list);
218 dev->persist->interface_state &= ~MLX4_INTERFACE_STATE_UP;
165 219
166 mutex_unlock(&intf_mutex); 220 mutex_unlock(&intf_mutex);
167} 221}
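mlx4_do_bond() above only tears down and re-adds consumers whose interface registration carries MLX4_INTFF_BONDING. A hedged sketch of what such an opt-in registration could look like; the callbacks here are placeholders rather than real mlx4_en or mlx4_ib code:

#include <linux/mlx4/driver.h>

/* Placeholder callbacks, for illustration only. */
static void *demo_add(struct mlx4_dev *dev)
{
        return dev;     /* real consumers allocate and return their context */
}

static void demo_remove(struct mlx4_dev *dev, void *context)
{
}

static struct mlx4_interface demo_intf = {
        .add      = demo_add,
        .remove   = demo_remove,
        .protocol = MLX4_PROT_ETH,
        /* Opt in: mlx4_do_bond() calls .remove and then .add again
         * whenever bonded mode is enabled or disabled.
         */
        .flags    = MLX4_INTFF_BONDING,
};

/* mlx4_register_interface(&demo_intf) would make this consumer visible
 * to the bonding transition handled above.
 */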
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 6e08352ec994..7e487223489a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -108,6 +108,8 @@ MODULE_PARM_DESC(enable_64b_cqe_eqe,
108 MLX4_FUNC_CAP_EQE_CQE_STRIDE | \ 108 MLX4_FUNC_CAP_EQE_CQE_STRIDE | \
109 MLX4_FUNC_CAP_DMFS_A0_STATIC) 109 MLX4_FUNC_CAP_DMFS_A0_STATIC)
110 110
111#define RESET_PERSIST_MASK_FLAGS (MLX4_FLAG_SRIOV)
112
111static char mlx4_version[] = 113static char mlx4_version[] =
112 DRV_NAME ": Mellanox ConnectX core driver v" 114 DRV_NAME ": Mellanox ConnectX core driver v"
113 DRV_VERSION " (" DRV_RELDATE ")\n"; 115 DRV_VERSION " (" DRV_RELDATE ")\n";
@@ -249,7 +251,8 @@ static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
249 if (mlx4_is_master(dev)) 251 if (mlx4_is_master(dev))
250 dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE; 252 dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE;
251 } else { 253 } else {
252 mlx4_dbg(dev, "Disabling CQE stride cacheLine unsupported\n"); 254 if (cache_line_size() != 32 && cache_line_size() != 64)
255 mlx4_dbg(dev, "Disabling CQE stride, cacheLine size unsupported\n");
253 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; 256 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
254 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; 257 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
255 } 258 }
@@ -318,10 +321,11 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
318 return -ENODEV; 321 return -ENODEV;
319 } 322 }
320 323
321 if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) { 324 if (dev_cap->uar_size > pci_resource_len(dev->persist->pdev, 2)) {
322 mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", 325 mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
323 dev_cap->uar_size, 326 dev_cap->uar_size,
324 (unsigned long long) pci_resource_len(dev->pdev, 2)); 327 (unsigned long long)
328 pci_resource_len(dev->persist->pdev, 2));
325 return -ENODEV; 329 return -ENODEV;
326 } 330 }
327 331
@@ -541,8 +545,10 @@ static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev,
541 *speed = PCI_SPEED_UNKNOWN; 545 *speed = PCI_SPEED_UNKNOWN;
542 *width = PCIE_LNK_WIDTH_UNKNOWN; 546 *width = PCIE_LNK_WIDTH_UNKNOWN;
543 547
544 err1 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP, &lnkcap1); 548 err1 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP,
545 err2 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP2, &lnkcap2); 549 &lnkcap1);
550 err2 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP2,
551 &lnkcap2);
546 if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */ 552 if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
547 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) 553 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
548 *speed = PCIE_SPEED_8_0GT; 554 *speed = PCIE_SPEED_8_0GT;
@@ -587,7 +593,7 @@ static void mlx4_check_pcie_caps(struct mlx4_dev *dev)
587 return; 593 return;
588 } 594 }
589 595
590 err = pcie_get_minimum_link(dev->pdev, &speed, &width); 596 err = pcie_get_minimum_link(dev->persist->pdev, &speed, &width);
591 if (err || speed == PCI_SPEED_UNKNOWN || 597 if (err || speed == PCI_SPEED_UNKNOWN ||
592 width == PCIE_LNK_WIDTH_UNKNOWN) { 598 width == PCIE_LNK_WIDTH_UNKNOWN) {
593 mlx4_warn(dev, 599 mlx4_warn(dev,
@@ -792,6 +798,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
792 dev->caps.num_mpts = 1 << hca_param.log_mpt_sz; 798 dev->caps.num_mpts = 1 << hca_param.log_mpt_sz;
793 dev->caps.num_eqs = func_cap.max_eq; 799 dev->caps.num_eqs = func_cap.max_eq;
794 dev->caps.reserved_eqs = func_cap.reserved_eq; 800 dev->caps.reserved_eqs = func_cap.reserved_eq;
801 dev->caps.reserved_lkey = func_cap.reserved_lkey;
795 dev->caps.num_pds = MLX4_NUM_PDS; 802 dev->caps.num_pds = MLX4_NUM_PDS;
796 dev->caps.num_mgms = 0; 803 dev->caps.num_mgms = 0;
797 dev->caps.num_amgms = 0; 804 dev->caps.num_amgms = 0;
@@ -837,10 +844,12 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
837 844
838 if (dev->caps.uar_page_size * (dev->caps.num_uars - 845 if (dev->caps.uar_page_size * (dev->caps.num_uars -
839 dev->caps.reserved_uars) > 846 dev->caps.reserved_uars) >
840 pci_resource_len(dev->pdev, 2)) { 847 pci_resource_len(dev->persist->pdev,
848 2)) {
841 mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", 849 mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
842 dev->caps.uar_page_size * dev->caps.num_uars, 850 dev->caps.uar_page_size * dev->caps.num_uars,
843 (unsigned long long) pci_resource_len(dev->pdev, 2)); 851 (unsigned long long)
852 pci_resource_len(dev->persist->pdev, 2));
844 goto err_mem; 853 goto err_mem;
845 } 854 }
846 855
@@ -1152,6 +1161,91 @@ err_set_port:
1152 return err ? err : count; 1161 return err ? err : count;
1153} 1162}
1154 1163
1164int mlx4_bond(struct mlx4_dev *dev)
1165{
1166 int ret = 0;
1167 struct mlx4_priv *priv = mlx4_priv(dev);
1168
1169 mutex_lock(&priv->bond_mutex);
1170
1171 if (!mlx4_is_bonded(dev))
1172 ret = mlx4_do_bond(dev, true);
1173 else
1174 ret = 0;
1175
1176 mutex_unlock(&priv->bond_mutex);
1177 if (ret)
1178 mlx4_err(dev, "Failed to bond device: %d\n", ret);
1179 else
1180 mlx4_dbg(dev, "Device is bonded\n");
1181 return ret;
1182}
1183EXPORT_SYMBOL_GPL(mlx4_bond);
1184
1185int mlx4_unbond(struct mlx4_dev *dev)
1186{
1187 int ret = 0;
1188 struct mlx4_priv *priv = mlx4_priv(dev);
1189
1190 mutex_lock(&priv->bond_mutex);
1191
1192 if (mlx4_is_bonded(dev))
1193 ret = mlx4_do_bond(dev, false);
1194
1195 mutex_unlock(&priv->bond_mutex);
1196 if (ret)
1197 mlx4_err(dev, "Failed to unbond device: %d\n", ret);
1198 else
1199 mlx4_dbg(dev, "Device is unbonded\n");
1200 return ret;
1201}
1202EXPORT_SYMBOL_GPL(mlx4_unbond);
1203
1204
1205int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
1206{
1207 u8 port1 = v2p->port1;
1208 u8 port2 = v2p->port2;
1209 struct mlx4_priv *priv = mlx4_priv(dev);
1210 int err;
1211
1212 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
1213 return -ENOTSUPP;
1214
1215 mutex_lock(&priv->bond_mutex);
1216
1217 /* zero means keep current mapping for this port */
1218 if (port1 == 0)
1219 port1 = priv->v2p.port1;
1220 if (port2 == 0)
1221 port2 = priv->v2p.port2;
1222
1223 if ((port1 < 1) || (port1 > MLX4_MAX_PORTS) ||
1224 (port2 < 1) || (port2 > MLX4_MAX_PORTS) ||
1225 (port1 == 2 && port2 == 1)) {
1226 /* besides boundary checks, cross mapping makes
1227 * no sense and therefore is not allowed */
1228 err = -EINVAL;
1229 } else if ((port1 == priv->v2p.port1) &&
1230 (port2 == priv->v2p.port2)) {
1231 err = 0;
1232 } else {
1233 err = mlx4_virt2phy_port_map(dev, port1, port2);
1234 if (!err) {
1235 mlx4_dbg(dev, "port map changed: [%d][%d]\n",
1236 port1, port2);
1237 priv->v2p.port1 = port1;
1238 priv->v2p.port2 = port2;
1239 } else {
1240 mlx4_err(dev, "Failed to change port map: %d\n", err);
1241 }
1242 }
1243
1244 mutex_unlock(&priv->bond_mutex);
1245 return err;
1246}
1247EXPORT_SYMBOL_GPL(mlx4_port_map_set);
1248
1155static int mlx4_load_fw(struct mlx4_dev *dev) 1249static int mlx4_load_fw(struct mlx4_dev *dev)
1156{ 1250{
1157 struct mlx4_priv *priv = mlx4_priv(dev); 1251 struct mlx4_priv *priv = mlx4_priv(dev);
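As a usage note for mlx4_port_map_set() above: callers describe the desired virtual-to-physical mapping in a struct mlx4_port_map, where a value of 0 means keep the current mapping for that slot, and the full swap (port1 = 2, port2 = 1) is rejected as a cross mapping. A hedged example, steering both virtual ports to physical port 1 the way an active-backup bond might:

#include <linux/mlx4/driver.h>

/* Illustrative call only; error handling kept minimal. */
static int demo_map_both_to_port1(struct mlx4_dev *dev)
{
        struct mlx4_port_map v2p = {
                .port1 = 1,     /* virtual port 1 -> physical port 1 */
                .port2 = 1,     /* virtual port 2 -> physical port 1 */
        };

        return mlx4_port_map_set(dev, &v2p);
}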
@@ -1477,7 +1571,8 @@ static void mlx4_slave_exit(struct mlx4_dev *dev)
1477 struct mlx4_priv *priv = mlx4_priv(dev); 1571 struct mlx4_priv *priv = mlx4_priv(dev);
1478 1572
1479 mutex_lock(&priv->cmd.slave_cmd_mutex); 1573 mutex_lock(&priv->cmd.slave_cmd_mutex);
1480 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME)) 1574 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP,
1575 MLX4_COMM_TIME))
1481 mlx4_warn(dev, "Failed to close slave function\n"); 1576 mlx4_warn(dev, "Failed to close slave function\n");
1482 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1577 mutex_unlock(&priv->cmd.slave_cmd_mutex);
1483} 1578}
@@ -1492,9 +1587,9 @@ static int map_bf_area(struct mlx4_dev *dev)
1492 if (!dev->caps.bf_reg_size) 1587 if (!dev->caps.bf_reg_size)
1493 return -ENXIO; 1588 return -ENXIO;
1494 1589
1495 bf_start = pci_resource_start(dev->pdev, 2) + 1590 bf_start = pci_resource_start(dev->persist->pdev, 2) +
1496 (dev->caps.num_uars << PAGE_SHIFT); 1591 (dev->caps.num_uars << PAGE_SHIFT);
1497 bf_len = pci_resource_len(dev->pdev, 2) - 1592 bf_len = pci_resource_len(dev->persist->pdev, 2) -
1498 (dev->caps.num_uars << PAGE_SHIFT); 1593 (dev->caps.num_uars << PAGE_SHIFT);
1499 priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len); 1594 priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
1500 if (!priv->bf_mapping) 1595 if (!priv->bf_mapping)
@@ -1536,7 +1631,8 @@ static int map_internal_clock(struct mlx4_dev *dev)
1536 struct mlx4_priv *priv = mlx4_priv(dev); 1631 struct mlx4_priv *priv = mlx4_priv(dev);
1537 1632
1538 priv->clock_mapping = 1633 priv->clock_mapping =
1539 ioremap(pci_resource_start(dev->pdev, priv->fw.clock_bar) + 1634 ioremap(pci_resource_start(dev->persist->pdev,
1635 priv->fw.clock_bar) +
1540 priv->fw.clock_offset, MLX4_CLOCK_SIZE); 1636 priv->fw.clock_offset, MLX4_CLOCK_SIZE);
1541 1637
1542 if (!priv->clock_mapping) 1638 if (!priv->clock_mapping)
@@ -1573,6 +1669,50 @@ static void mlx4_close_fw(struct mlx4_dev *dev)
1573 } 1669 }
1574} 1670}
1575 1671
1672static int mlx4_comm_check_offline(struct mlx4_dev *dev)
1673{
1674#define COMM_CHAN_OFFLINE_OFFSET 0x09
1675
1676 u32 comm_flags;
1677 u32 offline_bit;
1678 unsigned long end;
1679 struct mlx4_priv *priv = mlx4_priv(dev);
1680
1681 end = msecs_to_jiffies(MLX4_COMM_OFFLINE_TIME_OUT) + jiffies;
1682 while (time_before(jiffies, end)) {
1683 comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm +
1684 MLX4_COMM_CHAN_FLAGS));
1685 offline_bit = (comm_flags &
1686 (u32)(1 << COMM_CHAN_OFFLINE_OFFSET));
1687 if (!offline_bit)
1688 return 0;
1689 /* There are cases as part of AER/Reset flow that PF needs
1690 * around 100 msec to load. We therefore sleep for 100 msec
1691 * to allow other tasks to make use of that CPU during this
1692 * time interval.
1693 */
1694 msleep(100);
1695 }
1696 mlx4_err(dev, "Communication channel is offline.\n");
1697 return -EIO;
1698}
1699
1700static void mlx4_reset_vf_support(struct mlx4_dev *dev)
1701{
1702#define COMM_CHAN_RST_OFFSET 0x1e
1703
1704 struct mlx4_priv *priv = mlx4_priv(dev);
1705 u32 comm_rst;
1706 u32 comm_caps;
1707
1708 comm_caps = swab32(readl((__iomem char *)priv->mfunc.comm +
1709 MLX4_COMM_CHAN_CAPS));
1710 comm_rst = (comm_caps & (u32)(1 << COMM_CHAN_RST_OFFSET));
1711
1712 if (comm_rst)
1713 dev->caps.vf_caps |= MLX4_VF_CAP_FLAG_RESET;
1714}
1715
1576static int mlx4_init_slave(struct mlx4_dev *dev) 1716static int mlx4_init_slave(struct mlx4_dev *dev)
1577{ 1717{
1578 struct mlx4_priv *priv = mlx4_priv(dev); 1718 struct mlx4_priv *priv = mlx4_priv(dev);
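mlx4_comm_check_offline() above is an instance of the common poll-until-deadline idiom: read a status register, test one bit, and sleep between attempts so the CPU stays available while the PF reloads. A compact generic sketch of the pattern under the same assumptions (a readl()-accessible register; the helper name is hypothetical):

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/io.h>

/* Hypothetical helper: wait up to timeout_ms for a status bit to clear.
 * Returns 0 once the bit reads back clear, -ETIMEDOUT otherwise.
 */
static int wait_bit_clear(void __iomem *reg, u32 bit, unsigned int timeout_ms)
{
        unsigned long end = jiffies + msecs_to_jiffies(timeout_ms);

        while (time_before(jiffies, end)) {
                if (!(readl(reg) & bit))
                        return 0;
                msleep(100);    /* give other tasks (and the PF) the CPU */
        }
        return -ETIMEDOUT;
}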
@@ -1588,9 +1728,15 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
1588 1728
1589 mutex_lock(&priv->cmd.slave_cmd_mutex); 1729 mutex_lock(&priv->cmd.slave_cmd_mutex);
1590 priv->cmd.max_cmds = 1; 1730 priv->cmd.max_cmds = 1;
1731 if (mlx4_comm_check_offline(dev)) {
1732 mlx4_err(dev, "PF is not responsive, skipping initialization\n");
1733 goto err_offline;
1734 }
1735
1736 mlx4_reset_vf_support(dev);
1591 mlx4_warn(dev, "Sending reset\n"); 1737 mlx4_warn(dev, "Sending reset\n");
1592 ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 1738 ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
1593 MLX4_COMM_TIME); 1739 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME);
1594 /* if we are in the middle of flr the slave will try 1740 /* if we are in the middle of flr the slave will try
1595 * NUM_OF_RESET_RETRIES times before leaving.*/ 1741 * NUM_OF_RESET_RETRIES times before leaving.*/
1596 if (ret_from_reset) { 1742 if (ret_from_reset) {
@@ -1615,22 +1761,24 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
1615 1761
1616 mlx4_warn(dev, "Sending vhcr0\n"); 1762 mlx4_warn(dev, "Sending vhcr0\n");
1617 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48, 1763 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
1618 MLX4_COMM_TIME)) 1764 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
1619 goto err; 1765 goto err;
1620 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32, 1766 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
1621 MLX4_COMM_TIME)) 1767 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
1622 goto err; 1768 goto err;
1623 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16, 1769 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
1624 MLX4_COMM_TIME)) 1770 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
1625 goto err; 1771 goto err;
1626 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME)) 1772 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma,
1773 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
1627 goto err; 1774 goto err;
1628 1775
1629 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1776 mutex_unlock(&priv->cmd.slave_cmd_mutex);
1630 return 0; 1777 return 0;
1631 1778
1632err: 1779err:
1633 mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0); 1780 mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 0);
1781err_offline:
1634 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1782 mutex_unlock(&priv->cmd.slave_cmd_mutex);
1635 return -EIO; 1783 return -EIO;
1636} 1784}
@@ -1705,7 +1853,8 @@ static void choose_steering_mode(struct mlx4_dev *dev,
1705 if (mlx4_log_num_mgm_entry_size <= 0 && 1853 if (mlx4_log_num_mgm_entry_size <= 0 &&
1706 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN && 1854 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
1707 (!mlx4_is_mfunc(dev) || 1855 (!mlx4_is_mfunc(dev) ||
1708 (dev_cap->fs_max_num_qp_per_entry >= (dev->num_vfs + 1))) && 1856 (dev_cap->fs_max_num_qp_per_entry >=
1857 (dev->persist->num_vfs + 1))) &&
1709 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >= 1858 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
1710 MLX4_MIN_MGM_LOG_ENTRY_SIZE) { 1859 MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
1711 dev->oper_log_mgm_entry_size = 1860 dev->oper_log_mgm_entry_size =
@@ -2287,7 +2436,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
2287 for (i = 0; i < nreq; ++i) 2436 for (i = 0; i < nreq; ++i)
2288 entries[i].entry = i; 2437 entries[i].entry = i;
2289 2438
2290 nreq = pci_enable_msix_range(dev->pdev, entries, 2, nreq); 2439 nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2,
2440 nreq);
2291 2441
2292 if (nreq < 0) { 2442 if (nreq < 0) {
2293 kfree(entries); 2443 kfree(entries);
@@ -2315,7 +2465,7 @@ no_msi:
2315 dev->caps.comp_pool = 0; 2465 dev->caps.comp_pool = 0;
2316 2466
2317 for (i = 0; i < 2; ++i) 2467 for (i = 0; i < 2; ++i)
2318 priv->eq_table.eq[i].irq = dev->pdev->irq; 2468 priv->eq_table.eq[i].irq = dev->persist->pdev->irq;
2319} 2469}
2320 2470
2321static int mlx4_init_port_info(struct mlx4_dev *dev, int port) 2471static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
@@ -2343,7 +2493,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
2343 info->port_attr.show = show_port_type; 2493 info->port_attr.show = show_port_type;
2344 sysfs_attr_init(&info->port_attr.attr); 2494 sysfs_attr_init(&info->port_attr.attr);
2345 2495
2346 err = device_create_file(&dev->pdev->dev, &info->port_attr); 2496 err = device_create_file(&dev->persist->pdev->dev, &info->port_attr);
2347 if (err) { 2497 if (err) {
2348 mlx4_err(dev, "Failed to create file for port %d\n", port); 2498 mlx4_err(dev, "Failed to create file for port %d\n", port);
2349 info->port = -1; 2499 info->port = -1;
@@ -2360,10 +2510,12 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
2360 info->port_mtu_attr.show = show_port_ib_mtu; 2510 info->port_mtu_attr.show = show_port_ib_mtu;
2361 sysfs_attr_init(&info->port_mtu_attr.attr); 2511 sysfs_attr_init(&info->port_mtu_attr.attr);
2362 2512
2363 err = device_create_file(&dev->pdev->dev, &info->port_mtu_attr); 2513 err = device_create_file(&dev->persist->pdev->dev,
2514 &info->port_mtu_attr);
2364 if (err) { 2515 if (err) {
2365 mlx4_err(dev, "Failed to create mtu file for port %d\n", port); 2516 mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
2366 device_remove_file(&info->dev->pdev->dev, &info->port_attr); 2517 device_remove_file(&info->dev->persist->pdev->dev,
2518 &info->port_attr);
2367 info->port = -1; 2519 info->port = -1;
2368 } 2520 }
2369 2521
@@ -2375,8 +2527,9 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
2375 if (info->port < 0) 2527 if (info->port < 0)
2376 return; 2528 return;
2377 2529
2378 device_remove_file(&info->dev->pdev->dev, &info->port_attr); 2530 device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
2379 device_remove_file(&info->dev->pdev->dev, &info->port_mtu_attr); 2531 device_remove_file(&info->dev->persist->pdev->dev,
2532 &info->port_mtu_attr);
2380} 2533}
2381 2534
2382static int mlx4_init_steering(struct mlx4_dev *dev) 2535static int mlx4_init_steering(struct mlx4_dev *dev)
@@ -2443,10 +2596,11 @@ static int mlx4_get_ownership(struct mlx4_dev *dev)
2443 void __iomem *owner; 2596 void __iomem *owner;
2444 u32 ret; 2597 u32 ret;
2445 2598
2446 if (pci_channel_offline(dev->pdev)) 2599 if (pci_channel_offline(dev->persist->pdev))
2447 return -EIO; 2600 return -EIO;
2448 2601
2449 owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE, 2602 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
2603 MLX4_OWNER_BASE,
2450 MLX4_OWNER_SIZE); 2604 MLX4_OWNER_SIZE);
2451 if (!owner) { 2605 if (!owner) {
2452 mlx4_err(dev, "Failed to obtain ownership bit\n"); 2606 mlx4_err(dev, "Failed to obtain ownership bit\n");
@@ -2462,10 +2616,11 @@ static void mlx4_free_ownership(struct mlx4_dev *dev)
2462{ 2616{
2463 void __iomem *owner; 2617 void __iomem *owner;
2464 2618
2465 if (pci_channel_offline(dev->pdev)) 2619 if (pci_channel_offline(dev->persist->pdev))
2466 return; 2620 return;
2467 2621
2468 owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE, 2622 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
2623 MLX4_OWNER_BASE,
2469 MLX4_OWNER_SIZE); 2624 MLX4_OWNER_SIZE);
2470 if (!owner) { 2625 if (!owner) {
2471 mlx4_err(dev, "Failed to obtain ownership bit\n"); 2626 mlx4_err(dev, "Failed to obtain ownership bit\n");
@@ -2480,11 +2635,19 @@ static void mlx4_free_ownership(struct mlx4_dev *dev)
2480 !!((flags) & MLX4_FLAG_MASTER)) 2635 !!((flags) & MLX4_FLAG_MASTER))
2481 2636
2482static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev, 2637static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
2483 u8 total_vfs, int existing_vfs) 2638 u8 total_vfs, int existing_vfs, int reset_flow)
2484{ 2639{
2485 u64 dev_flags = dev->flags; 2640 u64 dev_flags = dev->flags;
2486 int err = 0; 2641 int err = 0;
2487 2642
2643 if (reset_flow) {
2644 dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs),
2645 GFP_KERNEL);
2646 if (!dev->dev_vfs)
2647 goto free_mem;
2648 return dev_flags;
2649 }
2650
2488 atomic_inc(&pf_loading); 2651 atomic_inc(&pf_loading);
2489 if (dev->flags & MLX4_FLAG_SRIOV) { 2652 if (dev->flags & MLX4_FLAG_SRIOV) {
2490 if (existing_vfs != total_vfs) { 2653 if (existing_vfs != total_vfs) {
@@ -2513,13 +2676,14 @@ static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
2513 dev_flags |= MLX4_FLAG_SRIOV | 2676 dev_flags |= MLX4_FLAG_SRIOV |
2514 MLX4_FLAG_MASTER; 2677 MLX4_FLAG_MASTER;
2515 dev_flags &= ~MLX4_FLAG_SLAVE; 2678 dev_flags &= ~MLX4_FLAG_SLAVE;
2516 dev->num_vfs = total_vfs; 2679 dev->persist->num_vfs = total_vfs;
2517 } 2680 }
2518 return dev_flags; 2681 return dev_flags;
2519 2682
2520disable_sriov: 2683disable_sriov:
2521 atomic_dec(&pf_loading); 2684 atomic_dec(&pf_loading);
2522 dev->num_vfs = 0; 2685free_mem:
2686 dev->persist->num_vfs = 0;
2523 kfree(dev->dev_vfs); 2687 kfree(dev->dev_vfs);
2524 return dev_flags & ~MLX4_FLAG_MASTER; 2688 return dev_flags & ~MLX4_FLAG_MASTER;
2525} 2689}
@@ -2543,7 +2707,8 @@ static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap
2543} 2707}
2544 2708
2545static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, 2709static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
2546 int total_vfs, int *nvfs, struct mlx4_priv *priv) 2710 int total_vfs, int *nvfs, struct mlx4_priv *priv,
2711 int reset_flow)
2547{ 2712{
2548 struct mlx4_dev *dev; 2713 struct mlx4_dev *dev;
2549 unsigned sum = 0; 2714 unsigned sum = 0;
@@ -2559,6 +2724,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
2559 spin_lock_init(&priv->ctx_lock); 2724 spin_lock_init(&priv->ctx_lock);
2560 2725
2561 mutex_init(&priv->port_mutex); 2726 mutex_init(&priv->port_mutex);
2727 mutex_init(&priv->bond_mutex);
2562 2728
2563 INIT_LIST_HEAD(&priv->pgdir_list); 2729 INIT_LIST_HEAD(&priv->pgdir_list);
2564 mutex_init(&priv->pgdir_mutex); 2730 mutex_init(&priv->pgdir_mutex);
@@ -2606,10 +2772,15 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
2606 existing_vfs = pci_num_vf(pdev); 2772 existing_vfs = pci_num_vf(pdev);
2607 if (existing_vfs) 2773 if (existing_vfs)
2608 dev->flags |= MLX4_FLAG_SRIOV; 2774 dev->flags |= MLX4_FLAG_SRIOV;
2609 dev->num_vfs = total_vfs; 2775 dev->persist->num_vfs = total_vfs;
2610 } 2776 }
2611 } 2777 }
2612 2778
2779 /* on load remove any previous indication of internal error,
2780 * device is up.
2781 */
2782 dev->persist->state = MLX4_DEVICE_STATE_UP;
2783
2613slave_start: 2784slave_start:
2614 err = mlx4_cmd_init(dev); 2785 err = mlx4_cmd_init(dev);
2615 if (err) { 2786 if (err) {
@@ -2660,8 +2831,10 @@ slave_start:
2660 goto err_fw; 2831 goto err_fw;
2661 2832
2662 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { 2833 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
2663 u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, 2834 u64 dev_flags = mlx4_enable_sriov(dev, pdev,
2664 existing_vfs); 2835 total_vfs,
2836 existing_vfs,
2837 reset_flow);
2665 2838
2666 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 2839 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
2667 dev->flags = dev_flags; 2840 dev->flags = dev_flags;
@@ -2703,7 +2876,7 @@ slave_start:
2703 if (dev->flags & MLX4_FLAG_SRIOV) { 2876 if (dev->flags & MLX4_FLAG_SRIOV) {
2704 if (!existing_vfs) 2877 if (!existing_vfs)
2705 pci_disable_sriov(pdev); 2878 pci_disable_sriov(pdev);
2706 if (mlx4_is_master(dev)) 2879 if (mlx4_is_master(dev) && !reset_flow)
2707 atomic_dec(&pf_loading); 2880 atomic_dec(&pf_loading);
2708 dev->flags &= ~MLX4_FLAG_SRIOV; 2881 dev->flags &= ~MLX4_FLAG_SRIOV;
2709 } 2882 }
@@ -2717,7 +2890,8 @@ slave_start:
2717 } 2890 }
2718 2891
2719 if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { 2892 if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
2720 u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, existing_vfs); 2893 u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs,
2894 existing_vfs, reset_flow);
2721 2895
2722 if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) { 2896 if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) {
2723 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR); 2897 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR);
@@ -2770,12 +2944,14 @@ slave_start:
2770 dev->caps.num_ports); 2944 dev->caps.num_ports);
2771 goto err_close; 2945 goto err_close;
2772 } 2946 }
2773 memcpy(dev->nvfs, nvfs, sizeof(dev->nvfs)); 2947 memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs));
2774 2948
2775 for (i = 0; i < sizeof(dev->nvfs)/sizeof(dev->nvfs[0]); i++) { 2949 for (i = 0;
2950 i < sizeof(dev->persist->nvfs)/
2951 sizeof(dev->persist->nvfs[0]); i++) {
2776 unsigned j; 2952 unsigned j;
2777 2953
2778 for (j = 0; j < dev->nvfs[i]; ++sum, ++j) { 2954 for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) {
2779 dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1; 2955 dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1;
2780 dev->dev_vfs[sum].n_ports = i < 2 ? 1 : 2956 dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
2781 dev->caps.num_ports; 2957 dev->caps.num_ports;
@@ -2827,6 +3003,17 @@ slave_start:
2827 goto err_steer; 3003 goto err_steer;
2828 3004
2829 mlx4_init_quotas(dev); 3005 mlx4_init_quotas(dev);
3006 /* When PF resources are ready arm its comm channel to enable
3007 * getting commands
3008 */
3009 if (mlx4_is_master(dev)) {
3010 err = mlx4_ARM_COMM_CHANNEL(dev);
3011 if (err) {
3012 mlx4_err(dev, " Failed to arm comm channel eq: %x\n",
3013 err);
3014 goto err_steer;
3015 }
3016 }
2830 3017
2831 for (port = 1; port <= dev->caps.num_ports; port++) { 3018 for (port = 1; port <= dev->caps.num_ports; port++) {
2832 err = mlx4_init_port_info(dev, port); 3019 err = mlx4_init_port_info(dev, port);
@@ -2834,6 +3021,9 @@ slave_start:
2834 goto err_port; 3021 goto err_port;
2835 } 3022 }
2836 3023
3024 priv->v2p.port1 = 1;
3025 priv->v2p.port2 = 2;
3026
2837 err = mlx4_register_device(dev); 3027 err = mlx4_register_device(dev);
2838 if (err) 3028 if (err)
2839 goto err_port; 3029 goto err_port;
@@ -2845,7 +3035,7 @@ slave_start:
2845 3035
2846 priv->removed = 0; 3036 priv->removed = 0;
2847 3037
2848 if (mlx4_is_master(dev) && dev->num_vfs) 3038 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
2849 atomic_dec(&pf_loading); 3039 atomic_dec(&pf_loading);
2850 3040
2851 kfree(dev_cap); 3041 kfree(dev_cap);
@@ -2879,8 +3069,10 @@ err_free_eq:
2879 mlx4_free_eq_table(dev); 3069 mlx4_free_eq_table(dev);
2880 3070
2881err_master_mfunc: 3071err_master_mfunc:
2882 if (mlx4_is_master(dev)) 3072 if (mlx4_is_master(dev)) {
3073 mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY);
2883 mlx4_multi_func_cleanup(dev); 3074 mlx4_multi_func_cleanup(dev);
3075 }
2884 3076
2885 if (mlx4_is_slave(dev)) { 3077 if (mlx4_is_slave(dev)) {
2886 kfree(dev->caps.qp0_qkey); 3078 kfree(dev->caps.qp0_qkey);
@@ -2904,10 +3096,12 @@ err_cmd:
2904 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3096 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
2905 3097
2906err_sriov: 3098err_sriov:
2907 if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) 3099 if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) {
2908 pci_disable_sriov(pdev); 3100 pci_disable_sriov(pdev);
3101 dev->flags &= ~MLX4_FLAG_SRIOV;
3102 }
2909 3103
2910 if (mlx4_is_master(dev) && dev->num_vfs) 3104 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
2911 atomic_dec(&pf_loading); 3105 atomic_dec(&pf_loading);
2912 3106
2913 kfree(priv->dev.dev_vfs); 3107 kfree(priv->dev.dev_vfs);
@@ -3048,11 +3242,19 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
3048 } 3242 }
3049 } 3243 }
3050 3244
3051 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv); 3245 err = mlx4_catas_init(&priv->dev);
3052 if (err) 3246 if (err)
3053 goto err_release_regions; 3247 goto err_release_regions;
3248
3249 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0);
3250 if (err)
3251 goto err_catas;
3252
3054 return 0; 3253 return 0;
3055 3254
3255err_catas:
3256 mlx4_catas_end(&priv->dev);
3257
3056err_release_regions: 3258err_release_regions:
3057 pci_release_regions(pdev); 3259 pci_release_regions(pdev);
3058 3260
@@ -3075,38 +3277,60 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
3075 return -ENOMEM; 3277 return -ENOMEM;
3076 3278
3077 dev = &priv->dev; 3279 dev = &priv->dev;
3078 dev->pdev = pdev; 3280 dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL);
3079 pci_set_drvdata(pdev, dev); 3281 if (!dev->persist) {
3282 kfree(priv);
3283 return -ENOMEM;
3284 }
3285 dev->persist->pdev = pdev;
3286 dev->persist->dev = dev;
3287 pci_set_drvdata(pdev, dev->persist);
3080 priv->pci_dev_data = id->driver_data; 3288 priv->pci_dev_data = id->driver_data;
3289 mutex_init(&dev->persist->device_state_mutex);
3290 mutex_init(&dev->persist->interface_state_mutex);
3081 3291
3082 ret = __mlx4_init_one(pdev, id->driver_data, priv); 3292 ret = __mlx4_init_one(pdev, id->driver_data, priv);
3083 if (ret) 3293 if (ret) {
3294 kfree(dev->persist);
3084 kfree(priv); 3295 kfree(priv);
3296 } else {
3297 pci_save_state(pdev);
3298 }
3085 3299
3086 return ret; 3300 return ret;
3087} 3301}
3088 3302
3303static void mlx4_clean_dev(struct mlx4_dev *dev)
3304{
3305 struct mlx4_dev_persistent *persist = dev->persist;
3306 struct mlx4_priv *priv = mlx4_priv(dev);
3307 unsigned long flags = (dev->flags & RESET_PERSIST_MASK_FLAGS);
3308
3309 memset(priv, 0, sizeof(*priv));
3310 priv->dev.persist = persist;
3311 priv->dev.flags = flags;
3312}
3313
3089static void mlx4_unload_one(struct pci_dev *pdev) 3314static void mlx4_unload_one(struct pci_dev *pdev)
3090{ 3315{
3091 struct mlx4_dev *dev = pci_get_drvdata(pdev); 3316 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3317 struct mlx4_dev *dev = persist->dev;
3092 struct mlx4_priv *priv = mlx4_priv(dev); 3318 struct mlx4_priv *priv = mlx4_priv(dev);
3093 int pci_dev_data; 3319 int pci_dev_data;
3094 int p; 3320 int p, i;
3095 int active_vfs = 0;
3096 3321
3097 if (priv->removed) 3322 if (priv->removed)
3098 return; 3323 return;
3099 3324
 3325	/* save the current port types for later use */
3326 for (i = 0; i < dev->caps.num_ports; i++) {
3327 dev->persist->curr_port_type[i] = dev->caps.port_type[i + 1];
3328 dev->persist->curr_port_poss_type[i] = dev->caps.
3329 possible_type[i + 1];
3330 }
3331
3100 pci_dev_data = priv->pci_dev_data; 3332 pci_dev_data = priv->pci_dev_data;
3101 3333
3102 /* Disabling SR-IOV is not allowed while there are active vf's */
3103 if (mlx4_is_master(dev)) {
3104 active_vfs = mlx4_how_many_lives_vf(dev);
3105 if (active_vfs) {
3106 pr_warn("Removing PF when there are active VF's !!\n");
3107 pr_warn("Will not disable SR-IOV.\n");
3108 }
3109 }
3110 mlx4_stop_sense(dev); 3334 mlx4_stop_sense(dev);
3111 mlx4_unregister_device(dev); 3335 mlx4_unregister_device(dev);
3112 3336
@@ -3150,12 +3374,6 @@ static void mlx4_unload_one(struct pci_dev *pdev)
3150 3374
3151 if (dev->flags & MLX4_FLAG_MSI_X) 3375 if (dev->flags & MLX4_FLAG_MSI_X)
3152 pci_disable_msix(pdev); 3376 pci_disable_msix(pdev);
3153 if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
3154 mlx4_warn(dev, "Disabling SR-IOV\n");
3155 pci_disable_sriov(pdev);
3156 dev->flags &= ~MLX4_FLAG_SRIOV;
3157 dev->num_vfs = 0;
3158 }
3159 3377
3160 if (!mlx4_is_slave(dev)) 3378 if (!mlx4_is_slave(dev))
3161 mlx4_free_ownership(dev); 3379 mlx4_free_ownership(dev);
@@ -3167,42 +3385,96 @@ static void mlx4_unload_one(struct pci_dev *pdev)
3167 kfree(dev->caps.qp1_proxy); 3385 kfree(dev->caps.qp1_proxy);
3168 kfree(dev->dev_vfs); 3386 kfree(dev->dev_vfs);
3169 3387
3170 memset(priv, 0, sizeof(*priv)); 3388 mlx4_clean_dev(dev);
3171 priv->pci_dev_data = pci_dev_data; 3389 priv->pci_dev_data = pci_dev_data;
3172 priv->removed = 1; 3390 priv->removed = 1;
3173} 3391}
3174 3392
3175static void mlx4_remove_one(struct pci_dev *pdev) 3393static void mlx4_remove_one(struct pci_dev *pdev)
3176{ 3394{
3177 struct mlx4_dev *dev = pci_get_drvdata(pdev); 3395 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3396 struct mlx4_dev *dev = persist->dev;
3178 struct mlx4_priv *priv = mlx4_priv(dev); 3397 struct mlx4_priv *priv = mlx4_priv(dev);
3398 int active_vfs = 0;
3399
3400 mutex_lock(&persist->interface_state_mutex);
3401 persist->interface_state |= MLX4_INTERFACE_STATE_DELETION;
3402 mutex_unlock(&persist->interface_state_mutex);
3403
3404 /* Disabling SR-IOV is not allowed while there are active vf's */
3405 if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) {
3406 active_vfs = mlx4_how_many_lives_vf(dev);
3407 if (active_vfs) {
3408 pr_warn("Removing PF when there are active VF's !!\n");
3409 pr_warn("Will not disable SR-IOV.\n");
3410 }
3411 }
3412
 3413	/* The device is marked for deletion; continue now without the lock
 3414	 * so other tasks can terminate
 3415	 */
3416 if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
3417 mlx4_unload_one(pdev);
3418 else
3419 mlx4_info(dev, "%s: interface is down\n", __func__);
3420 mlx4_catas_end(dev);
3421 if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
3422 mlx4_warn(dev, "Disabling SR-IOV\n");
3423 pci_disable_sriov(pdev);
3424 }
3179 3425
3180 mlx4_unload_one(pdev);
3181 pci_release_regions(pdev); 3426 pci_release_regions(pdev);
3182 pci_disable_device(pdev); 3427 pci_disable_device(pdev);
3428 kfree(dev->persist);
3183 kfree(priv); 3429 kfree(priv);
3184 pci_set_drvdata(pdev, NULL); 3430 pci_set_drvdata(pdev, NULL);
3185} 3431}
3186 3432
3433static int restore_current_port_types(struct mlx4_dev *dev,
3434 enum mlx4_port_type *types,
3435 enum mlx4_port_type *poss_types)
3436{
3437 struct mlx4_priv *priv = mlx4_priv(dev);
3438 int err, i;
3439
3440 mlx4_stop_sense(dev);
3441
3442 mutex_lock(&priv->port_mutex);
3443 for (i = 0; i < dev->caps.num_ports; i++)
3444 dev->caps.possible_type[i + 1] = poss_types[i];
3445 err = mlx4_change_port_types(dev, types);
3446 mlx4_start_sense(dev);
3447 mutex_unlock(&priv->port_mutex);
3448
3449 return err;
3450}
3451
3187int mlx4_restart_one(struct pci_dev *pdev) 3452int mlx4_restart_one(struct pci_dev *pdev)
3188{ 3453{
3189 struct mlx4_dev *dev = pci_get_drvdata(pdev); 3454 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3455 struct mlx4_dev *dev = persist->dev;
3190 struct mlx4_priv *priv = mlx4_priv(dev); 3456 struct mlx4_priv *priv = mlx4_priv(dev);
3191 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 3457 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
3192 int pci_dev_data, err, total_vfs; 3458 int pci_dev_data, err, total_vfs;
3193 3459
3194 pci_dev_data = priv->pci_dev_data; 3460 pci_dev_data = priv->pci_dev_data;
3195 total_vfs = dev->num_vfs; 3461 total_vfs = dev->persist->num_vfs;
3196 memcpy(nvfs, dev->nvfs, sizeof(dev->nvfs)); 3462 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
3197 3463
3198 mlx4_unload_one(pdev); 3464 mlx4_unload_one(pdev);
3199 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv); 3465 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1);
3200 if (err) { 3466 if (err) {
3201 mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n", 3467 mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n",
3202 __func__, pci_name(pdev), err); 3468 __func__, pci_name(pdev), err);
3203 return err; 3469 return err;
3204 } 3470 }
3205 3471
3472 err = restore_current_port_types(dev, dev->persist->curr_port_type,
3473 dev->persist->curr_port_poss_type);
3474 if (err)
3475 mlx4_err(dev, "could not restore original port types (%d)\n",
3476 err);
3477
3206 return err; 3478 return err;
3207} 3479}
3208 3480
@@ -3257,23 +3529,79 @@ MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
3257static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev, 3529static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
3258 pci_channel_state_t state) 3530 pci_channel_state_t state)
3259{ 3531{
3260 mlx4_unload_one(pdev); 3532 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3533
3534 mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n");
3535 mlx4_enter_error_state(persist);
3261 3536
3262 return state == pci_channel_io_perm_failure ? 3537 mutex_lock(&persist->interface_state_mutex);
3263 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; 3538 if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
3539 mlx4_unload_one(pdev);
3540
3541 mutex_unlock(&persist->interface_state_mutex);
3542 if (state == pci_channel_io_perm_failure)
3543 return PCI_ERS_RESULT_DISCONNECT;
3544
3545 pci_disable_device(pdev);
3546 return PCI_ERS_RESULT_NEED_RESET;
3264} 3547}
3265 3548
3266static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev) 3549static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
3267{ 3550{
3268 struct mlx4_dev *dev = pci_get_drvdata(pdev); 3551 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3552 struct mlx4_dev *dev = persist->dev;
3269 struct mlx4_priv *priv = mlx4_priv(dev); 3553 struct mlx4_priv *priv = mlx4_priv(dev);
3270 int ret; 3554 int ret;
3555 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
3556 int total_vfs;
3271 3557
3272 ret = __mlx4_init_one(pdev, priv->pci_dev_data, priv); 3558 mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
3559 ret = pci_enable_device(pdev);
3560 if (ret) {
3561 mlx4_err(dev, "Can not re-enable device, ret=%d\n", ret);
3562 return PCI_ERS_RESULT_DISCONNECT;
3563 }
3564
3565 pci_set_master(pdev);
3566 pci_restore_state(pdev);
3567 pci_save_state(pdev);
3568
3569 total_vfs = dev->persist->num_vfs;
3570 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
3571
3572 mutex_lock(&persist->interface_state_mutex);
3573 if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
3574 ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
3575 priv, 1);
3576 if (ret) {
3577 mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n",
3578 __func__, ret);
3579 goto end;
3580 }
3581
3582 ret = restore_current_port_types(dev, dev->persist->
3583 curr_port_type, dev->persist->
3584 curr_port_poss_type);
3585 if (ret)
3586 mlx4_err(dev, "could not restore original port types (%d)\n", ret);
3587 }
3588end:
3589 mutex_unlock(&persist->interface_state_mutex);
3273 3590
3274 return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; 3591 return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
3275} 3592}
3276 3593
3594static void mlx4_shutdown(struct pci_dev *pdev)
3595{
3596 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3597
3598 mlx4_info(persist->dev, "mlx4_shutdown was called\n");
3599 mutex_lock(&persist->interface_state_mutex);
3600 if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
3601 mlx4_unload_one(pdev);
3602 mutex_unlock(&persist->interface_state_mutex);
3603}
3604
3277static const struct pci_error_handlers mlx4_err_handler = { 3605static const struct pci_error_handlers mlx4_err_handler = {
3278 .error_detected = mlx4_pci_err_detected, 3606 .error_detected = mlx4_pci_err_detected,
3279 .slot_reset = mlx4_pci_slot_reset, 3607 .slot_reset = mlx4_pci_slot_reset,
@@ -3283,7 +3611,7 @@ static struct pci_driver mlx4_driver = {
3283 .name = DRV_NAME, 3611 .name = DRV_NAME,
3284 .id_table = mlx4_pci_table, 3612 .id_table = mlx4_pci_table,
3285 .probe = mlx4_init_one, 3613 .probe = mlx4_init_one,
3286 .shutdown = mlx4_unload_one, 3614 .shutdown = mlx4_shutdown,
3287 .remove = mlx4_remove_one, 3615 .remove = mlx4_remove_one,
3288 .err_handler = &mlx4_err_handler, 3616 .err_handler = &mlx4_err_handler,
3289}; 3617};
@@ -3335,7 +3663,6 @@ static int __init mlx4_init(void)
3335 if (mlx4_verify_params()) 3663 if (mlx4_verify_params())
3336 return -EINVAL; 3664 return -EINVAL;
3337 3665
3338 mlx4_catas_init();
3339 3666
3340 mlx4_wq = create_singlethread_workqueue("mlx4"); 3667 mlx4_wq = create_singlethread_workqueue("mlx4");
3341 if (!mlx4_wq) 3668 if (!mlx4_wq)
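The main.c hunks above move the pci_dev and related bookkeeping into a persistent per-device context (dev->persist) and gate shutdown, removal and the PCI error handlers on an interface_state flag taken under interface_state_mutex, so a device that was never brought up, or is already being deleted, is not torn down twice. A minimal sketch of that guard pattern, using placeholder names (my_persist, my_teardown) rather than the real mlx4 symbols:

#include <linux/mutex.h>

#define MY_STATE_UP		0x1
#define MY_STATE_DELETION	0x2

struct my_persist {
	struct mutex state_mutex;	/* protects interface state transitions */
	unsigned int state;
};

/* Hypothetical teardown helper: unload only when the interface is up. */
static void my_teardown(struct my_persist *p)
{
	mutex_lock(&p->state_mutex);
	p->state |= MY_STATE_DELETION;		/* block new users */
	if (p->state & MY_STATE_UP) {
		p->state &= ~MY_STATE_UP;
		/* the actual unload work would go here */
	}
	mutex_unlock(&p->state_mutex);
}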
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index a3867e7ef885..bd9ea0d01aae 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1318,6 +1318,9 @@ out:
1318 mutex_unlock(&priv->mcg_table.mutex); 1318 mutex_unlock(&priv->mcg_table.mutex);
1319 1319
1320 mlx4_free_cmd_mailbox(dev, mailbox); 1320 mlx4_free_cmd_mailbox(dev, mailbox);
1321 if (err && dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
1322 /* In case device is under an error, return success as a closing command */
1323 err = 0;
1321 return err; 1324 return err;
1322} 1325}
1323 1326
@@ -1347,6 +1350,9 @@ static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
1347 MLX4_CMD_WRAPPED); 1350 MLX4_CMD_WRAPPED);
1348 1351
1349 mlx4_free_cmd_mailbox(dev, mailbox); 1352 mlx4_free_cmd_mailbox(dev, mailbox);
1353 if (err && !attach &&
1354 dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
1355 err = 0;
1350 return err; 1356 return err;
1351} 1357}
1352 1358
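Both mcg.c hunks let multicast detach paths succeed even when the firmware command fails, provided the device is already flagged with MLX4_DEVICE_STATE_INTERNAL_ERROR, so resource cleanup can finish on a dead HCA. The convention, reduced to a hypothetical helper (the flag macro below is a stand-in for the real one):

#define MY_DEVICE_STATE_INTERNAL_ERROR	0x4	/* placeholder flag bit */

/* Illustrative only: during teardown, report success for a failed command
 * if the device has already entered an internal error state, so callers
 * can keep releasing their resources.
 */
static int my_teardown_result(unsigned int dev_state, int err)
{
	if (err && (dev_state & MY_DEVICE_STATE_INTERNAL_ERROR))
		return 0;
	return err;
}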
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 210691c89b6c..1409d0cd6143 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -85,7 +85,9 @@ enum {
85 MLX4_CLR_INT_SIZE = 0x00008, 85 MLX4_CLR_INT_SIZE = 0x00008,
86 MLX4_SLAVE_COMM_BASE = 0x0, 86 MLX4_SLAVE_COMM_BASE = 0x0,
87 MLX4_COMM_PAGESIZE = 0x1000, 87 MLX4_COMM_PAGESIZE = 0x1000,
88 MLX4_CLOCK_SIZE = 0x00008 88 MLX4_CLOCK_SIZE = 0x00008,
89 MLX4_COMM_CHAN_CAPS = 0x8,
90 MLX4_COMM_CHAN_FLAGS = 0xc
89}; 91};
90 92
91enum { 93enum {
@@ -120,6 +122,10 @@ enum mlx4_mpt_state {
120}; 122};
121 123
122#define MLX4_COMM_TIME 10000 124#define MLX4_COMM_TIME 10000
125#define MLX4_COMM_OFFLINE_TIME_OUT 30000
126#define MLX4_COMM_CMD_NA_OP 0x0
127
128
123enum { 129enum {
124 MLX4_COMM_CMD_RESET, 130 MLX4_COMM_CMD_RESET,
125 MLX4_COMM_CMD_VHCR0, 131 MLX4_COMM_CMD_VHCR0,
@@ -190,6 +196,7 @@ struct mlx4_vhcr {
190struct mlx4_vhcr_cmd { 196struct mlx4_vhcr_cmd {
191 __be64 in_param; 197 __be64 in_param;
192 __be32 in_modifier; 198 __be32 in_modifier;
199 u32 reserved1;
193 __be64 out_param; 200 __be64 out_param;
194 __be16 token; 201 __be16 token;
195 u16 reserved; 202 u16 reserved;
@@ -221,19 +228,21 @@ extern int mlx4_debug_level;
221#define mlx4_dbg(mdev, format, ...) \ 228#define mlx4_dbg(mdev, format, ...) \
222do { \ 229do { \
223 if (mlx4_debug_level) \ 230 if (mlx4_debug_level) \
224 dev_printk(KERN_DEBUG, &(mdev)->pdev->dev, format, \ 231 dev_printk(KERN_DEBUG, \
232 &(mdev)->persist->pdev->dev, format, \
225 ##__VA_ARGS__); \ 233 ##__VA_ARGS__); \
226} while (0) 234} while (0)
227 235
228#define mlx4_err(mdev, format, ...) \ 236#define mlx4_err(mdev, format, ...) \
229 dev_err(&(mdev)->pdev->dev, format, ##__VA_ARGS__) 237 dev_err(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__)
230#define mlx4_info(mdev, format, ...) \ 238#define mlx4_info(mdev, format, ...) \
231 dev_info(&(mdev)->pdev->dev, format, ##__VA_ARGS__) 239 dev_info(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__)
232#define mlx4_warn(mdev, format, ...) \ 240#define mlx4_warn(mdev, format, ...) \
233 dev_warn(&(mdev)->pdev->dev, format, ##__VA_ARGS__) 241 dev_warn(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__)
234 242
235extern int mlx4_log_num_mgm_entry_size; 243extern int mlx4_log_num_mgm_entry_size;
236extern int log_mtts_per_seg; 244extern int log_mtts_per_seg;
245extern int mlx4_internal_err_reset;
237 246
238#define MLX4_MAX_NUM_SLAVES (min(MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF, \ 247#define MLX4_MAX_NUM_SLAVES (min(MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF, \
239 MLX4_MFUNC_MAX)) 248 MLX4_MFUNC_MAX))
@@ -607,7 +616,6 @@ struct mlx4_mgm {
607struct mlx4_cmd { 616struct mlx4_cmd {
608 struct pci_pool *pool; 617 struct pci_pool *pool;
609 void __iomem *hcr; 618 void __iomem *hcr;
610 struct mutex hcr_mutex;
611 struct mutex slave_cmd_mutex; 619 struct mutex slave_cmd_mutex;
612 struct semaphore poll_sem; 620 struct semaphore poll_sem;
613 struct semaphore event_sem; 621 struct semaphore event_sem;
@@ -878,6 +886,8 @@ struct mlx4_priv {
878 int reserved_mtts; 886 int reserved_mtts;
879 int fs_hash_mode; 887 int fs_hash_mode;
880 u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS]; 888 u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
889 struct mlx4_port_map v2p; /* cached port mapping configuration */
890 struct mutex bond_mutex; /* for bond mode */
881 __be64 slave_node_guids[MLX4_MFUNC_MAX]; 891 __be64 slave_node_guids[MLX4_MFUNC_MAX];
882 892
883 atomic_t opreq_count; 893 atomic_t opreq_count;
@@ -995,7 +1005,8 @@ void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn);
995 1005
996void mlx4_start_catas_poll(struct mlx4_dev *dev); 1006void mlx4_start_catas_poll(struct mlx4_dev *dev);
997void mlx4_stop_catas_poll(struct mlx4_dev *dev); 1007void mlx4_stop_catas_poll(struct mlx4_dev *dev);
998void mlx4_catas_init(void); 1008int mlx4_catas_init(struct mlx4_dev *dev);
1009void mlx4_catas_end(struct mlx4_dev *dev);
999int mlx4_restart_one(struct pci_dev *pdev); 1010int mlx4_restart_one(struct pci_dev *pdev);
1000int mlx4_register_device(struct mlx4_dev *dev); 1011int mlx4_register_device(struct mlx4_dev *dev);
1001void mlx4_unregister_device(struct mlx4_dev *dev); 1012void mlx4_unregister_device(struct mlx4_dev *dev);
@@ -1161,13 +1172,14 @@ enum {
1161int mlx4_cmd_init(struct mlx4_dev *dev); 1172int mlx4_cmd_init(struct mlx4_dev *dev);
1162void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask); 1173void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask);
1163int mlx4_multi_func_init(struct mlx4_dev *dev); 1174int mlx4_multi_func_init(struct mlx4_dev *dev);
1175int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev);
1164void mlx4_multi_func_cleanup(struct mlx4_dev *dev); 1176void mlx4_multi_func_cleanup(struct mlx4_dev *dev);
1165void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param); 1177void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
1166int mlx4_cmd_use_events(struct mlx4_dev *dev); 1178int mlx4_cmd_use_events(struct mlx4_dev *dev);
1167void mlx4_cmd_use_polling(struct mlx4_dev *dev); 1179void mlx4_cmd_use_polling(struct mlx4_dev *dev);
1168 1180
1169int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param, 1181int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
1170 unsigned long timeout); 1182 u16 op, unsigned long timeout);
1171 1183
1172void mlx4_cq_tasklet_cb(unsigned long data); 1184void mlx4_cq_tasklet_cb(unsigned long data);
1173void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn); 1185void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn);
@@ -1177,7 +1189,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);
1177 1189
1178void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type); 1190void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
1179 1191
1180void mlx4_handle_catas_err(struct mlx4_dev *dev); 1192void mlx4_enter_error_state(struct mlx4_dev_persistent *persist);
1181 1193
1182int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port, 1194int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
1183 enum mlx4_port_type *type); 1195 enum mlx4_port_type *type);
@@ -1355,6 +1367,7 @@ int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port);
1355/* Returns the VF index of slave */ 1367/* Returns the VF index of slave */
1356int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave); 1368int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave);
1357int mlx4_config_mad_demux(struct mlx4_dev *dev); 1369int mlx4_config_mad_demux(struct mlx4_dev *dev);
1370int mlx4_do_bond(struct mlx4_dev *dev, bool enable);
1358 1371
1359enum mlx4_zone_flags { 1372enum mlx4_zone_flags {
1360 MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO = 1UL << 0, 1373 MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO = 1UL << 0,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 944a112dff37..2a8268e6be15 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -390,6 +390,7 @@ struct mlx4_en_dev {
390 struct pci_dev *pdev; 390 struct pci_dev *pdev;
391 struct mutex state_lock; 391 struct mutex state_lock;
392 struct net_device *pndev[MLX4_MAX_PORTS + 1]; 392 struct net_device *pndev[MLX4_MAX_PORTS + 1];
393 struct net_device *upper[MLX4_MAX_PORTS + 1];
393 u32 port_cnt; 394 u32 port_cnt;
394 bool device_up; 395 bool device_up;
395 struct mlx4_en_profile profile; 396 struct mlx4_en_profile profile;
@@ -410,6 +411,7 @@ struct mlx4_en_dev {
410 unsigned long overflow_period; 411 unsigned long overflow_period;
411 struct ptp_clock *ptp_clock; 412 struct ptp_clock *ptp_clock;
412 struct ptp_clock_info ptp_clock_info; 413 struct ptp_clock_info ptp_clock_info;
414 struct notifier_block nb;
413}; 415};
414 416
415 417
@@ -845,6 +847,9 @@ int mlx4_en_reset_config(struct net_device *dev,
845 struct hwtstamp_config ts_config, 847 struct hwtstamp_config ts_config,
846 netdev_features_t new_features); 848 netdev_features_t new_features);
847 849
850int mlx4_en_netdev_event(struct notifier_block *this,
851 unsigned long event, void *ptr);
852
848/* 853/*
849 * Functions for time stamping 854 * Functions for time stamping
850 */ 855 */
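The mlx4_en.h hunks add a notifier_block to mlx4_en_dev plus the mlx4_en_netdev_event() prototype used by the port-bonding work elsewhere in this series. As a reminder of the general shape of a netdev notifier (a generic sketch, not the mlx4_en implementation; names prefixed my_ are hypothetical):

#include <linux/netdevice.h>
#include <linux/notifier.h>

/* Generic netdev event callback: look up the affected device and react. */
static int my_netdev_event(struct notifier_block *this,
			   unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_CHANGEUPPER)
		netdev_info(ndev, "upper device changed\n");

	return NOTIFY_DONE;
}

static struct notifier_block my_nb = {
	.notifier_call = my_netdev_event,
};

/* Registration is typically done once at driver init with
 *	register_netdevice_notifier(&my_nb);
 * and undone with unregister_netdevice_notifier(&my_nb) on teardown.
 */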
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 7094a9c70fd5..78f51e103880 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -598,14 +598,11 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
598 if (err) 598 if (err)
599 return err; 599 return err;
600 600
601 mpt_entry->start = cpu_to_be64(mr->iova); 601 mpt_entry->start = cpu_to_be64(iova);
602 mpt_entry->length = cpu_to_be64(mr->size); 602 mpt_entry->length = cpu_to_be64(size);
603 mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift); 603 mpt_entry->entity_size = cpu_to_be32(page_shift);
604 604 mpt_entry->flags &= ~(cpu_to_be32(MLX4_MPT_FLAG_FREE |
605 mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK | 605 MLX4_MPT_FLAG_SW_OWNS));
606 MLX4_MPT_PD_FLAG_EN_INV);
607 mpt_entry->flags &= cpu_to_be32(MLX4_MPT_FLAG_FREE |
608 MLX4_MPT_FLAG_SW_OWNS);
609 if (mr->mtt.order < 0) { 606 if (mr->mtt.order < 0) {
610 mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL); 607 mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
611 mpt_entry->mtt_addr = 0; 608 mpt_entry->mtt_addr = 0;
@@ -708,13 +705,13 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
708 if (!mtts) 705 if (!mtts)
709 return -ENOMEM; 706 return -ENOMEM;
710 707
711 dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle, 708 dma_sync_single_for_cpu(&dev->persist->pdev->dev, dma_handle,
712 npages * sizeof (u64), DMA_TO_DEVICE); 709 npages * sizeof (u64), DMA_TO_DEVICE);
713 710
714 for (i = 0; i < npages; ++i) 711 for (i = 0; i < npages; ++i)
715 mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); 712 mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
716 713
717 dma_sync_single_for_device(&dev->pdev->dev, dma_handle, 714 dma_sync_single_for_device(&dev->persist->pdev->dev, dma_handle,
718 npages * sizeof (u64), DMA_TO_DEVICE); 715 npages * sizeof (u64), DMA_TO_DEVICE);
719 716
720 return 0; 717 return 0;
@@ -1020,13 +1017,13 @@ int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list
1020 /* Make sure MPT status is visible before writing MTT entries */ 1017 /* Make sure MPT status is visible before writing MTT entries */
1021 wmb(); 1018 wmb();
1022 1019
1023 dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle, 1020 dma_sync_single_for_cpu(&dev->persist->pdev->dev, fmr->dma_handle,
1024 npages * sizeof(u64), DMA_TO_DEVICE); 1021 npages * sizeof(u64), DMA_TO_DEVICE);
1025 1022
1026 for (i = 0; i < npages; ++i) 1023 for (i = 0; i < npages; ++i)
1027 fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); 1024 fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
1028 1025
1029 dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle, 1026 dma_sync_single_for_device(&dev->persist->pdev->dev, fmr->dma_handle,
1030 npages * sizeof(u64), DMA_TO_DEVICE); 1027 npages * sizeof(u64), DMA_TO_DEVICE);
1031 1028
1032 fmr->mpt->key = cpu_to_be32(key); 1029 fmr->mpt->key = cpu_to_be32(key);
@@ -1155,7 +1152,7 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_free);
1155 1152
1156int mlx4_SYNC_TPT(struct mlx4_dev *dev) 1153int mlx4_SYNC_TPT(struct mlx4_dev *dev)
1157{ 1154{
1158 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000, 1155 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT,
1159 MLX4_CMD_NATIVE); 1156 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1160} 1157}
1161EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT); 1158EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);
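The mr.c hunks mostly reroute pdev accesses through dev->persist, but the surrounding code is a good illustration of the streaming-DMA rule they preserve: CPU writes to a device-visible buffer are bracketed by dma_sync_single_for_cpu() and dma_sync_single_for_device(). A standalone sketch of that pattern (the helper and buffer names are hypothetical):

#include <linux/dma-mapping.h>

/* Hypothetical helper: fill a DMA-mapped table of 64-bit entries that the
 * device reads, bracketing the CPU writes with the sync calls.
 */
static void my_fill_table(struct device *dma_dev, dma_addr_t handle,
			  u64 *table, const u64 *vals, int n)
{
	int i;

	/* give the CPU a coherent view of the buffer before writing it */
	dma_sync_single_for_cpu(dma_dev, handle, n * sizeof(u64),
				DMA_TO_DEVICE);

	for (i = 0; i < n; i++)
		table[i] = vals[i];

	/* flush the writes back so the device observes them */
	dma_sync_single_for_device(dma_dev, handle, n * sizeof(u64),
				   DMA_TO_DEVICE);
}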
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 74216071201f..609c59dc854e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -151,11 +151,13 @@ int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
151 return -ENOMEM; 151 return -ENOMEM;
152 152
153 if (mlx4_is_slave(dev)) 153 if (mlx4_is_slave(dev))
154 offset = uar->index % ((int) pci_resource_len(dev->pdev, 2) / 154 offset = uar->index % ((int)pci_resource_len(dev->persist->pdev,
155 2) /
155 dev->caps.uar_page_size); 156 dev->caps.uar_page_size);
156 else 157 else
157 offset = uar->index; 158 offset = uar->index;
158 uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + offset; 159 uar->pfn = (pci_resource_start(dev->persist->pdev, 2) >> PAGE_SHIFT)
160 + offset;
159 uar->map = NULL; 161 uar->map = NULL;
160 return 0; 162 return 0;
161} 163}
@@ -212,7 +214,6 @@ int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node)
212 list_add(&uar->bf_list, &priv->bf_list); 214 list_add(&uar->bf_list, &priv->bf_list);
213 } 215 }
214 216
215 bf->uar = uar;
216 idx = ffz(uar->free_bf_bmap); 217 idx = ffz(uar->free_bf_bmap);
217 uar->free_bf_bmap |= 1 << idx; 218 uar->free_bf_bmap |= 1 << idx;
218 bf->uar = uar; 219 bf->uar = uar;
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 30eb1ead0fe6..9f268f05290a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -553,9 +553,9 @@ int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port)
553 slaves_pport_actv = mlx4_phys_to_slaves_pport_actv( 553 slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
554 dev, &exclusive_ports); 554 dev, &exclusive_ports);
555 slave_gid -= bitmap_weight(slaves_pport_actv.slaves, 555 slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
556 dev->num_vfs + 1); 556 dev->persist->num_vfs + 1);
557 } 557 }
558 vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1; 558 vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
559 if (slave_gid <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % vfs)) 559 if (slave_gid <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % vfs))
560 return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs) + 1; 560 return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs) + 1;
561 return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs; 561 return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs;
@@ -590,10 +590,10 @@ int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port)
590 slaves_pport_actv = mlx4_phys_to_slaves_pport_actv( 590 slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
591 dev, &exclusive_ports); 591 dev, &exclusive_ports);
592 slave_gid -= bitmap_weight(slaves_pport_actv.slaves, 592 slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
593 dev->num_vfs + 1); 593 dev->persist->num_vfs + 1);
594 } 594 }
595 gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS; 595 gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
596 vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1; 596 vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
597 if (slave_gid <= gids % vfs) 597 if (slave_gid <= gids % vfs)
598 return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave_gid - 1); 598 return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave_gid - 1);
599 599
@@ -644,7 +644,7 @@ void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave)
644 int num_eth_ports, err; 644 int num_eth_ports, err;
645 int i; 645 int i;
646 646
647 if (slave < 0 || slave > dev->num_vfs) 647 if (slave < 0 || slave > dev->persist->num_vfs)
648 return; 648 return;
649 649
650 actv_ports = mlx4_get_active_ports(dev, slave); 650 actv_ports = mlx4_get_active_ports(dev, slave);
@@ -1214,7 +1214,8 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
1214 return -EINVAL; 1214 return -EINVAL;
1215 1215
1216 slaves_pport = mlx4_phys_to_slaves_pport(dev, port); 1216 slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
1217 num_vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1; 1217 num_vfs = bitmap_weight(slaves_pport.slaves,
1218 dev->persist->num_vfs + 1) - 1;
1218 1219
1219 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) { 1220 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
1220 if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid, 1221 if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid,
@@ -1258,7 +1259,7 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
1258 dev, &exclusive_ports); 1259 dev, &exclusive_ports);
1259 num_vfs_before += bitmap_weight( 1260 num_vfs_before += bitmap_weight(
1260 slaves_pport_actv.slaves, 1261 slaves_pport_actv.slaves,
1261 dev->num_vfs + 1); 1262 dev->persist->num_vfs + 1);
1262 } 1263 }
1263 1264
1264 /* candidate_slave_gid isn't necessarily the correct slave, but 1265 /* candidate_slave_gid isn't necessarily the correct slave, but
@@ -1288,7 +1289,7 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
1288 dev, &exclusive_ports); 1289 dev, &exclusive_ports);
1289 slave_gid += bitmap_weight( 1290 slave_gid += bitmap_weight(
1290 slaves_pport_actv.slaves, 1291 slaves_pport_actv.slaves,
1291 dev->num_vfs + 1); 1292 dev->persist->num_vfs + 1);
1292 } 1293 }
1293 } 1294 }
1294 *slave_id = slave_gid; 1295 *slave_id = slave_gid;
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 1586ecce13c7..2bb8553bd905 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -882,6 +882,8 @@ int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
882 for (i = 0; i < ARRAY_SIZE(states) - 1; i++) { 882 for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
883 context->flags &= cpu_to_be32(~(0xf << 28)); 883 context->flags &= cpu_to_be32(~(0xf << 28));
884 context->flags |= cpu_to_be32(states[i + 1] << 28); 884 context->flags |= cpu_to_be32(states[i + 1] << 28);
885 if (states[i + 1] != MLX4_QP_STATE_RTR)
886 context->params2 &= ~MLX4_QP_BIT_FPP;
885 err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1], 887 err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
886 context, 0, 0, qp); 888 context, 0, 0, qp);
887 if (err) { 889 if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/reset.c b/drivers/net/ethernet/mellanox/mlx4/reset.c
index ea1c6d092145..0076d88587ca 100644
--- a/drivers/net/ethernet/mellanox/mlx4/reset.c
+++ b/drivers/net/ethernet/mellanox/mlx4/reset.c
@@ -76,19 +76,21 @@ int mlx4_reset(struct mlx4_dev *dev)
76 goto out; 76 goto out;
77 } 77 }
78 78
79 pcie_cap = pci_pcie_cap(dev->pdev); 79 pcie_cap = pci_pcie_cap(dev->persist->pdev);
80 80
81 for (i = 0; i < 64; ++i) { 81 for (i = 0; i < 64; ++i) {
82 if (i == 22 || i == 23) 82 if (i == 22 || i == 23)
83 continue; 83 continue;
84 if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) { 84 if (pci_read_config_dword(dev->persist->pdev, i * 4,
85 hca_header + i)) {
85 err = -ENODEV; 86 err = -ENODEV;
86 mlx4_err(dev, "Couldn't save HCA PCI header, aborting\n"); 87 mlx4_err(dev, "Couldn't save HCA PCI header, aborting\n");
87 goto out; 88 goto out;
88 } 89 }
89 } 90 }
90 91
91 reset = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_RESET_BASE, 92 reset = ioremap(pci_resource_start(dev->persist->pdev, 0) +
93 MLX4_RESET_BASE,
92 MLX4_RESET_SIZE); 94 MLX4_RESET_SIZE);
93 if (!reset) { 95 if (!reset) {
94 err = -ENOMEM; 96 err = -ENOMEM;
@@ -122,8 +124,8 @@ int mlx4_reset(struct mlx4_dev *dev)
122 124
123 end = jiffies + MLX4_RESET_TIMEOUT_JIFFIES; 125 end = jiffies + MLX4_RESET_TIMEOUT_JIFFIES;
124 do { 126 do {
125 if (!pci_read_config_word(dev->pdev, PCI_VENDOR_ID, &vendor) && 127 if (!pci_read_config_word(dev->persist->pdev, PCI_VENDOR_ID,
126 vendor != 0xffff) 128 &vendor) && vendor != 0xffff)
127 break; 129 break;
128 130
129 msleep(1); 131 msleep(1);
@@ -138,14 +140,16 @@ int mlx4_reset(struct mlx4_dev *dev)
138 /* Now restore the PCI headers */ 140 /* Now restore the PCI headers */
139 if (pcie_cap) { 141 if (pcie_cap) {
140 devctl = hca_header[(pcie_cap + PCI_EXP_DEVCTL) / 4]; 142 devctl = hca_header[(pcie_cap + PCI_EXP_DEVCTL) / 4];
141 if (pcie_capability_write_word(dev->pdev, PCI_EXP_DEVCTL, 143 if (pcie_capability_write_word(dev->persist->pdev,
144 PCI_EXP_DEVCTL,
142 devctl)) { 145 devctl)) {
143 err = -ENODEV; 146 err = -ENODEV;
144 mlx4_err(dev, "Couldn't restore HCA PCI Express Device Control register, aborting\n"); 147 mlx4_err(dev, "Couldn't restore HCA PCI Express Device Control register, aborting\n");
145 goto out; 148 goto out;
146 } 149 }
147 linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4]; 150 linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4];
148 if (pcie_capability_write_word(dev->pdev, PCI_EXP_LNKCTL, 151 if (pcie_capability_write_word(dev->persist->pdev,
152 PCI_EXP_LNKCTL,
149 linkctl)) { 153 linkctl)) {
150 err = -ENODEV; 154 err = -ENODEV;
151 mlx4_err(dev, "Couldn't restore HCA PCI Express Link control register, aborting\n"); 155 mlx4_err(dev, "Couldn't restore HCA PCI Express Link control register, aborting\n");
@@ -157,7 +161,8 @@ int mlx4_reset(struct mlx4_dev *dev)
157 if (i * 4 == PCI_COMMAND) 161 if (i * 4 == PCI_COMMAND)
158 continue; 162 continue;
159 163
160 if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) { 164 if (pci_write_config_dword(dev->persist->pdev, i * 4,
165 hca_header[i])) {
161 err = -ENODEV; 166 err = -ENODEV;
162 mlx4_err(dev, "Couldn't restore HCA reg %x, aborting\n", 167 mlx4_err(dev, "Couldn't restore HCA reg %x, aborting\n",
163 i); 168 i);
@@ -165,7 +170,7 @@ int mlx4_reset(struct mlx4_dev *dev)
165 } 170 }
166 } 171 }
167 172
168 if (pci_write_config_dword(dev->pdev, PCI_COMMAND, 173 if (pci_write_config_dword(dev->persist->pdev, PCI_COMMAND,
169 hca_header[PCI_COMMAND / 4])) { 174 hca_header[PCI_COMMAND / 4])) {
170 err = -ENODEV; 175 err = -ENODEV;
171 mlx4_err(dev, "Couldn't restore HCA COMMAND, aborting\n"); 176 mlx4_err(dev, "Couldn't restore HCA COMMAND, aborting\n");
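The reset.c hunks only change how the pci_dev is reached (dev->persist->pdev instead of dev->pdev); the underlying sequence of saving the first 64 dwords of PCI config space, triggering the reset, then restoring the header is unchanged. A condensed sketch of that save/restore idea, with error handling trimmed (dwords 22 and 23 are skipped here simply to mirror the driver above):

#include <linux/errno.h>
#include <linux/pci.h>

/* Hypothetical sketch: save and restore a device's PCI header around a reset. */
static int my_reset_with_header_restore(struct pci_dev *pdev)
{
	u32 hdr[64];
	int i;

	for (i = 0; i < 64; i++) {
		if (i == 22 || i == 23)
			continue;
		if (pci_read_config_dword(pdev, i * 4, &hdr[i]))
			return -ENODEV;
	}

	/* ... trigger the device reset and wait for the vendor ID to reappear ... */

	for (i = 0; i < 64; i++) {
		if (i * 4 == PCI_COMMAND || i == 22 || i == 23)
			continue;
		if (pci_write_config_dword(pdev, i * 4, hdr[i]))
			return -ENODEV;
	}
	/* restore PCI_COMMAND last so the device is re-enabled only when ready */
	return pci_write_config_dword(pdev, PCI_COMMAND, hdr[PCI_COMMAND / 4]);
}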
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 4efbd1eca611..486e3d26cd4a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -309,12 +309,13 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
309 int allocated, free, reserved, guaranteed, from_free; 309 int allocated, free, reserved, guaranteed, from_free;
310 int from_rsvd; 310 int from_rsvd;
311 311
312 if (slave > dev->num_vfs) 312 if (slave > dev->persist->num_vfs)
313 return -EINVAL; 313 return -EINVAL;
314 314
315 spin_lock(&res_alloc->alloc_lock); 315 spin_lock(&res_alloc->alloc_lock);
316 allocated = (port > 0) ? 316 allocated = (port > 0) ?
317 res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] : 317 res_alloc->allocated[(port - 1) *
318 (dev->persist->num_vfs + 1) + slave] :
318 res_alloc->allocated[slave]; 319 res_alloc->allocated[slave];
319 free = (port > 0) ? res_alloc->res_port_free[port - 1] : 320 free = (port > 0) ? res_alloc->res_port_free[port - 1] :
320 res_alloc->res_free; 321 res_alloc->res_free;
@@ -352,7 +353,8 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
352 if (!err) { 353 if (!err) {
353 /* grant the request */ 354 /* grant the request */
354 if (port > 0) { 355 if (port > 0) {
355 res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count; 356 res_alloc->allocated[(port - 1) *
357 (dev->persist->num_vfs + 1) + slave] += count;
356 res_alloc->res_port_free[port - 1] -= count; 358 res_alloc->res_port_free[port - 1] -= count;
357 res_alloc->res_port_rsvd[port - 1] -= from_rsvd; 359 res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
358 } else { 360 } else {
@@ -376,13 +378,14 @@ static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
376 &priv->mfunc.master.res_tracker.res_alloc[res_type]; 378 &priv->mfunc.master.res_tracker.res_alloc[res_type];
377 int allocated, guaranteed, from_rsvd; 379 int allocated, guaranteed, from_rsvd;
378 380
379 if (slave > dev->num_vfs) 381 if (slave > dev->persist->num_vfs)
380 return; 382 return;
381 383
382 spin_lock(&res_alloc->alloc_lock); 384 spin_lock(&res_alloc->alloc_lock);
383 385
384 allocated = (port > 0) ? 386 allocated = (port > 0) ?
385 res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] : 387 res_alloc->allocated[(port - 1) *
388 (dev->persist->num_vfs + 1) + slave] :
386 res_alloc->allocated[slave]; 389 res_alloc->allocated[slave];
387 guaranteed = res_alloc->guaranteed[slave]; 390 guaranteed = res_alloc->guaranteed[slave];
388 391
@@ -397,7 +400,8 @@ static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
397 } 400 }
398 401
399 if (port > 0) { 402 if (port > 0) {
400 res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count; 403 res_alloc->allocated[(port - 1) *
404 (dev->persist->num_vfs + 1) + slave] -= count;
401 res_alloc->res_port_free[port - 1] += count; 405 res_alloc->res_port_free[port - 1] += count;
402 res_alloc->res_port_rsvd[port - 1] += from_rsvd; 406 res_alloc->res_port_rsvd[port - 1] += from_rsvd;
403 } else { 407 } else {
@@ -415,7 +419,8 @@ static inline void initialize_res_quotas(struct mlx4_dev *dev,
415 enum mlx4_resource res_type, 419 enum mlx4_resource res_type,
416 int vf, int num_instances) 420 int vf, int num_instances)
417{ 421{
418 res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1)); 422 res_alloc->guaranteed[vf] = num_instances /
423 (2 * (dev->persist->num_vfs + 1));
419 res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf]; 424 res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
420 if (vf == mlx4_master_func_num(dev)) { 425 if (vf == mlx4_master_func_num(dev)) {
421 res_alloc->res_free = num_instances; 426 res_alloc->res_free = num_instances;
@@ -486,21 +491,26 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
486 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) { 491 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
487 struct resource_allocator *res_alloc = 492 struct resource_allocator *res_alloc =
488 &priv->mfunc.master.res_tracker.res_alloc[i]; 493 &priv->mfunc.master.res_tracker.res_alloc[i];
489 res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL); 494 res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
490 res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL); 495 sizeof(int), GFP_KERNEL);
496 res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
497 sizeof(int), GFP_KERNEL);
491 if (i == RES_MAC || i == RES_VLAN) 498 if (i == RES_MAC || i == RES_VLAN)
492 res_alloc->allocated = kzalloc(MLX4_MAX_PORTS * 499 res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
493 (dev->num_vfs + 1) * sizeof(int), 500 (dev->persist->num_vfs
494 GFP_KERNEL); 501 + 1) *
502 sizeof(int), GFP_KERNEL);
495 else 503 else
496 res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL); 504 res_alloc->allocated = kzalloc((dev->persist->
505 num_vfs + 1) *
506 sizeof(int), GFP_KERNEL);
497 507
498 if (!res_alloc->quota || !res_alloc->guaranteed || 508 if (!res_alloc->quota || !res_alloc->guaranteed ||
499 !res_alloc->allocated) 509 !res_alloc->allocated)
500 goto no_mem_err; 510 goto no_mem_err;
501 511
502 spin_lock_init(&res_alloc->alloc_lock); 512 spin_lock_init(&res_alloc->alloc_lock);
503 for (t = 0; t < dev->num_vfs + 1; t++) { 513 for (t = 0; t < dev->persist->num_vfs + 1; t++) {
504 struct mlx4_active_ports actv_ports = 514 struct mlx4_active_ports actv_ports =
505 mlx4_get_active_ports(dev, t); 515 mlx4_get_active_ports(dev, t);
506 switch (i) { 516 switch (i) {
@@ -2531,7 +2541,7 @@ int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2531 /* Make sure that the PD bits related to the slave id are zeros. */ 2541 /* Make sure that the PD bits related to the slave id are zeros. */
2532 pd = mr_get_pd(inbox->buf); 2542 pd = mr_get_pd(inbox->buf);
2533 pd_slave = (pd >> 17) & 0x7f; 2543 pd_slave = (pd >> 17) & 0x7f;
2534 if (pd_slave != 0 && pd_slave != slave) { 2544 if (pd_slave != 0 && --pd_slave != slave) {
2535 err = -EPERM; 2545 err = -EPERM;
2536 goto ex_abort; 2546 goto ex_abort;
2537 } 2547 }
@@ -2934,6 +2944,9 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
2934 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff; 2944 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2935 optpar = be32_to_cpu(*(__be32 *) inbox->buf); 2945 optpar = be32_to_cpu(*(__be32 *) inbox->buf);
2936 2946
2947 if (slave != mlx4_master_func_num(dev))
2948 qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
2949
2937 switch (qp_type) { 2950 switch (qp_type) {
2938 case MLX4_QP_ST_RC: 2951 case MLX4_QP_ST_RC:
2939 case MLX4_QP_ST_XRC: 2952 case MLX4_QP_ST_XRC:
@@ -4667,7 +4680,6 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4667 int state; 4680 int state;
4668 LIST_HEAD(tlist); 4681 LIST_HEAD(tlist);
4669 int eqn; 4682 int eqn;
4670 struct mlx4_cmd_mailbox *mailbox;
4671 4683
4672 err = move_all_busy(dev, slave, RES_EQ); 4684 err = move_all_busy(dev, slave, RES_EQ);
4673 if (err) 4685 if (err)
@@ -4693,20 +4705,13 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4693 break; 4705 break;
4694 4706
4695 case RES_EQ_HW: 4707 case RES_EQ_HW:
4696 mailbox = mlx4_alloc_cmd_mailbox(dev); 4708 err = mlx4_cmd(dev, slave, eqn & 0xff,
4697 if (IS_ERR(mailbox)) { 4709 1, MLX4_CMD_HW2SW_EQ,
4698 cond_resched(); 4710 MLX4_CMD_TIME_CLASS_A,
4699 continue; 4711 MLX4_CMD_NATIVE);
4700 }
4701 err = mlx4_cmd_box(dev, slave, 0,
4702 eqn & 0xff, 0,
4703 MLX4_CMD_HW2SW_EQ,
4704 MLX4_CMD_TIME_CLASS_A,
4705 MLX4_CMD_NATIVE);
4706 if (err) 4712 if (err)
4707 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n", 4713 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
4708 slave, eqn); 4714 slave, eqn);
4709 mlx4_free_cmd_mailbox(dev, mailbox);
4710 atomic_dec(&eq->mtt->ref_count); 4715 atomic_dec(&eq->mtt->ref_count);
4711 state = RES_EQ_RESERVED; 4716 state = RES_EQ_RESERVED;
4712 break; 4717 break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 56779c1c7811..201ca6d76ce5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -121,7 +121,7 @@ void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
121 dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf, 121 dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
122 buf->direct.map); 122 buf->direct.map);
123 else { 123 else {
124 if (BITS_PER_LONG == 64 && buf->direct.buf) 124 if (BITS_PER_LONG == 64)
125 vunmap(buf->direct.buf); 125 vunmap(buf->direct.buf);
126 126
127 for (i = 0; i < buf->nbufs; i++) 127 for (i = 0; i < buf->nbufs; i++)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 10e1f1a18255..4878025e231c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -300,11 +300,11 @@ static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
300 param = qp->pid; 300 param = qp->pid;
301 break; 301 break;
302 case QP_STATE: 302 case QP_STATE:
303 param = (u64)mlx5_qp_state_str(be32_to_cpu(ctx->flags) >> 28); 303 param = (unsigned long)mlx5_qp_state_str(be32_to_cpu(ctx->flags) >> 28);
304 *is_str = 1; 304 *is_str = 1;
305 break; 305 break;
306 case QP_XPORT: 306 case QP_XPORT:
307 param = (u64)mlx5_qp_type_str((be32_to_cpu(ctx->flags) >> 16) & 0xff); 307 param = (unsigned long)mlx5_qp_type_str((be32_to_cpu(ctx->flags) >> 16) & 0xff);
308 *is_str = 1; 308 *is_str = 1;
309 break; 309 break;
310 case QP_MTU: 310 case QP_MTU:
@@ -464,7 +464,7 @@ static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
464 464
465 465
466 if (is_str) 466 if (is_str)
467 ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)field); 467 ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)(unsigned long)field);
468 else 468 else
469 ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field); 469 ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);
470 470
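The debugfs.c change casts the string pointer to unsigned long before widening it to the u64 field (and back when printing); a direct (u64) cast of a pointer warns on 32-bit builds because the sizes differ. In miniature, with illustrative variable names:

#include <linux/types.h>

static void my_cast_demo(void)
{
	const char *state_str = "RST";
	u64 field = (unsigned long)state_str;	/* widen via unsigned long: no 32-bit warning */
	const char *back = (const char *)(unsigned long)field;

	(void)back;
}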
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 3f4525619a07..d6651937d899 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -903,12 +903,12 @@ static void remove_one(struct pci_dev *pdev)
903} 903}
904 904
905static const struct pci_device_id mlx5_core_pci_table[] = { 905static const struct pci_device_id mlx5_core_pci_table[] = {
906 { PCI_VDEVICE(MELLANOX, 4113) }, /* Connect-IB */ 906 { PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */
907 { PCI_VDEVICE(MELLANOX, 4114) }, /* Connect-IB VF */ 907 { PCI_VDEVICE(MELLANOX, 0x1012) }, /* Connect-IB VF */
908 { PCI_VDEVICE(MELLANOX, 4115) }, /* ConnectX-4 */ 908 { PCI_VDEVICE(MELLANOX, 0x1013) }, /* ConnectX-4 */
909 { PCI_VDEVICE(MELLANOX, 4116) }, /* ConnectX-4 VF */ 909 { PCI_VDEVICE(MELLANOX, 0x1014) }, /* ConnectX-4 VF */
910 { PCI_VDEVICE(MELLANOX, 4117) }, /* ConnectX-4LX */ 910 { PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */
911 { PCI_VDEVICE(MELLANOX, 4118) }, /* ConnectX-4LX VF */ 911 { PCI_VDEVICE(MELLANOX, 0x1016) }, /* ConnectX-4LX VF */
912 { 0, } 912 { 0, }
913}; 913};
914 914
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 2fa6ae026e4f..10988fbf47eb 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4342,9 +4342,7 @@ static void ksz_init_timer(struct ksz_timer_info *info, int period,
4342{ 4342{
4343 info->max = 0; 4343 info->max = 0;
4344 info->period = period; 4344 info->period = period;
4345 init_timer(&info->timer); 4345 setup_timer(&info->timer, function, (unsigned long)data);
4346 info->timer.function = function;
4347 info->timer.data = (unsigned long) data;
4348} 4346}
4349 4347
4350static void ksz_update_timer(struct ksz_timer_info *info) 4348static void ksz_update_timer(struct ksz_timer_info *info)
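The ksz884x hunk is a mechanical conversion from open-coded init_timer() plus manual function/data assignments to the setup_timer() helper that existed at the time of this series. The shape of the conversion, on a hypothetical driver struct:

#include <linux/timer.h>

struct my_info {
	struct timer_list timer;
};

static void my_timeout(unsigned long data)
{
	struct my_info *info = (struct my_info *)data;

	/* ... periodic work on info ... */
	(void)info;
}

/* Before:					After:
 *	init_timer(&info->timer);		setup_timer(&info->timer, my_timeout,
 *	info->timer.function = my_timeout;		    (unsigned long)info);
 *	info->timer.data = (unsigned long)info;
 */
static void my_init_timer(struct my_info *info)
{
	setup_timer(&info->timer, my_timeout, (unsigned long)info);
}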
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 71af98bb72cb..1412f5af05ec 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -4226,8 +4226,7 @@ static void myri10ge_remove(struct pci_dev *pdev)
4226 mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); 4226 mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
4227#endif 4227#endif
4228 myri10ge_free_slices(mgp); 4228 myri10ge_free_slices(mgp);
4229 if (mgp->msix_vectors != NULL) 4229 kfree(mgp->msix_vectors);
4230 kfree(mgp->msix_vectors);
4231 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), 4230 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
4232 mgp->cmd, mgp->cmd_bus); 4231 mgp->cmd, mgp->cmd_bus);
4233 4232
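The myri10ge change, and several qlcnic/netxen hunks further down, drop "if (ptr != NULL)" guards in front of kfree() and vfree(); both are defined to be no-ops on a NULL pointer, so the guard is redundant. A tiny sketch of the simplified cleanup (the context struct is hypothetical):

#include <linux/slab.h>

struct my_ctx {
	void *msix_vectors;
};

static void my_cleanup(struct my_ctx *ctx)
{
	/* kfree() and vfree() both accept NULL, so no NULL check is needed */
	kfree(ctx->msix_vectors);
	ctx->msix_vectors = NULL;
}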
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 2552e550a78c..eb807b0dc72a 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -1122,12 +1122,12 @@ again:
1122 } 1122 }
1123 1123
1124#ifdef NS83820_VLAN_ACCEL_SUPPORT 1124#ifdef NS83820_VLAN_ACCEL_SUPPORT
1125 if(vlan_tx_tag_present(skb)) { 1125 if (skb_vlan_tag_present(skb)) {
1126 /* fetch the vlan tag info out of the 1126 /* fetch the vlan tag info out of the
1127 * ancillary data if the vlan code 1127 * ancillary data if the vlan code
1128 * is using hw vlan acceleration 1128 * is using hw vlan acceleration
1129 */ 1129 */
1130 short tag = vlan_tx_tag_get(skb); 1130 short tag = skb_vlan_tag_get(skb);
1131 extsts |= (EXTSTS_VPKT | htons(tag)); 1131 extsts |= (EXTSTS_VPKT | htons(tag));
1132 } 1132 }
1133#endif 1133#endif
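From here through forcedeth, several drivers are converted from vlan_tx_tag_present()/vlan_tx_tag_get() to the renamed skb_vlan_tag_present()/skb_vlan_tag_get() helpers introduced by this series; the semantics are unchanged, the new names just make explicit that the tag lives in the skb. Typical transmit-path usage, sketched rather than taken from any one driver:

#include <linux/if_vlan.h>
#include <linux/skbuff.h>

/* Hypothetical xmit snippet: pick up an offloaded VLAN tag if one is set. */
static u16 my_tx_vlan_tag(const struct sk_buff *skb)
{
	if (skb_vlan_tag_present(skb))
		return skb_vlan_tag_get(skb);	/* 16-bit TCI from skb metadata */
	return 0;
}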
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index db0c7a9aee60..a4cdf2f8041a 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -4045,8 +4045,8 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4045 } 4045 }
4046 4046
4047 queue = 0; 4047 queue = 0;
4048 if (vlan_tx_tag_present(skb)) 4048 if (skb_vlan_tag_present(skb))
4049 vlan_tag = vlan_tx_tag_get(skb); 4049 vlan_tag = skb_vlan_tag_get(skb);
4050 if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) { 4050 if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4051 if (skb->protocol == htons(ETH_P_IP)) { 4051 if (skb->protocol == htons(ETH_P_IP)) {
4052 struct iphdr *ip; 4052 struct iphdr *ip;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 2bbd01fcb9b0..6223930a8155 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -4637,7 +4637,7 @@ static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
4637 vpath->ringh = NULL; 4637 vpath->ringh = NULL;
4638 vpath->fifoh = NULL; 4638 vpath->fifoh = NULL;
4639 memset(&vpath->vpath_handles, 0, sizeof(struct list_head)); 4639 memset(&vpath->vpath_handles, 0, sizeof(struct list_head));
4640 vpath->stats_block = 0; 4640 vpath->stats_block = NULL;
4641 vpath->hw_stats = NULL; 4641 vpath->hw_stats = NULL;
4642 vpath->hw_stats_sav = NULL; 4642 vpath->hw_stats_sav = NULL;
4643 vpath->sw_stats = NULL; 4643 vpath->sw_stats = NULL;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index cc0485e3c621..50d5604833ed 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -890,8 +890,8 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
890 dev->name, __func__, __LINE__, 890 dev->name, __func__, __LINE__,
891 fifo_hw, dtr, dtr_priv); 891 fifo_hw, dtr, dtr_priv);
892 892
893 if (vlan_tx_tag_present(skb)) { 893 if (skb_vlan_tag_present(skb)) {
894 u16 vlan_tag = vlan_tx_tag_get(skb); 894 u16 vlan_tag = skb_vlan_tag_get(skb);
895 vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag); 895 vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
896 } 896 }
897 897
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index f39cae620f61..a41bb5e6b954 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -2462,9 +2462,9 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2462 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; 2462 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
2463 2463
2464 /* vlan tag */ 2464 /* vlan tag */
2465 if (vlan_tx_tag_present(skb)) 2465 if (skb_vlan_tag_present(skb))
2466 start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | 2466 start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT |
2467 vlan_tx_tag_get(skb)); 2467 skb_vlan_tag_get(skb));
2468 else 2468 else
2469 start_tx->txvlan = 0; 2469 start_tx->txvlan = 0;
2470 2470
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index c531c8ae1be4..e0c31e3947d1 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -176,9 +176,7 @@ netxen_alloc_sds_rings(struct netxen_recv_context *recv_ctx, int count)
176static void 176static void
177netxen_free_sds_rings(struct netxen_recv_context *recv_ctx) 177netxen_free_sds_rings(struct netxen_recv_context *recv_ctx)
178{ 178{
179 if (recv_ctx->sds_rings != NULL) 179 kfree(recv_ctx->sds_rings);
180 kfree(recv_ctx->sds_rings);
181
182 recv_ctx->sds_rings = NULL; 180 recv_ctx->sds_rings = NULL;
183} 181}
184 182
@@ -1893,9 +1891,9 @@ netxen_tso_check(struct net_device *netdev,
1893 protocol = vh->h_vlan_encapsulated_proto; 1891 protocol = vh->h_vlan_encapsulated_proto;
1894 flags = FLAGS_VLAN_TAGGED; 1892 flags = FLAGS_VLAN_TAGGED;
1895 1893
1896 } else if (vlan_tx_tag_present(skb)) { 1894 } else if (skb_vlan_tag_present(skb)) {
1897 flags = FLAGS_VLAN_OOB; 1895 flags = FLAGS_VLAN_OOB;
1898 vid = vlan_tx_tag_get(skb); 1896 vid = skb_vlan_tag_get(skb);
1899 netxen_set_tx_vlan_tci(first_desc, vid); 1897 netxen_set_tx_vlan_tci(first_desc, vid);
1900 vlan_oob = 1; 1898 vlan_oob = 1;
1901 } 1899 }
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 4e1f58cf19ce..d4b5085a21fa 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -10,6 +10,7 @@
10#include <net/ip.h> 10#include <net/ip.h>
11#include <linux/ipv6.h> 11#include <linux/ipv6.h>
12#include <net/checksum.h> 12#include <net/checksum.h>
13#include <linux/printk.h>
13 14
14#include "qlcnic.h" 15#include "qlcnic.h"
15 16
@@ -320,8 +321,8 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
320 if (protocol == ETH_P_8021Q) { 321 if (protocol == ETH_P_8021Q) {
321 vh = (struct vlan_ethhdr *)skb->data; 322 vh = (struct vlan_ethhdr *)skb->data;
322 vlan_id = ntohs(vh->h_vlan_TCI); 323 vlan_id = ntohs(vh->h_vlan_TCI);
323 } else if (vlan_tx_tag_present(skb)) { 324 } else if (skb_vlan_tag_present(skb)) {
324 vlan_id = vlan_tx_tag_get(skb); 325 vlan_id = skb_vlan_tag_get(skb);
325 } 326 }
326 } 327 }
327 328
@@ -472,9 +473,9 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
472 flags = QLCNIC_FLAGS_VLAN_TAGGED; 473 flags = QLCNIC_FLAGS_VLAN_TAGGED;
473 vlan_tci = ntohs(vh->h_vlan_TCI); 474 vlan_tci = ntohs(vh->h_vlan_TCI);
474 protocol = ntohs(vh->h_vlan_encapsulated_proto); 475 protocol = ntohs(vh->h_vlan_encapsulated_proto);
475 } else if (vlan_tx_tag_present(skb)) { 476 } else if (skb_vlan_tag_present(skb)) {
476 flags = QLCNIC_FLAGS_VLAN_OOB; 477 flags = QLCNIC_FLAGS_VLAN_OOB;
477 vlan_tci = vlan_tx_tag_get(skb); 478 vlan_tci = skb_vlan_tag_get(skb);
478 } 479 }
479 if (unlikely(adapter->tx_pvid)) { 480 if (unlikely(adapter->tx_pvid)) {
480 if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED)) 481 if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
@@ -1473,14 +1474,14 @@ void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
1473 1474
1474static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter) 1475static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
1475{ 1476{
1476 int i; 1477 if (adapter->ahw->msg_enable & NETIF_MSG_DRV) {
1477 unsigned char *data = skb->data; 1478 char prefix[30];
1478 1479
1479 pr_info(KERN_INFO "\n"); 1480 scnprintf(prefix, sizeof(prefix), "%s: %s: ",
1480 for (i = 0; i < skb->len; i++) { 1481 dev_name(&adapter->pdev->dev), __func__);
1481 QLCDB(adapter, DRV, "%02x ", data[i]); 1482
1482 if ((i & 0x0f) == 8) 1483 print_hex_dump_debug(prefix, DUMP_PREFIX_NONE, 16, 1,
1483 pr_info(KERN_INFO "\n"); 1484 skb->data, skb->len, true);
1484 } 1485 }
1485} 1486}
1486 1487
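The qlcnic dump_skb() rewrite replaces a hand-rolled byte loop with print_hex_dump_debug(), gated on the driver's NETIF_MSG_DRV message level. A generic sketch of dumping a buffer that way (the helper name and prefix text are illustrative):

#include <linux/kernel.h>
#include <linux/printk.h>

/* Hypothetical helper: hex-dump a packet buffer through the dynamic-debug
 * facility, 16 bytes per line, with an identifying prefix.
 */
static void my_dump_buf(const char *who, const void *buf, size_t len)
{
	char prefix[32];

	scnprintf(prefix, sizeof(prefix), "%s: ", who);
	print_hex_dump_debug(prefix, DUMP_PREFIX_OFFSET, 16, 1,
			     buf, len, true);
}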
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 2528c3fb6b90..a430a34a4434 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -294,9 +294,7 @@ int qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
294 294
295void qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx) 295void qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
296{ 296{
297 if (recv_ctx->sds_rings != NULL) 297 kfree(recv_ctx->sds_rings);
298 kfree(recv_ctx->sds_rings);
299
300 recv_ctx->sds_rings = NULL; 298 recv_ctx->sds_rings = NULL;
301} 299}
302 300
@@ -1257,8 +1255,7 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
1257 if (ahw->op_mode != QLCNIC_NON_PRIV_FUNC) { 1255 if (ahw->op_mode != QLCNIC_NON_PRIV_FUNC) {
1258 if (fw_dump->tmpl_hdr == NULL || 1256 if (fw_dump->tmpl_hdr == NULL ||
1259 adapter->fw_version > prev_fw_version) { 1257 adapter->fw_version > prev_fw_version) {
1260 if (fw_dump->tmpl_hdr) 1258 vfree(fw_dump->tmpl_hdr);
1261 vfree(fw_dump->tmpl_hdr);
1262 if (!qlcnic_fw_cmd_get_minidump_temp(adapter)) 1259 if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
1263 dev_info(&pdev->dev, 1260 dev_info(&pdev->dev,
1264 "Supports FW dump capability\n"); 1261 "Supports FW dump capability\n");
@@ -2374,13 +2371,12 @@ void qlcnic_free_tx_rings(struct qlcnic_adapter *adapter)
2374 2371
2375 for (ring = 0; ring < adapter->drv_tx_rings; ring++) { 2372 for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
2376 tx_ring = &adapter->tx_ring[ring]; 2373 tx_ring = &adapter->tx_ring[ring];
2377 if (tx_ring && tx_ring->cmd_buf_arr != NULL) { 2374 if (tx_ring) {
2378 vfree(tx_ring->cmd_buf_arr); 2375 vfree(tx_ring->cmd_buf_arr);
2379 tx_ring->cmd_buf_arr = NULL; 2376 tx_ring->cmd_buf_arr = NULL;
2380 } 2377 }
2381 } 2378 }
2382 if (adapter->tx_ring != NULL) 2379 kfree(adapter->tx_ring);
2383 kfree(adapter->tx_ring);
2384} 2380}
2385 2381
2386int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter, 2382int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
@@ -2758,13 +2754,9 @@ static void qlcnic_remove(struct pci_dev *pdev)
2758 } 2754 }
2759 2755
2760 qlcnic_dcb_free(adapter->dcb); 2756 qlcnic_dcb_free(adapter->dcb);
2761
2762 qlcnic_detach(adapter); 2757 qlcnic_detach(adapter);
2763 2758 kfree(adapter->npars);
2764 if (adapter->npars != NULL) 2759 kfree(adapter->eswitch);
2765 kfree(adapter->npars);
2766 if (adapter->eswitch != NULL)
2767 kfree(adapter->eswitch);
2768 2760
2769 if (qlcnic_82xx_check(adapter)) 2761 if (qlcnic_82xx_check(adapter))
2770 qlcnic_clr_all_drv_state(adapter, 0); 2762 qlcnic_clr_all_drv_state(adapter, 0);
@@ -2932,13 +2924,13 @@ void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
2932 2924
2933static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter) 2925static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
2934{ 2926{
2935 if (adapter->fhash.fmax && adapter->fhash.fhead) 2927 if (adapter->fhash.fmax)
2936 kfree(adapter->fhash.fhead); 2928 kfree(adapter->fhash.fhead);
2937 2929
2938 adapter->fhash.fhead = NULL; 2930 adapter->fhash.fhead = NULL;
2939 adapter->fhash.fmax = 0; 2931 adapter->fhash.fmax = 0;
2940 2932
2941 if (adapter->rx_fhash.fmax && adapter->rx_fhash.fhead) 2933 if (adapter->rx_fhash.fmax)
2942 kfree(adapter->rx_fhash.fhead); 2934 kfree(adapter->rx_fhash.fhead);
2943 2935
2944 adapter->rx_fhash.fmax = 0; 2936 adapter->rx_fhash.fmax = 0;
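The qlcnic_main.c cleanups above drop NULL checks before kfree() and vfree(); both functions are defined to do nothing when passed NULL, so the guards were redundant. A minimal sketch of the resulting pattern, with hypothetical field names:

#include <linux/slab.h>
#include <linux/vmalloc.h>

struct example_ctx {			/* hypothetical context structure */
	void *table;			/* kmalloc()'d */
	void *big_buf;			/* vmalloc()'d */
};

static void example_free(struct example_ctx *ctx)
{
	kfree(ctx->table);		/* safe even if ctx->table is NULL */
	ctx->table = NULL;
	vfree(ctx->big_buf);		/* vfree(NULL) is likewise a no-op */
	ctx->big_buf = NULL;
}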
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index c9f57fb84b9e..332bb8a3f430 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -1407,8 +1407,7 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
1407 current_version = qlcnic_83xx_get_fw_version(adapter); 1407 current_version = qlcnic_83xx_get_fw_version(adapter);
1408 1408
1409 if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) { 1409 if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
1410 if (fw_dump->tmpl_hdr) 1410 vfree(fw_dump->tmpl_hdr);
1411 vfree(fw_dump->tmpl_hdr);
1412 if (!qlcnic_fw_cmd_get_minidump_temp(adapter)) 1411 if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
1413 dev_info(&pdev->dev, "Supports FW dump capability\n"); 1412 dev_info(&pdev->dev, "Supports FW dump capability\n");
1414 } 1413 }
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index ef5aed3b1225..8011ef3e7707 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2666,11 +2666,11 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2666 2666
2667 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len); 2667 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2668 2668
2669 if (vlan_tx_tag_present(skb)) { 2669 if (skb_vlan_tag_present(skb)) {
2670 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, 2670 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2671 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb)); 2671 "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
2672 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V; 2672 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2673 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb)); 2673 mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
2674 } 2674 }
2675 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr); 2675 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2676 if (tso < 0) { 2676 if (tso < 0) {
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 9c31e46d1eee..d79e33b3c191 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -708,8 +708,8 @@ static void cp_tx (struct cp_private *cp)
708 708
709static inline u32 cp_tx_vlan_tag(struct sk_buff *skb) 709static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
710{ 710{
711 return vlan_tx_tag_present(skb) ? 711 return skb_vlan_tag_present(skb) ?
712 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00; 712 TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00;
713} 713}
714 714
715static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb, 715static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb,
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index fa274e0f47d7..ad0020af2193 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -2073,8 +2073,8 @@ static int rtl8169_set_features(struct net_device *dev,
2073 2073
2074static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb) 2074static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb)
2075{ 2075{
2076 return (vlan_tx_tag_present(skb)) ? 2076 return (skb_vlan_tag_present(skb)) ?
2077 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00; 2077 TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00;
2078} 2078}
2079 2079
2080static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb) 2080static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
@@ -7049,6 +7049,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
7049 u32 status, len; 7049 u32 status, len;
7050 u32 opts[2]; 7050 u32 opts[2];
7051 int frags; 7051 int frags;
7052 bool stop_queue;
7052 7053
7053 if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) { 7054 if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
7054 netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n"); 7055 netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
@@ -7105,11 +7106,16 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
7105 7106
7106 tp->cur_tx += frags + 1; 7107 tp->cur_tx += frags + 1;
7107 7108
7108 RTL_W8(TxPoll, NPQ); 7109 stop_queue = !TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS);
7109 7110
7110 mmiowb(); 7111 if (!skb->xmit_more || stop_queue ||
7112 netif_xmit_stopped(netdev_get_tx_queue(dev, 0))) {
7113 RTL_W8(TxPoll, NPQ);
7114
7115 mmiowb();
7116 }
7111 7117
7112 if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) { 7118 if (stop_queue) {
7113 /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must 7119 /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
7114 * not miss a ring update when it notices a stopped queue. 7120 * not miss a ring update when it notices a stopped queue.
7115 */ 7121 */
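The r8169 hunk above defers the TxPoll doorbell when the stack signals that more packets are coming (skb->xmit_more), so the hardware is kicked once per batch instead of once per skb. A condensed sketch of the pattern, assuming hypothetical ring-room and doorbell helpers:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static bool example_ring_full(struct net_device *dev)    { return false; }	/* stub */
static void example_ring_doorbell(struct net_device *dev) { }			/* stub: MMIO kick */

static netdev_tx_t example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	bool stop_queue;

	/* ... map the skb and fill TX descriptors here ... */

	stop_queue = example_ring_full(dev);

	/* Ring the doorbell only for the last skb of a batch, when the ring
	 * is about to be stopped, or when the queue is already stopped. */
	if (!skb->xmit_more || stop_queue ||
	    netif_xmit_stopped(netdev_get_tx_queue(dev, 0)))
		example_ring_doorbell(dev);

	if (stop_queue)
		netif_stop_queue(dev);

	return NETDEV_TX_OK;
}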
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 04283fe0e6a7..4da8bd263997 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -597,7 +597,7 @@ static struct sh_eth_cpu_data sh7757_data = {
597static void sh_eth_chip_reset_giga(struct net_device *ndev) 597static void sh_eth_chip_reset_giga(struct net_device *ndev)
598{ 598{
599 int i; 599 int i;
600 unsigned long mahr[2], malr[2]; 600 u32 mahr[2], malr[2];
601 601
602 /* save MAHR and MALR */ 602 /* save MAHR and MALR */
603 for (i = 0; i < 2; i++) { 603 for (i = 0; i < 2; i++) {
@@ -991,7 +991,7 @@ static void read_mac_address(struct net_device *ndev, unsigned char *mac)
991 } 991 }
992} 992}
993 993
994static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp) 994static u32 sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
995{ 995{
996 if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) 996 if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp))
997 return EDTRR_TRNS_GETHER; 997 return EDTRR_TRNS_GETHER;
@@ -1565,7 +1565,7 @@ static void sh_eth_rcv_snd_enable(struct net_device *ndev)
1565} 1565}
1566 1566
1567/* error control function */ 1567/* error control function */
1568static void sh_eth_error(struct net_device *ndev, int intr_status) 1568static void sh_eth_error(struct net_device *ndev, u32 intr_status)
1569{ 1569{
1570 struct sh_eth_private *mdp = netdev_priv(ndev); 1570 struct sh_eth_private *mdp = netdev_priv(ndev);
1571 u32 felic_stat; 1571 u32 felic_stat;
@@ -1678,7 +1678,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1678 struct sh_eth_private *mdp = netdev_priv(ndev); 1678 struct sh_eth_private *mdp = netdev_priv(ndev);
1679 struct sh_eth_cpu_data *cd = mdp->cd; 1679 struct sh_eth_cpu_data *cd = mdp->cd;
1680 irqreturn_t ret = IRQ_NONE; 1680 irqreturn_t ret = IRQ_NONE;
1681 unsigned long intr_status, intr_enable; 1681 u32 intr_status, intr_enable;
1682 1682
1683 spin_lock(&mdp->lock); 1683 spin_lock(&mdp->lock);
1684 1684
@@ -1709,7 +1709,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1709 __napi_schedule(&mdp->napi); 1709 __napi_schedule(&mdp->napi);
1710 } else { 1710 } else {
1711 netdev_warn(ndev, 1711 netdev_warn(ndev,
1712 "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n", 1712 "ignoring interrupt, status 0x%08x, mask 0x%08x.\n",
1713 intr_status, intr_enable); 1713 intr_status, intr_enable);
1714 } 1714 }
1715 } 1715 }
@@ -1742,7 +1742,7 @@ static int sh_eth_poll(struct napi_struct *napi, int budget)
1742 napi); 1742 napi);
1743 struct net_device *ndev = napi->dev; 1743 struct net_device *ndev = napi->dev;
1744 int quota = budget; 1744 int quota = budget;
1745 unsigned long intr_status; 1745 u32 intr_status;
1746 1746
1747 for (;;) { 1747 for (;;) {
1748 intr_status = sh_eth_read(ndev, EESR); 1748 intr_status = sh_eth_read(ndev, EESR);
@@ -2133,7 +2133,7 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
2133 2133
2134 netif_err(mdp, timer, ndev, 2134 netif_err(mdp, timer, ndev,
2135 "transmit timed out, status %8.8x, resetting...\n", 2135 "transmit timed out, status %8.8x, resetting...\n",
2136 (int)sh_eth_read(ndev, EESR)); 2136 sh_eth_read(ndev, EESR));
2137 2137
2138 /* tx_errors count up */ 2138 /* tx_errors count up */
2139 ndev->stats.tx_errors++; 2139 ndev->stats.tx_errors++;
@@ -3019,6 +3019,36 @@ static int sh_eth_drv_remove(struct platform_device *pdev)
3019} 3019}
3020 3020
3021#ifdef CONFIG_PM 3021#ifdef CONFIG_PM
3022#ifdef CONFIG_PM_SLEEP
3023static int sh_eth_suspend(struct device *dev)
3024{
3025 struct net_device *ndev = dev_get_drvdata(dev);
3026 int ret = 0;
3027
3028 if (netif_running(ndev)) {
3029 netif_device_detach(ndev);
3030 ret = sh_eth_close(ndev);
3031 }
3032
3033 return ret;
3034}
3035
3036static int sh_eth_resume(struct device *dev)
3037{
3038 struct net_device *ndev = dev_get_drvdata(dev);
3039 int ret = 0;
3040
3041 if (netif_running(ndev)) {
3042 ret = sh_eth_open(ndev);
3043 if (ret < 0)
3044 return ret;
3045 netif_device_attach(ndev);
3046 }
3047
3048 return ret;
3049}
3050#endif
3051
3022static int sh_eth_runtime_nop(struct device *dev) 3052static int sh_eth_runtime_nop(struct device *dev)
3023{ 3053{
3024 /* Runtime PM callback shared between ->runtime_suspend() 3054 /* Runtime PM callback shared between ->runtime_suspend()
@@ -3032,8 +3062,8 @@ static int sh_eth_runtime_nop(struct device *dev)
3032} 3062}
3033 3063
3034static const struct dev_pm_ops sh_eth_dev_pm_ops = { 3064static const struct dev_pm_ops sh_eth_dev_pm_ops = {
3035 .runtime_suspend = sh_eth_runtime_nop, 3065 SET_SYSTEM_SLEEP_PM_OPS(sh_eth_suspend, sh_eth_resume)
3036 .runtime_resume = sh_eth_runtime_nop, 3066 SET_RUNTIME_PM_OPS(sh_eth_runtime_nop, sh_eth_runtime_nop, NULL)
3037}; 3067};
3038#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops) 3068#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
3039#else 3069#else
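The sh_eth change above hooks system suspend/resume into the existing dev_pm_ops alongside the runtime-PM no-ops; SET_SYSTEM_SLEEP_PM_OPS() and SET_RUNTIME_PM_OPS() compile away when the corresponding config options are off. A stripped-down sketch with stub open/close handlers standing in for the driver's real ndo_open/ndo_stop:

#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/pm.h>

static int example_open(struct net_device *ndev)  { return 0; }	/* stub */
static int example_close(struct net_device *ndev) { return 0; }	/* stub */

#ifdef CONFIG_PM_SLEEP
static int example_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	if (!netif_running(ndev))
		return 0;

	netif_device_detach(ndev);	/* quiesce before powering down */
	return example_close(ndev);
}

static int example_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	int ret = 0;

	if (netif_running(ndev)) {
		ret = example_open(ndev);
		if (!ret)
			netif_device_attach(ndev);
	}
	return ret;
}
#endif

static int example_runtime_nop(struct device *dev) { return 0; }

static const struct dev_pm_ops example_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
	SET_RUNTIME_PM_OPS(example_runtime_nop, example_runtime_nop, NULL)
};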
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 332d3c16d483..259d03f353e1 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -459,21 +459,21 @@ struct sh_eth_cpu_data {
459 459
460 /* mandatory initialize value */ 460 /* mandatory initialize value */
461 int register_type; 461 int register_type;
462 unsigned long eesipr_value; 462 u32 eesipr_value;
463 463
464 /* optional initialize value */ 464 /* optional initialize value */
465 unsigned long ecsr_value; 465 u32 ecsr_value;
466 unsigned long ecsipr_value; 466 u32 ecsipr_value;
467 unsigned long fdr_value; 467 u32 fdr_value;
468 unsigned long fcftr_value; 468 u32 fcftr_value;
469 unsigned long rpadir_value; 469 u32 rpadir_value;
470 470
471 /* interrupt checking mask */ 471 /* interrupt checking mask */
472 unsigned long tx_check; 472 u32 tx_check;
473 unsigned long eesr_err_check; 473 u32 eesr_err_check;
474 474
475 /* Error mask */ 475 /* Error mask */
476 unsigned long trscer_err_mask; 476 u32 trscer_err_mask;
477 477
478 /* hardware features */ 478 /* hardware features */
479 unsigned long irq_flags; /* IRQ configuration flags */ 479 unsigned long irq_flags; /* IRQ configuration flags */
@@ -543,7 +543,7 @@ static inline void sh_eth_soft_swap(char *src, int len)
543#endif 543#endif
544} 544}
545 545
546static inline void sh_eth_write(struct net_device *ndev, unsigned long data, 546static inline void sh_eth_write(struct net_device *ndev, u32 data,
547 int enum_index) 547 int enum_index)
548{ 548{
549 struct sh_eth_private *mdp = netdev_priv(ndev); 549 struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -551,8 +551,7 @@ static inline void sh_eth_write(struct net_device *ndev, unsigned long data,
551 iowrite32(data, mdp->addr + mdp->reg_offset[enum_index]); 551 iowrite32(data, mdp->addr + mdp->reg_offset[enum_index]);
552} 552}
553 553
554static inline unsigned long sh_eth_read(struct net_device *ndev, 554static inline u32 sh_eth_read(struct net_device *ndev, int enum_index)
555 int enum_index)
556{ 555{
557 struct sh_eth_private *mdp = netdev_priv(ndev); 556 struct sh_eth_private *mdp = netdev_priv(ndev);
558 557
@@ -565,14 +564,13 @@ static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp,
565 return mdp->tsu_addr + mdp->reg_offset[enum_index]; 564 return mdp->tsu_addr + mdp->reg_offset[enum_index];
566} 565}
567 566
568static inline void sh_eth_tsu_write(struct sh_eth_private *mdp, 567static inline void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
569 unsigned long data, int enum_index) 568 int enum_index)
570{ 569{
571 iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]); 570 iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]);
572} 571}
573 572
574static inline unsigned long sh_eth_tsu_read(struct sh_eth_private *mdp, 573static inline u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
575 int enum_index)
576{ 574{
577 return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]); 575 return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]);
578} 576}
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 2f398fa4b9e6..34389b6aa67c 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -806,13 +806,13 @@ static bool rocker_desc_gen(struct rocker_desc_info *desc_info)
806 806
807static void *rocker_desc_cookie_ptr_get(struct rocker_desc_info *desc_info) 807static void *rocker_desc_cookie_ptr_get(struct rocker_desc_info *desc_info)
808{ 808{
809 return (void *) desc_info->desc->cookie; 809 return (void *)(uintptr_t)desc_info->desc->cookie;
810} 810}
811 811
812static void rocker_desc_cookie_ptr_set(struct rocker_desc_info *desc_info, 812static void rocker_desc_cookie_ptr_set(struct rocker_desc_info *desc_info,
813 void *ptr) 813 void *ptr)
814{ 814{
815 desc_info->desc->cookie = (long) ptr; 815 desc_info->desc->cookie = (uintptr_t) ptr;
816} 816}
817 817
818static struct rocker_desc_info * 818static struct rocker_desc_info *
@@ -3026,11 +3026,17 @@ static void rocker_port_fdb_learn_work(struct work_struct *work)
3026 container_of(work, struct rocker_fdb_learn_work, work); 3026 container_of(work, struct rocker_fdb_learn_work, work);
3027 bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE); 3027 bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
3028 bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED); 3028 bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
3029 struct netdev_switch_notifier_fdb_info info;
3030
3031 info.addr = lw->addr;
3032 info.vid = lw->vid;
3029 3033
3030 if (learned && removing) 3034 if (learned && removing)
3031 br_fdb_external_learn_del(lw->dev, lw->addr, lw->vid); 3035 call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_DEL,
3036 lw->dev, &info.info);
3032 else if (learned && !removing) 3037 else if (learned && !removing)
3033 br_fdb_external_learn_add(lw->dev, lw->addr, lw->vid); 3038 call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_ADD,
3039 lw->dev, &info.info);
3034 3040
3035 kfree(work); 3041 kfree(work);
3036} 3042}
@@ -3565,6 +3571,8 @@ nest_cancel:
3565 rocker_tlv_nest_cancel(desc_info, frags); 3571 rocker_tlv_nest_cancel(desc_info, frags);
3566out: 3572out:
3567 dev_kfree_skb(skb); 3573 dev_kfree_skb(skb);
3574 dev->stats.tx_dropped++;
3575
3568 return NETDEV_TX_OK; 3576 return NETDEV_TX_OK;
3569} 3577}
3570 3578
@@ -3668,7 +3676,8 @@ static int rocker_fdb_fill_info(struct sk_buff *skb,
3668 if (vid && nla_put_u16(skb, NDA_VLAN, vid)) 3676 if (vid && nla_put_u16(skb, NDA_VLAN, vid))
3669 goto nla_put_failure; 3677 goto nla_put_failure;
3670 3678
3671 return nlmsg_end(skb, nlh); 3679 nlmsg_end(skb, nlh);
3680 return 0;
3672 3681
3673nla_put_failure: 3682nla_put_failure:
3674 nlmsg_cancel(skb, nlh); 3683 nlmsg_cancel(skb, nlh);
@@ -3713,7 +3722,7 @@ skip:
3713} 3722}
3714 3723
3715static int rocker_port_bridge_setlink(struct net_device *dev, 3724static int rocker_port_bridge_setlink(struct net_device *dev,
3716 struct nlmsghdr *nlh) 3725 struct nlmsghdr *nlh, u16 flags)
3717{ 3726{
3718 struct rocker_port *rocker_port = netdev_priv(dev); 3727 struct rocker_port *rocker_port = netdev_priv(dev);
3719 struct nlattr *protinfo; 3728 struct nlattr *protinfo;
@@ -3824,11 +3833,145 @@ static void rocker_port_get_drvinfo(struct net_device *dev,
3824 strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version)); 3833 strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
3825} 3834}
3826 3835
3836static struct rocker_port_stats {
3837 char str[ETH_GSTRING_LEN];
3838 int type;
3839} rocker_port_stats[] = {
3840 { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, },
3841 { "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, },
3842 { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
3843 { "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, },
3844
3845 { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, },
3846 { "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, },
3847 { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
3848 { "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, },
3849};
3850
3851#define ROCKER_PORT_STATS_LEN ARRAY_SIZE(rocker_port_stats)
3852
3853static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
3854 u8 *data)
3855{
3856 u8 *p = data;
3857 int i;
3858
3859 switch (stringset) {
3860 case ETH_SS_STATS:
3861 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
3862 memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
3863 p += ETH_GSTRING_LEN;
3864 }
3865 break;
3866 }
3867}
3868
3869static int
3870rocker_cmd_get_port_stats_prep(struct rocker *rocker,
3871 struct rocker_port *rocker_port,
3872 struct rocker_desc_info *desc_info,
3873 void *priv)
3874{
3875 struct rocker_tlv *cmd_stats;
3876
3877 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
3878 ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
3879 return -EMSGSIZE;
3880
3881 cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
3882 if (!cmd_stats)
3883 return -EMSGSIZE;
3884
3885 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_LPORT,
3886 rocker_port->lport))
3887 return -EMSGSIZE;
3888
3889 rocker_tlv_nest_end(desc_info, cmd_stats);
3890
3891 return 0;
3892}
3893
3894static int
3895rocker_cmd_get_port_stats_ethtool_proc(struct rocker *rocker,
3896 struct rocker_port *rocker_port,
3897 struct rocker_desc_info *desc_info,
3898 void *priv)
3899{
3900 struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
3901 struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
3902 struct rocker_tlv *pattr;
3903 u32 lport;
3904 u64 *data = priv;
3905 int i;
3906
3907 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
3908
3909 if (!attrs[ROCKER_TLV_CMD_INFO])
3910 return -EIO;
3911
3912 rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
3913 attrs[ROCKER_TLV_CMD_INFO]);
3914
3915 if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_LPORT])
3916 return -EIO;
3917
3918 lport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_LPORT]);
3919 if (lport != rocker_port->lport)
3920 return -EIO;
3921
3922 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
3923 pattr = stats_attrs[rocker_port_stats[i].type];
3924 if (!pattr)
3925 continue;
3926
3927 data[i] = rocker_tlv_get_u64(pattr);
3928 }
3929
3930 return 0;
3931}
3932
3933static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
3934 void *priv)
3935{
3936 return rocker_cmd_exec(rocker_port->rocker, rocker_port,
3937 rocker_cmd_get_port_stats_prep, NULL,
3938 rocker_cmd_get_port_stats_ethtool_proc,
3939 priv, false);
3940}
3941
3942static void rocker_port_get_stats(struct net_device *dev,
3943 struct ethtool_stats *stats, u64 *data)
3944{
3945 struct rocker_port *rocker_port = netdev_priv(dev);
3946
3947 if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
3948 int i;
3949
3950 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
3951 data[i] = 0;
3952 }
3953
3954 return;
3955}
3956
3957static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
3958{
3959 switch (sset) {
3960 case ETH_SS_STATS:
3961 return ROCKER_PORT_STATS_LEN;
3962 default:
3963 return -EOPNOTSUPP;
3964 }
3965}
3966
3827static const struct ethtool_ops rocker_port_ethtool_ops = { 3967static const struct ethtool_ops rocker_port_ethtool_ops = {
3828 .get_settings = rocker_port_get_settings, 3968 .get_settings = rocker_port_get_settings,
3829 .set_settings = rocker_port_set_settings, 3969 .set_settings = rocker_port_set_settings,
3830 .get_drvinfo = rocker_port_get_drvinfo, 3970 .get_drvinfo = rocker_port_get_drvinfo,
3831 .get_link = ethtool_op_get_link, 3971 .get_link = ethtool_op_get_link,
3972 .get_strings = rocker_port_get_strings,
3973 .get_ethtool_stats = rocker_port_get_stats,
3974 .get_sset_count = rocker_port_get_sset_count,
3832}; 3975};
3833 3976
3834/***************** 3977/*****************
@@ -3850,12 +3993,22 @@ static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
3850 3993
3851 /* Cleanup tx descriptors */ 3994 /* Cleanup tx descriptors */
3852 while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) { 3995 while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
3996 struct sk_buff *skb;
3997
3853 err = rocker_desc_err(desc_info); 3998 err = rocker_desc_err(desc_info);
3854 if (err && net_ratelimit()) 3999 if (err && net_ratelimit())
3855 netdev_err(rocker_port->dev, "tx desc received with err %d\n", 4000 netdev_err(rocker_port->dev, "tx desc received with err %d\n",
3856 err); 4001 err);
3857 rocker_tx_desc_frags_unmap(rocker_port, desc_info); 4002 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
3858 dev_kfree_skb_any(rocker_desc_cookie_ptr_get(desc_info)); 4003
4004 skb = rocker_desc_cookie_ptr_get(desc_info);
4005 if (err == 0) {
4006 rocker_port->dev->stats.tx_packets++;
4007 rocker_port->dev->stats.tx_bytes += skb->len;
4008 } else
4009 rocker_port->dev->stats.tx_errors++;
4010
4011 dev_kfree_skb_any(skb);
3859 credits++; 4012 credits++;
3860 } 4013 }
3861 4014
@@ -3888,6 +4041,10 @@ static int rocker_port_rx_proc(struct rocker *rocker,
3888 rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]); 4041 rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
3889 skb_put(skb, rx_len); 4042 skb_put(skb, rx_len);
3890 skb->protocol = eth_type_trans(skb, rocker_port->dev); 4043 skb->protocol = eth_type_trans(skb, rocker_port->dev);
4044
4045 rocker_port->dev->stats.rx_packets++;
4046 rocker_port->dev->stats.rx_bytes += skb->len;
4047
3891 netif_receive_skb(skb); 4048 netif_receive_skb(skb);
3892 4049
3893 return rocker_dma_rx_ring_skb_alloc(rocker, rocker_port, desc_info); 4050 return rocker_dma_rx_ring_skb_alloc(rocker, rocker_port, desc_info);
@@ -3921,6 +4078,9 @@ static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
3921 netdev_err(rocker_port->dev, "rx processing failed with err %d\n", 4078 netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
3922 err); 4079 err);
3923 } 4080 }
4081 if (err)
4082 rocker_port->dev->stats.rx_errors++;
4083
3924 rocker_desc_gen_clear(desc_info); 4084 rocker_desc_gen_clear(desc_info);
3925 rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info); 4085 rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
3926 credits++; 4086 credits++;
@@ -4004,7 +4164,8 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
4004 NAPI_POLL_WEIGHT); 4164 NAPI_POLL_WEIGHT);
4005 rocker_carrier_init(rocker_port); 4165 rocker_carrier_init(rocker_port);
4006 4166
4007 dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 4167 dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
4168 NETIF_F_HW_SWITCH_OFFLOAD;
4008 4169
4009 err = register_netdev(dev); 4170 err = register_netdev(dev);
4010 if (err) { 4171 if (err) {
diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h
index 8d2865ba634c..a5bc432feada 100644
--- a/drivers/net/ethernet/rocker/rocker.h
+++ b/drivers/net/ethernet/rocker/rocker.h
@@ -127,6 +127,9 @@ enum {
127 ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL, 127 ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL,
128 ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS, 128 ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS,
129 129
130 ROCKER_TLV_CMD_TYPE_CLEAR_PORT_STATS,
131 ROCKER_TLV_CMD_TYPE_GET_PORT_STATS,
132
130 __ROCKER_TLV_CMD_TYPE_MAX, 133 __ROCKER_TLV_CMD_TYPE_MAX,
131 ROCKER_TLV_CMD_TYPE_MAX = __ROCKER_TLV_CMD_TYPE_MAX - 1, 134 ROCKER_TLV_CMD_TYPE_MAX = __ROCKER_TLV_CMD_TYPE_MAX - 1,
132}; 135};
@@ -146,6 +149,24 @@ enum {
146 __ROCKER_TLV_CMD_PORT_SETTINGS_MAX - 1, 149 __ROCKER_TLV_CMD_PORT_SETTINGS_MAX - 1,
147}; 150};
148 151
152enum {
153 ROCKER_TLV_CMD_PORT_STATS_UNSPEC,
154 ROCKER_TLV_CMD_PORT_STATS_LPORT, /* u32 */
155
156 ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, /* u64 */
157 ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, /* u64 */
158 ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, /* u64 */
159 ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, /* u64 */
160
161 ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, /* u64 */
162 ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, /* u64 */
163 ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, /* u64 */
164 ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, /* u64 */
165
166 __ROCKER_TLV_CMD_PORT_STATS_MAX,
167 ROCKER_TLV_CMD_PORT_STATS_MAX = __ROCKER_TLV_CMD_PORT_STATS_MAX - 1,
168};
169
149enum rocker_port_mode { 170enum rocker_port_mode {
150 ROCKER_PORT_MODE_OF_DPA, 171 ROCKER_PORT_MODE_OF_DPA,
151}; 172};
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index b1a271853d85..c8a01ee4d25e 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -133,9 +133,8 @@ bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
133 return false; 133 return false;
134 134
135 priv->eee_active = 1; 135 priv->eee_active = 1;
136 init_timer(&priv->eee_ctrl_timer); 136 setup_timer(&priv->eee_ctrl_timer, sxgbe_eee_ctrl_timer,
137 priv->eee_ctrl_timer.function = sxgbe_eee_ctrl_timer; 137 (unsigned long)priv);
138 priv->eee_ctrl_timer.data = (unsigned long)priv;
139 priv->eee_ctrl_timer.expires = SXGBE_LPI_TIMER(eee_timer); 138 priv->eee_ctrl_timer.expires = SXGBE_LPI_TIMER(eee_timer);
140 add_timer(&priv->eee_ctrl_timer); 139 add_timer(&priv->eee_ctrl_timer);
141 140
@@ -365,6 +364,26 @@ static int sxgbe_init_rx_buffers(struct net_device *dev,
365 364
366 return 0; 365 return 0;
367} 366}
367
368/**
369 * sxgbe_free_rx_buffers - free what sxgbe_init_rx_buffers() allocated
370 * @dev: net device structure
 371 * @p: RX descriptor of the buffer being freed
 372 * @rx_ring: ring that holds the skb and its DMA mapping
 373 * Description: this function frees an RX buffer set up by sxgbe_init_rx_buffers()
374 */
375static void sxgbe_free_rx_buffers(struct net_device *dev,
376 struct sxgbe_rx_norm_desc *p, int i,
377 unsigned int dma_buf_sz,
378 struct sxgbe_rx_queue *rx_ring)
379{
380 struct sxgbe_priv_data *priv = netdev_priv(dev);
381
382 kfree_skb(rx_ring->rx_skbuff[i]);
383 dma_unmap_single(priv->device, rx_ring->rx_skbuff_dma[i],
384 dma_buf_sz, DMA_FROM_DEVICE);
385}
386
368/** 387/**
369 * init_tx_ring - init the TX descriptor ring 388 * init_tx_ring - init the TX descriptor ring
370 * @dev: net device structure 389 * @dev: net device structure
@@ -457,7 +476,7 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
 457 /* RX ring is not allocated */ 476 /* RX ring is not allocated */
458 if (rx_ring == NULL) { 477 if (rx_ring == NULL) {
459 netdev_err(dev, "No memory for RX queue\n"); 478 netdev_err(dev, "No memory for RX queue\n");
460 goto error; 479 return -ENOMEM;
461 } 480 }
462 481
463 /* assign queue number */ 482 /* assign queue number */
@@ -469,23 +488,21 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
469 &rx_ring->dma_rx_phy, GFP_KERNEL); 488 &rx_ring->dma_rx_phy, GFP_KERNEL);
470 489
471 if (rx_ring->dma_rx == NULL) 490 if (rx_ring->dma_rx == NULL)
472 goto error; 491 return -ENOMEM;
473 492
474 /* allocate memory for RX skbuff array */ 493 /* allocate memory for RX skbuff array */
475 rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize, 494 rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize,
476 sizeof(dma_addr_t), GFP_KERNEL); 495 sizeof(dma_addr_t), GFP_KERNEL);
477 if (!rx_ring->rx_skbuff_dma) { 496 if (!rx_ring->rx_skbuff_dma) {
478 dma_free_coherent(priv->device, 497 ret = -ENOMEM;
479 rx_rsize * sizeof(struct sxgbe_rx_norm_desc), 498 goto err_free_dma_rx;
480 rx_ring->dma_rx, rx_ring->dma_rx_phy);
481 goto error;
482 } 499 }
483 500
484 rx_ring->rx_skbuff = kmalloc_array(rx_rsize, 501 rx_ring->rx_skbuff = kmalloc_array(rx_rsize,
485 sizeof(struct sk_buff *), GFP_KERNEL); 502 sizeof(struct sk_buff *), GFP_KERNEL);
486 if (!rx_ring->rx_skbuff) { 503 if (!rx_ring->rx_skbuff) {
487 kfree(rx_ring->rx_skbuff_dma); 504 ret = -ENOMEM;
488 goto error; 505 goto err_free_skbuff_dma;
489 } 506 }
490 507
491 /* initialise the buffers */ 508 /* initialise the buffers */
@@ -495,7 +512,7 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
495 ret = sxgbe_init_rx_buffers(dev, p, desc_index, 512 ret = sxgbe_init_rx_buffers(dev, p, desc_index,
496 bfsize, rx_ring); 513 bfsize, rx_ring);
497 if (ret) 514 if (ret)
498 goto err_init_rx_buffers; 515 goto err_free_rx_buffers;
499 } 516 }
500 517
 501 /* initialise counters */ 518 /* initialise counters */
@@ -505,11 +522,22 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
505 522
506 return 0; 523 return 0;
507 524
508err_init_rx_buffers: 525err_free_rx_buffers:
509 while (--desc_index >= 0) 526 while (--desc_index >= 0) {
510 free_rx_ring(priv->device, rx_ring, desc_index); 527 struct sxgbe_rx_norm_desc *p;
511error: 528
512 return -ENOMEM; 529 p = rx_ring->dma_rx + desc_index;
530 sxgbe_free_rx_buffers(dev, p, desc_index, bfsize, rx_ring);
531 }
532 kfree(rx_ring->rx_skbuff);
533err_free_skbuff_dma:
534 kfree(rx_ring->rx_skbuff_dma);
535err_free_dma_rx:
536 dma_free_coherent(priv->device,
537 rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
538 rx_ring->dma_rx, rx_ring->dma_rx_phy);
539
540 return ret;
513} 541}
514/** 542/**
515 * free_tx_ring - free the TX descriptor ring 543 * free_tx_ring - free the TX descriptor ring
@@ -1008,10 +1036,9 @@ static void sxgbe_tx_init_coalesce(struct sxgbe_priv_data *priv)
1008 struct sxgbe_tx_queue *p = priv->txq[queue_num]; 1036 struct sxgbe_tx_queue *p = priv->txq[queue_num];
1009 p->tx_coal_frames = SXGBE_TX_FRAMES; 1037 p->tx_coal_frames = SXGBE_TX_FRAMES;
1010 p->tx_coal_timer = SXGBE_COAL_TX_TIMER; 1038 p->tx_coal_timer = SXGBE_COAL_TX_TIMER;
1011 init_timer(&p->txtimer); 1039 setup_timer(&p->txtimer, sxgbe_tx_timer,
1040 (unsigned long)&priv->txq[queue_num]);
1012 p->txtimer.expires = SXGBE_COAL_TIMER(p->tx_coal_timer); 1041 p->txtimer.expires = SXGBE_COAL_TIMER(p->tx_coal_timer);
1013 p->txtimer.data = (unsigned long)&priv->txq[queue_num];
1014 p->txtimer.function = sxgbe_tx_timer;
1015 add_timer(&p->txtimer); 1042 add_timer(&p->txtimer);
1016 } 1043 }
1017} 1044}
@@ -1273,7 +1300,7 @@ static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
1273 if (unlikely(skb_is_gso(skb) && tqueue->prev_mss != cur_mss)) 1300 if (unlikely(skb_is_gso(skb) && tqueue->prev_mss != cur_mss))
1274 ctxt_desc_req = 1; 1301 ctxt_desc_req = 1;
1275 1302
1276 if (unlikely(vlan_tx_tag_present(skb) || 1303 if (unlikely(skb_vlan_tag_present(skb) ||
1277 ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 1304 ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1278 tqueue->hwts_tx_en))) 1305 tqueue->hwts_tx_en)))
1279 ctxt_desc_req = 1; 1306 ctxt_desc_req = 1;
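Both sxgbe hunks above collapse init_timer() plus manual .function/.data assignments into a single setup_timer() call; the two forms are equivalent for the unsigned long based timer API of this kernel generation. A small sketch with a hypothetical private structure:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct example_priv {			/* hypothetical driver private data */
	struct timer_list timer;
};

static void example_timer_fn(unsigned long data)
{
	struct example_priv *priv = (struct example_priv *)data;

	/* ... periodic work on priv ... */
	(void)priv;
}

static void example_timer_start(struct example_priv *priv)
{
	/* Old style:
	 *	init_timer(&priv->timer);
	 *	priv->timer.function = example_timer_fn;
	 *	priv->timer.data = (unsigned long)priv;
	 * setup_timer() collapses the three steps into one call:
	 */
	setup_timer(&priv->timer, example_timer_fn, (unsigned long)priv);
	priv->timer.expires = jiffies + HZ;
	add_timer(&priv->timer);
}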
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
index 9468e64e6007..3e97a8b43147 100644
--- a/drivers/net/ethernet/smsc/Kconfig
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -5,8 +5,9 @@
5config NET_VENDOR_SMSC 5config NET_VENDOR_SMSC
6 bool "SMC (SMSC)/Western Digital devices" 6 bool "SMC (SMSC)/Western Digital devices"
7 default y 7 default y
8 depends on ARM || ISA || MAC || ARM64 || MIPS || M32R || SUPERH || \ 8 depends on ARM || ARM64 || ATARI_ETHERNAT || BLACKFIN || COLDFIRE || \
9 BLACKFIN || MN10300 || COLDFIRE || XTENSA || NIOS2 || PCI || PCMCIA 9 ISA || M32R || MAC || MIPS || MN10300 || NIOS2 || PCI || \
10 PCMCIA || SUPERH || XTENSA
10 ---help--- 11 ---help---
11 If you have a network (Ethernet) card belonging to this class, say Y 12 If you have a network (Ethernet) card belonging to this class, say Y
12 and read the Ethernet-HOWTO, available from 13 and read the Ethernet-HOWTO, available from
@@ -38,8 +39,9 @@ config SMC91X
38 tristate "SMC 91C9x/91C1xxx support" 39 tristate "SMC 91C9x/91C1xxx support"
39 select CRC32 40 select CRC32
40 select MII 41 select MII
41 depends on (ARM || M32R || SUPERH || MIPS || BLACKFIN || \ 42 depends on !OF || GPIOLIB
42 MN10300 || COLDFIRE || ARM64 || XTENSA || NIOS2) && (!OF || GPIOLIB) 43 depends on ARM || ARM64 || ATARI_ETHERNAT || BLACKFIN || COLDFIRE || \
44 M32R || MIPS || MN10300 || NIOS2 || SUPERH || XTENSA
43 ---help--- 45 ---help---
44 This is a driver for SMC's 91x series of Ethernet chipsets, 46 This is a driver for SMC's 91x series of Ethernet chipsets,
45 including the SMC91C94 and the SMC91C111. Say Y if you want it 47 including the SMC91C94 and the SMC91C111. Say Y if you want it
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 2a38dacbbd27..be67baf5f677 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -216,6 +216,27 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
216 216
217#include <unit/smc91111.h> 217#include <unit/smc91111.h>
218 218
219#elif defined(CONFIG_ATARI)
220
221#define SMC_CAN_USE_8BIT 1
222#define SMC_CAN_USE_16BIT 1
223#define SMC_CAN_USE_32BIT 1
224#define SMC_NOWAIT 1
225
226#define SMC_inb(a, r) readb((a) + (r))
227#define SMC_inw(a, r) readw((a) + (r))
228#define SMC_inl(a, r) readl((a) + (r))
229#define SMC_outb(v, a, r) writeb(v, (a) + (r))
230#define SMC_outw(v, a, r) writew(v, (a) + (r))
231#define SMC_outl(v, a, r) writel(v, (a) + (r))
232#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
233#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
234#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
235#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
236
237#define RPC_LSA_DEFAULT RPC_LED_100_10
238#define RPC_LSB_DEFAULT RPC_LED_TX_RX
239
219#elif defined(CONFIG_ARCH_MSM) 240#elif defined(CONFIG_ARCH_MSM)
220 241
221#define SMC_CAN_USE_8BIT 0 242#define SMC_CAN_USE_8BIT 0
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index ac4d5629d905..73c2715a27f3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -6,7 +6,7 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
6 6
7obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o 7obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
8stmmac-platform-objs:= stmmac_platform.o dwmac-meson.o dwmac-sunxi.o \ 8stmmac-platform-objs:= stmmac_platform.o dwmac-meson.o dwmac-sunxi.o \
9 dwmac-sti.o dwmac-socfpga.o 9 dwmac-sti.o dwmac-socfpga.o dwmac-rk.o
10 10
11obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o 11obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o
12stmmac-pci-objs:= stmmac_pci.o 12stmmac-pci-objs:= stmmac_pci.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
new file mode 100644
index 000000000000..6249a4ec08f0
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -0,0 +1,437 @@
1/**
2 * dwmac-rk.c - Rockchip RK3288 DWMAC specific glue layer
3 *
4 * Copyright (C) 2014 Chen-Zhi (Roger Chen)
5 *
6 * Chen-Zhi (Roger Chen) <roger.chen@rock-chips.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19#include <linux/stmmac.h>
20#include <linux/bitops.h>
21#include <linux/clk.h>
22#include <linux/phy.h>
23#include <linux/of_net.h>
24#include <linux/gpio.h>
25#include <linux/of_gpio.h>
26#include <linux/of_device.h>
27#include <linux/regulator/consumer.h>
28#include <linux/delay.h>
29#include <linux/mfd/syscon.h>
30#include <linux/regmap.h>
31
32struct rk_priv_data {
33 struct platform_device *pdev;
34 int phy_iface;
35 struct regulator *regulator;
36
37 bool clk_enabled;
38 bool clock_input;
39
40 struct clk *clk_mac;
41 struct clk *clk_mac_pll;
42 struct clk *gmac_clkin;
43 struct clk *mac_clk_rx;
44 struct clk *mac_clk_tx;
45 struct clk *clk_mac_ref;
46 struct clk *clk_mac_refout;
47 struct clk *aclk_mac;
48 struct clk *pclk_mac;
49
50 int tx_delay;
51 int rx_delay;
52
53 struct regmap *grf;
54};
55
56#define HIWORD_UPDATE(val, mask, shift) \
57 ((val) << (shift) | (mask) << ((shift) + 16))
58
59#define GRF_BIT(nr) (BIT(nr) | BIT(nr+16))
60#define GRF_CLR_BIT(nr) (BIT(nr+16))
61
62#define RK3288_GRF_SOC_CON1 0x0248
63#define RK3288_GRF_SOC_CON3 0x0250
64#define RK3288_GRF_GPIO3D_E 0x01ec
65#define RK3288_GRF_GPIO4A_E 0x01f0
66#define RK3288_GRF_GPIO4B_E 0x01f4
67
68/*RK3288_GRF_SOC_CON1*/
69#define GMAC_PHY_INTF_SEL_RGMII (GRF_BIT(6) | GRF_CLR_BIT(7) | GRF_CLR_BIT(8))
70#define GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(6) | GRF_CLR_BIT(7) | GRF_BIT(8))
71#define GMAC_FLOW_CTRL GRF_BIT(9)
72#define GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(9)
73#define GMAC_SPEED_10M GRF_CLR_BIT(10)
74#define GMAC_SPEED_100M GRF_BIT(10)
75#define GMAC_RMII_CLK_25M GRF_BIT(11)
76#define GMAC_RMII_CLK_2_5M GRF_CLR_BIT(11)
77#define GMAC_CLK_125M (GRF_CLR_BIT(12) | GRF_CLR_BIT(13))
78#define GMAC_CLK_25M (GRF_BIT(12) | GRF_BIT(13))
79#define GMAC_CLK_2_5M (GRF_CLR_BIT(12) | GRF_BIT(13))
80#define GMAC_RMII_MODE GRF_BIT(14)
81#define GMAC_RMII_MODE_CLR GRF_CLR_BIT(14)
82
83/*RK3288_GRF_SOC_CON3*/
84#define GMAC_TXCLK_DLY_ENABLE GRF_BIT(14)
85#define GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14)
86#define GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
87#define GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
88#define GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
89#define GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
90
91static void set_to_rgmii(struct rk_priv_data *bsp_priv,
92 int tx_delay, int rx_delay)
93{
94 struct device *dev = &bsp_priv->pdev->dev;
95
96 if (IS_ERR(bsp_priv->grf)) {
97 dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
98 return;
99 }
100
101 regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
102 GMAC_PHY_INTF_SEL_RGMII | GMAC_RMII_MODE_CLR);
103 regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON3,
104 GMAC_RXCLK_DLY_ENABLE | GMAC_TXCLK_DLY_ENABLE |
105 GMAC_CLK_RX_DL_CFG(rx_delay) |
106 GMAC_CLK_TX_DL_CFG(tx_delay));
107}
108
109static void set_to_rmii(struct rk_priv_data *bsp_priv)
110{
111 struct device *dev = &bsp_priv->pdev->dev;
112
113 if (IS_ERR(bsp_priv->grf)) {
114 dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
115 return;
116 }
117
118 regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
119 GMAC_PHY_INTF_SEL_RMII | GMAC_RMII_MODE);
120}
121
122static void set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
123{
124 struct device *dev = &bsp_priv->pdev->dev;
125
126 if (IS_ERR(bsp_priv->grf)) {
127 dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
128 return;
129 }
130
131 if (speed == 10)
132 regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, GMAC_CLK_2_5M);
133 else if (speed == 100)
134 regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, GMAC_CLK_25M);
135 else if (speed == 1000)
136 regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, GMAC_CLK_125M);
137 else
138 dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
139}
140
141static void set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
142{
143 struct device *dev = &bsp_priv->pdev->dev;
144
145 if (IS_ERR(bsp_priv->grf)) {
146 dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
147 return;
148 }
149
150 if (speed == 10) {
151 regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
152 GMAC_RMII_CLK_2_5M | GMAC_SPEED_10M);
153 } else if (speed == 100) {
154 regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
155 GMAC_RMII_CLK_25M | GMAC_SPEED_100M);
156 } else {
157 dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
158 }
159}
160
161static int gmac_clk_init(struct rk_priv_data *bsp_priv)
162{
163 struct device *dev = &bsp_priv->pdev->dev;
164
165 bsp_priv->clk_enabled = false;
166
167 bsp_priv->mac_clk_rx = devm_clk_get(dev, "mac_clk_rx");
168 if (IS_ERR(bsp_priv->mac_clk_rx))
169 dev_err(dev, "%s: cannot get clock %s\n",
170 __func__, "mac_clk_rx");
171
172 bsp_priv->mac_clk_tx = devm_clk_get(dev, "mac_clk_tx");
173 if (IS_ERR(bsp_priv->mac_clk_tx))
174 dev_err(dev, "%s: cannot get clock %s\n",
175 __func__, "mac_clk_tx");
176
177 bsp_priv->aclk_mac = devm_clk_get(dev, "aclk_mac");
178 if (IS_ERR(bsp_priv->aclk_mac))
179 dev_err(dev, "%s: cannot get clock %s\n",
180 __func__, "aclk_mac");
181
182 bsp_priv->pclk_mac = devm_clk_get(dev, "pclk_mac");
183 if (IS_ERR(bsp_priv->pclk_mac))
184 dev_err(dev, "%s: cannot get clock %s\n",
185 __func__, "pclk_mac");
186
187 bsp_priv->clk_mac = devm_clk_get(dev, "stmmaceth");
188 if (IS_ERR(bsp_priv->clk_mac))
189 dev_err(dev, "%s: cannot get clock %s\n",
190 __func__, "stmmaceth");
191
192 if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) {
193 bsp_priv->clk_mac_ref = devm_clk_get(dev, "clk_mac_ref");
194 if (IS_ERR(bsp_priv->clk_mac_ref))
195 dev_err(dev, "%s: cannot get clock %s\n",
196 __func__, "clk_mac_ref");
197
198 if (!bsp_priv->clock_input) {
199 bsp_priv->clk_mac_refout =
200 devm_clk_get(dev, "clk_mac_refout");
201 if (IS_ERR(bsp_priv->clk_mac_refout))
202 dev_err(dev, "%s: cannot get clock %s\n",
203 __func__, "clk_mac_refout");
204 }
205 }
206
207 if (bsp_priv->clock_input) {
208 dev_info(dev, "%s: clock input from PHY\n", __func__);
209 } else {
210 if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
211 clk_set_rate(bsp_priv->clk_mac_pll, 50000000);
212 }
213
214 return 0;
215}
216
217static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
218{
 219 int phy_iface = bsp_priv->phy_iface;
220
221 if (enable) {
222 if (!bsp_priv->clk_enabled) {
223 if (phy_iface == PHY_INTERFACE_MODE_RMII) {
224 if (!IS_ERR(bsp_priv->mac_clk_rx))
225 clk_prepare_enable(
226 bsp_priv->mac_clk_rx);
227
228 if (!IS_ERR(bsp_priv->clk_mac_ref))
229 clk_prepare_enable(
230 bsp_priv->clk_mac_ref);
231
232 if (!IS_ERR(bsp_priv->clk_mac_refout))
233 clk_prepare_enable(
234 bsp_priv->clk_mac_refout);
235 }
236
237 if (!IS_ERR(bsp_priv->aclk_mac))
238 clk_prepare_enable(bsp_priv->aclk_mac);
239
240 if (!IS_ERR(bsp_priv->pclk_mac))
241 clk_prepare_enable(bsp_priv->pclk_mac);
242
243 if (!IS_ERR(bsp_priv->mac_clk_tx))
244 clk_prepare_enable(bsp_priv->mac_clk_tx);
245
246 /**
247 * if (!IS_ERR(bsp_priv->clk_mac))
248 * clk_prepare_enable(bsp_priv->clk_mac);
249 */
250 mdelay(5);
251 bsp_priv->clk_enabled = true;
252 }
253 } else {
254 if (bsp_priv->clk_enabled) {
255 if (phy_iface == PHY_INTERFACE_MODE_RMII) {
256 if (!IS_ERR(bsp_priv->mac_clk_rx))
257 clk_disable_unprepare(
258 bsp_priv->mac_clk_rx);
259
260 if (!IS_ERR(bsp_priv->clk_mac_ref))
261 clk_disable_unprepare(
262 bsp_priv->clk_mac_ref);
263
264 if (!IS_ERR(bsp_priv->clk_mac_refout))
265 clk_disable_unprepare(
266 bsp_priv->clk_mac_refout);
267 }
268
269 if (!IS_ERR(bsp_priv->aclk_mac))
270 clk_disable_unprepare(bsp_priv->aclk_mac);
271
272 if (!IS_ERR(bsp_priv->pclk_mac))
273 clk_disable_unprepare(bsp_priv->pclk_mac);
274
275 if (!IS_ERR(bsp_priv->mac_clk_tx))
276 clk_disable_unprepare(bsp_priv->mac_clk_tx);
277 /**
278 * if (!IS_ERR(bsp_priv->clk_mac))
279 * clk_disable_unprepare(bsp_priv->clk_mac);
280 */
281 bsp_priv->clk_enabled = false;
282 }
283 }
284
285 return 0;
286}
287
288static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
289{
290 struct regulator *ldo = bsp_priv->regulator;
291 int ret;
292 struct device *dev = &bsp_priv->pdev->dev;
293
294 if (!ldo) {
295 dev_err(dev, "%s: no regulator found\n", __func__);
296 return -1;
297 }
298
299 if (enable) {
300 ret = regulator_enable(ldo);
301 if (ret)
302 dev_err(dev, "%s: fail to enable phy-supply\n",
303 __func__);
304 } else {
305 ret = regulator_disable(ldo);
306 if (ret)
307 dev_err(dev, "%s: fail to disable phy-supply\n",
308 __func__);
309 }
310
311 return 0;
312}
313
314static void *rk_gmac_setup(struct platform_device *pdev)
315{
316 struct rk_priv_data *bsp_priv;
317 struct device *dev = &pdev->dev;
318 int ret;
319 const char *strings = NULL;
320 int value;
321
322 bsp_priv = devm_kzalloc(dev, sizeof(*bsp_priv), GFP_KERNEL);
323 if (!bsp_priv)
324 return ERR_PTR(-ENOMEM);
325
326 bsp_priv->phy_iface = of_get_phy_mode(dev->of_node);
327
328 bsp_priv->regulator = devm_regulator_get_optional(dev, "phy");
329 if (IS_ERR(bsp_priv->regulator)) {
330 if (PTR_ERR(bsp_priv->regulator) == -EPROBE_DEFER) {
331 dev_err(dev, "phy regulator is not available yet, deferred probing\n");
332 return ERR_PTR(-EPROBE_DEFER);
333 }
334 dev_err(dev, "no regulator found\n");
335 bsp_priv->regulator = NULL;
336 }
337
338 ret = of_property_read_string(dev->of_node, "clock_in_out", &strings);
339 if (ret) {
340 dev_err(dev, "%s: Can not read property: clock_in_out.\n",
341 __func__);
342 bsp_priv->clock_input = true;
343 } else {
344 dev_info(dev, "%s: clock input or output? (%s).\n",
345 __func__, strings);
346 if (!strcmp(strings, "input"))
347 bsp_priv->clock_input = true;
348 else
349 bsp_priv->clock_input = false;
350 }
351
352 ret = of_property_read_u32(dev->of_node, "tx_delay", &value);
353 if (ret) {
354 bsp_priv->tx_delay = 0x30;
355 dev_err(dev, "%s: Can not read property: tx_delay.", __func__);
356 dev_err(dev, "%s: set tx_delay to 0x%x\n",
357 __func__, bsp_priv->tx_delay);
358 } else {
359 dev_info(dev, "%s: TX delay(0x%x).\n", __func__, value);
360 bsp_priv->tx_delay = value;
361 }
362
363 ret = of_property_read_u32(dev->of_node, "rx_delay", &value);
364 if (ret) {
365 bsp_priv->rx_delay = 0x10;
366 dev_err(dev, "%s: Can not read property: rx_delay.", __func__);
367 dev_err(dev, "%s: set rx_delay to 0x%x\n",
368 __func__, bsp_priv->rx_delay);
369 } else {
370 dev_info(dev, "%s: RX delay(0x%x).\n", __func__, value);
371 bsp_priv->rx_delay = value;
372 }
373
374 bsp_priv->grf = syscon_regmap_lookup_by_phandle(dev->of_node,
375 "rockchip,grf");
376 bsp_priv->pdev = pdev;
377
378 /*rmii or rgmii*/
379 if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII) {
380 dev_info(dev, "%s: init for RGMII\n", __func__);
381 set_to_rgmii(bsp_priv, bsp_priv->tx_delay, bsp_priv->rx_delay);
382 } else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) {
383 dev_info(dev, "%s: init for RMII\n", __func__);
384 set_to_rmii(bsp_priv);
385 } else {
386 dev_err(dev, "%s: NO interface defined!\n", __func__);
387 }
388
389 gmac_clk_init(bsp_priv);
390
391 return bsp_priv;
392}
393
394static int rk_gmac_init(struct platform_device *pdev, void *priv)
395{
396 struct rk_priv_data *bsp_priv = priv;
397 int ret;
398
399 ret = phy_power_on(bsp_priv, true);
400 if (ret)
401 return ret;
402
403 ret = gmac_clk_enable(bsp_priv, true);
404 if (ret)
405 return ret;
406
407 return 0;
408}
409
410static void rk_gmac_exit(struct platform_device *pdev, void *priv)
411{
412 struct rk_priv_data *gmac = priv;
413
414 phy_power_on(gmac, false);
415 gmac_clk_enable(gmac, false);
416}
417
418static void rk_fix_speed(void *priv, unsigned int speed)
419{
420 struct rk_priv_data *bsp_priv = priv;
421 struct device *dev = &bsp_priv->pdev->dev;
422
423 if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII)
424 set_rgmii_speed(bsp_priv, speed);
425 else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
426 set_rmii_speed(bsp_priv, speed);
427 else
428 dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
429}
430
431const struct stmmac_of_data rk3288_gmac_data = {
432 .has_gmac = 1,
433 .fix_mac_speed = rk_fix_speed,
434 .setup = rk_gmac_setup,
435 .init = rk_gmac_init,
436 .exit = rk_gmac_exit,
437};
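The GRF_BIT()/GRF_CLR_BIT()/HIWORD_UPDATE() macros in the new dwmac-rk.c rely on the Rockchip GRF convention that the upper 16 bits of each 32-bit write act as a write-enable mask for the lower 16 bits, so individual fields can be changed without a read-modify-write. A short worked illustration (macro definitions taken from the file above; the helper around them is hypothetical):

#include <linux/bitops.h>
#include <linux/regmap.h>

#define HIWORD_UPDATE(val, mask, shift) \
		((val) << (shift) | (mask) << ((shift) + 16))
#define GRF_BIT(nr)	(BIT(nr) | BIT(nr+16))
#define GRF_CLR_BIT(nr)	(BIT(nr+16))

/*
 * GRF_BIT(6)                   == BIT(6) | BIT(22) == 0x00400040  -> set bit 6
 * GRF_CLR_BIT(7)               == BIT(23)          == 0x00800000  -> clear bit 7
 * HIWORD_UPDATE(0x30, 0x7F, 0) == 0x30 | (0x7F << 16) == 0x007f0030
 *                                                   -> write 0x30 into bits 6:0
 */
static void example_select_rgmii(struct regmap *grf, unsigned int soc_con1)
{
	/* Set bit 6, clear bits 7 and 8; all other bits stay untouched
	 * because their mask bits (16..31) remain zero. */
	regmap_write(grf, soc_con1,
		     GRF_BIT(6) | GRF_CLR_BIT(7) | GRF_CLR_BIT(8));
}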
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index 056b358b4a72..bb6e2dc61bec 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -122,7 +122,7 @@ struct sti_dwmac {
122 bool ext_phyclk; /* Clock from external PHY */ 122 bool ext_phyclk; /* Clock from external PHY */
123 u32 tx_retime_src; /* TXCLK Retiming*/ 123 u32 tx_retime_src; /* TXCLK Retiming*/
124 struct clk *clk; /* PHY clock */ 124 struct clk *clk; /* PHY clock */
125 int ctrl_reg; /* GMAC glue-logic control register */ 125 u32 ctrl_reg; /* GMAC glue-logic control register */
126 int clk_sel_reg; /* GMAC ext clk selection register */ 126 int clk_sel_reg; /* GMAC ext clk selection register */
127 struct device *dev; 127 struct device *dev;
128 struct regmap *regmap; 128 struct regmap *regmap;
@@ -285,11 +285,6 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
285 if (!np) 285 if (!np)
286 return -EINVAL; 286 return -EINVAL;
287 287
288 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-ethconf");
289 if (!res)
290 return -ENODATA;
291 dwmac->ctrl_reg = res->start;
292
293 /* clk selection from extra syscfg register */ 288 /* clk selection from extra syscfg register */
294 dwmac->clk_sel_reg = -ENXIO; 289 dwmac->clk_sel_reg = -ENXIO;
295 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-clkconf"); 290 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-clkconf");
@@ -300,6 +295,12 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
300 if (IS_ERR(regmap)) 295 if (IS_ERR(regmap))
301 return PTR_ERR(regmap); 296 return PTR_ERR(regmap);
302 297
298 err = of_property_read_u32_index(np, "st,syscon", 1, &dwmac->ctrl_reg);
299 if (err) {
300 dev_err(dev, "Can't get sysconfig ctrl offset (%d)\n", err);
301 return err;
302 }
303
303 dwmac->dev = dev; 304 dwmac->dev = dev;
304 dwmac->interface = of_get_phy_mode(np); 305 dwmac->interface = of_get_phy_mode(np);
305 dwmac->regmap = regmap; 306 dwmac->regmap = regmap;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index cf62ff4c8c56..55e89b3838f1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1097,6 +1097,7 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1097 1097
1098 priv->dirty_tx = 0; 1098 priv->dirty_tx = 0;
1099 priv->cur_tx = 0; 1099 priv->cur_tx = 0;
1100 netdev_reset_queue(priv->dev);
1100 1101
1101 stmmac_clear_descriptors(priv); 1102 stmmac_clear_descriptors(priv);
1102 1103
@@ -1287,7 +1288,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1287 * that needs to not insert csum in the TDES. 1288 * that needs to not insert csum in the TDES.
1288 */ 1289 */
1289 priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE); 1290 priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE);
1290 tc = SF_DMA_MODE; 1291 priv->xstats.threshold = SF_DMA_MODE;
1291 } else 1292 } else
1292 priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE); 1293 priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
1293} 1294}
@@ -1300,6 +1301,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1300static void stmmac_tx_clean(struct stmmac_priv *priv) 1301static void stmmac_tx_clean(struct stmmac_priv *priv)
1301{ 1302{
1302 unsigned int txsize = priv->dma_tx_size; 1303 unsigned int txsize = priv->dma_tx_size;
1304 unsigned int bytes_compl = 0, pkts_compl = 0;
1303 1305
1304 spin_lock(&priv->tx_lock); 1306 spin_lock(&priv->tx_lock);
1305 1307
@@ -1356,6 +1358,8 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
1356 priv->hw->mode->clean_desc3(priv, p); 1358 priv->hw->mode->clean_desc3(priv, p);
1357 1359
1358 if (likely(skb != NULL)) { 1360 if (likely(skb != NULL)) {
1361 pkts_compl++;
1362 bytes_compl += skb->len;
1359 dev_consume_skb_any(skb); 1363 dev_consume_skb_any(skb);
1360 priv->tx_skbuff[entry] = NULL; 1364 priv->tx_skbuff[entry] = NULL;
1361 } 1365 }
@@ -1364,6 +1368,9 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
1364 1368
1365 priv->dirty_tx++; 1369 priv->dirty_tx++;
1366 } 1370 }
1371
1372 netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
1373
1367 if (unlikely(netif_queue_stopped(priv->dev) && 1374 if (unlikely(netif_queue_stopped(priv->dev) &&
1368 stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) { 1375 stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
1369 netif_tx_lock(priv->dev); 1376 netif_tx_lock(priv->dev);
@@ -1418,6 +1425,7 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
1418 (i == txsize - 1)); 1425 (i == txsize - 1));
1419 priv->dirty_tx = 0; 1426 priv->dirty_tx = 0;
1420 priv->cur_tx = 0; 1427 priv->cur_tx = 0;
1428 netdev_reset_queue(priv->dev);
1421 priv->hw->dma->start_tx(priv->ioaddr); 1429 priv->hw->dma->start_tx(priv->ioaddr);
1422 1430
1423 priv->dev->stats.tx_errors++; 1431 priv->dev->stats.tx_errors++;
@@ -1444,9 +1452,14 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1444 } 1452 }
1445 if (unlikely(status & tx_hard_error_bump_tc)) { 1453 if (unlikely(status & tx_hard_error_bump_tc)) {
1446 /* Try to bump up the dma threshold on this failure */ 1454 /* Try to bump up the dma threshold on this failure */
1447 if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) { 1455 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
1456 (tc <= 256)) {
1448 tc += 64; 1457 tc += 64;
1449 priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE); 1458 if (priv->plat->force_thresh_dma_mode)
1459 priv->hw->dma->dma_mode(priv->ioaddr, tc, tc);
1460 else
1461 priv->hw->dma->dma_mode(priv->ioaddr, tc,
1462 SF_DMA_MODE);
1450 priv->xstats.threshold = tc; 1463 priv->xstats.threshold = tc;
1451 } 1464 }
1452 } else if (unlikely(status == tx_hard_error)) 1465 } else if (unlikely(status == tx_hard_error))
@@ -2050,6 +2063,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2050 if (!priv->hwts_tx_en) 2063 if (!priv->hwts_tx_en)
2051 skb_tx_timestamp(skb); 2064 skb_tx_timestamp(skb);
2052 2065
2066 netdev_sent_queue(dev, skb->len);
2053 priv->hw->dma->enable_dma_transmission(priv->ioaddr); 2067 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
2054 2068
2055 spin_unlock(&priv->tx_lock); 2069 spin_unlock(&priv->tx_lock);
@@ -2742,7 +2756,11 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
2742 priv->plat->enh_desc = priv->dma_cap.enh_desc; 2756 priv->plat->enh_desc = priv->dma_cap.enh_desc;
2743 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up; 2757 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
2744 2758
2745 priv->plat->tx_coe = priv->dma_cap.tx_coe; 2759 /* TXCOE doesn't work in thresh DMA mode */
2760 if (priv->plat->force_thresh_dma_mode)
2761 priv->plat->tx_coe = 0;
2762 else
2763 priv->plat->tx_coe = priv->dma_cap.tx_coe;
2746 2764
2747 if (priv->dma_cap.rx_coe_type2) 2765 if (priv->dma_cap.rx_coe_type2)
2748 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2; 2766 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
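The stmmac_main.c hunks above hook the driver into byte queue limits: netdev_sent_queue() accounts bytes as frames are handed to the DMA in stmmac_xmit(), netdev_completed_queue() releases them from stmmac_tx_clean(), and netdev_reset_queue() clears the accounting when the TX ring is reinitialised in stmmac_tx_err(). A minimal sketch of that pattern for a hypothetical driver (the foo_* names are illustrative, not part of the patch):

    static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            /* ... place skb on the hardware TX ring ... */
            netdev_sent_queue(dev, skb->len);          /* bytes handed to HW */
            return NETDEV_TX_OK;
    }

    static void foo_tx_clean(struct net_device *dev)
    {
            unsigned int pkts_compl = 0, bytes_compl = 0;

            /* ... walk completed descriptors, summing pkts_compl/bytes_compl ... */
            netdev_completed_queue(dev, pkts_compl, bytes_compl);
    }

    static void foo_tx_err(struct net_device *dev)
    {
            /* ring re-initialised from scratch, so drop the BQL state */
            netdev_reset_queue(dev);
    }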
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 054520d67de4..3bca908716e2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -24,8 +24,50 @@
24*******************************************************************************/ 24*******************************************************************************/
25 25
26#include <linux/pci.h> 26#include <linux/pci.h>
27#include <linux/dmi.h>
28
27#include "stmmac.h" 29#include "stmmac.h"
28 30
31/*
32 * This struct is used to associate the PCI function of a MAC controller on a
33 * board, discovered via DMI, with the address of the PHY connected to that
34 * MAC. A negative address means that the MAC controller is not connected to
35 * a PHY.
36 */
37struct stmmac_pci_dmi_data {
38 const char *name;
39 unsigned int func;
40 int phy_addr;
41};
42
43struct stmmac_pci_info {
44 struct pci_dev *pdev;
45 int (*setup)(struct plat_stmmacenet_data *plat,
46 struct stmmac_pci_info *info);
47 struct stmmac_pci_dmi_data *dmi;
48};
49
50static int stmmac_pci_find_phy_addr(struct stmmac_pci_info *info)
51{
52 const char *name = dmi_get_system_info(DMI_BOARD_NAME);
53 unsigned int func = PCI_FUNC(info->pdev->devfn);
54 struct stmmac_pci_dmi_data *dmi;
55
56 /*
57 * Galileo boards with old firmware don't support DMI. We always return
58 * 1 here, so that at least the first MAC controller found will be probed.
59 */
60 if (!name)
61 return 1;
62
63 for (dmi = info->dmi; dmi->name && *dmi->name; dmi++) {
64 if (!strcmp(dmi->name, name) && dmi->func == func)
65 return dmi->phy_addr;
66 }
67
68 return -ENODEV;
69}
70
29static void stmmac_default_data(struct plat_stmmacenet_data *plat) 71static void stmmac_default_data(struct plat_stmmacenet_data *plat)
30{ 72{
31 plat->bus_id = 1; 73 plat->bus_id = 1;
@@ -48,6 +90,62 @@ static void stmmac_default_data(struct plat_stmmacenet_data *plat)
48 plat->unicast_filter_entries = 1; 90 plat->unicast_filter_entries = 1;
49} 91}
50 92
93static int quark_default_data(struct plat_stmmacenet_data *plat,
94 struct stmmac_pci_info *info)
95{
96 struct pci_dev *pdev = info->pdev;
97 int ret;
98
99 /*
100 * Refuse to load the driver and register the net device if the MAC
101 * controller is not connected to any PHY interface.
102 */
103 ret = stmmac_pci_find_phy_addr(info);
104 if (ret < 0)
105 return ret;
106
107 plat->bus_id = PCI_DEVID(pdev->bus->number, pdev->devfn);
108 plat->phy_addr = ret;
109 plat->interface = PHY_INTERFACE_MODE_RMII;
110 plat->clk_csr = 2;
111 plat->has_gmac = 1;
112 plat->force_sf_dma_mode = 1;
113
114 plat->mdio_bus_data->phy_reset = NULL;
115 plat->mdio_bus_data->phy_mask = 0;
116
117 plat->dma_cfg->pbl = 16;
118 plat->dma_cfg->burst_len = DMA_AXI_BLEN_256;
119 plat->dma_cfg->fixed_burst = 1;
120
121 /* Set default value for multicast hash bins */
122 plat->multicast_filter_bins = HASH_TABLE_SIZE;
123
124 /* Set default value for unicast filter entries */
125 plat->unicast_filter_entries = 1;
126
127 return 0;
128}
129
130static struct stmmac_pci_dmi_data quark_pci_dmi_data[] = {
131 {
132 .name = "Galileo",
133 .func = 6,
134 .phy_addr = 1,
135 },
136 {
137 .name = "GalileoGen2",
138 .func = 6,
139 .phy_addr = 1,
140 },
141 {}
142};
143
144static struct stmmac_pci_info quark_pci_info = {
145 .setup = quark_default_data,
146 .dmi = quark_pci_dmi_data,
147};
148
51/** 149/**
52 * stmmac_pci_probe 150 * stmmac_pci_probe
53 * 151 *
@@ -63,6 +161,7 @@ static void stmmac_default_data(struct plat_stmmacenet_data *plat)
63static int stmmac_pci_probe(struct pci_dev *pdev, 161static int stmmac_pci_probe(struct pci_dev *pdev,
64 const struct pci_device_id *id) 162 const struct pci_device_id *id)
65{ 163{
164 struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
66 struct plat_stmmacenet_data *plat; 165 struct plat_stmmacenet_data *plat;
67 struct stmmac_priv *priv; 166 struct stmmac_priv *priv;
68 int i; 167 int i;
@@ -103,7 +202,17 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
103 202
104 pci_set_master(pdev); 203 pci_set_master(pdev);
105 204
106 stmmac_default_data(plat); 205 if (info) {
206 info->pdev = pdev;
207 if (info->setup) {
208 ret = info->setup(plat, info);
209 if (ret)
210 return ret;
211 }
212 } else
213 stmmac_default_data(plat);
214
215 pci_enable_msi(pdev);
107 216
108 priv = stmmac_dvr_probe(&pdev->dev, plat, pcim_iomap_table(pdev)[i]); 217 priv = stmmac_dvr_probe(&pdev->dev, plat, pcim_iomap_table(pdev)[i]);
109 if (IS_ERR(priv)) { 218 if (IS_ERR(priv)) {
@@ -155,11 +264,13 @@ static int stmmac_pci_resume(struct device *dev)
155static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume); 264static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume);
156 265
157#define STMMAC_VENDOR_ID 0x700 266#define STMMAC_VENDOR_ID 0x700
267#define STMMAC_QUARK_ID 0x0937
158#define STMMAC_DEVICE_ID 0x1108 268#define STMMAC_DEVICE_ID 0x1108
159 269
160static const struct pci_device_id stmmac_id_table[] = { 270static const struct pci_device_id stmmac_id_table[] = {
161 {PCI_DEVICE(STMMAC_VENDOR_ID, STMMAC_DEVICE_ID)}, 271 {PCI_DEVICE(STMMAC_VENDOR_ID, STMMAC_DEVICE_ID)},
162 {PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_MAC)}, 272 {PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_MAC)},
273 {PCI_VDEVICE(INTEL, STMMAC_QUARK_ID), (kernel_ulong_t)&quark_pci_info},
163 {} 274 {}
164}; 275};
165 276
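The stmmac_pci.c changes above key the Quark X1000 support off DMI: stmmac_pci_find_phy_addr() matches the DMI board name and the PCI function of the probed device against quark_pci_dmi_data and returns the PHY address (or -ENODEV, which aborts the probe; firmware without DMI falls back to address 1 so the first MAC can still come up). Supporting another board would presumably only need one more table entry; the entry below is a hypothetical example, not part of the patch:

    static struct stmmac_pci_dmi_data quark_pci_dmi_data[] = {
            /* ... existing Galileo / GalileoGen2 entries ... */
            {
                    .name = "SomeQuarkBoard",   /* hypothetical DMI_BOARD_NAME */
                    .func = 6,                  /* PCI function of the MAC */
                    .phy_addr = 3,              /* PHY attached to that MAC */
            },
            {}
    };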
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 3039de2465ba..fb846ebba1d9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -33,6 +33,7 @@
33 33
34static const struct of_device_id stmmac_dt_ids[] = { 34static const struct of_device_id stmmac_dt_ids[] = {
35 /* SoC specific glue layers should come before generic bindings */ 35 /* SoC specific glue layers should come before generic bindings */
36 { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_gmac_data},
36 { .compatible = "amlogic,meson6-dwmac", .data = &meson6_dwmac_data}, 37 { .compatible = "amlogic,meson6-dwmac", .data = &meson6_dwmac_data},
37 { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data}, 38 { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
38 { .compatible = "st,stih415-dwmac", .data = &stih4xx_dwmac_data}, 39 { .compatible = "st,stih415-dwmac", .data = &stih4xx_dwmac_data},
@@ -234,6 +235,9 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
234 of_property_read_bool(np, "snps,fixed-burst"); 235 of_property_read_bool(np, "snps,fixed-burst");
235 dma_cfg->mixed_burst = 236 dma_cfg->mixed_burst =
236 of_property_read_bool(np, "snps,mixed-burst"); 237 of_property_read_bool(np, "snps,mixed-burst");
238 of_property_read_u32(np, "snps,burst_len", &dma_cfg->burst_len);
239 if (dma_cfg->burst_len < 0 || dma_cfg->burst_len > 256)
240 dma_cfg->burst_len = 0;
237 } 241 }
238 plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode"); 242 plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
239 if (plat->force_thresh_dma_mode) { 243 if (plat->force_thresh_dma_mode) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
index 25dd1f7ace02..093eb99e5ffd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
@@ -24,5 +24,6 @@ extern const struct stmmac_of_data sun7i_gmac_data;
24extern const struct stmmac_of_data stih4xx_dwmac_data; 24extern const struct stmmac_of_data stih4xx_dwmac_data;
25extern const struct stmmac_of_data stid127_dwmac_data; 25extern const struct stmmac_of_data stid127_dwmac_data;
26extern const struct stmmac_of_data socfpga_gmac_data; 26extern const struct stmmac_of_data socfpga_gmac_data;
27extern const struct stmmac_of_data rk3288_gmac_data;
27 28
28#endif /* __STMMAC_PLATFORM_H__ */ 29#endif /* __STMMAC_PLATFORM_H__ */
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 0c6416213837..4b51f903fb73 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3341,8 +3341,7 @@ static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
3341 3341
3342 niu_hash_page(rp, page, addr); 3342 niu_hash_page(rp, page, addr);
3343 if (rp->rbr_blocks_per_page > 1) 3343 if (rp->rbr_blocks_per_page > 1)
3344 atomic_add(rp->rbr_blocks_per_page - 1, 3344 atomic_add(rp->rbr_blocks_per_page - 1, &page->_count);
3345 &compound_head(page)->_count);
3346 3345
3347 for (i = 0; i < rp->rbr_blocks_per_page; i++) { 3346 for (i = 0; i < rp->rbr_blocks_per_page; i++) {
3348 __le32 *rbr = &rp->rbr[start_index + i]; 3347 __le32 *rbr = &rp->rbr[start_index + i];
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 3699b98d5b2c..2b10b85d8a08 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -50,6 +50,7 @@ MODULE_VERSION(DRV_MODULE_VERSION);
50#define VNET_MAX_RETRIES 10 50#define VNET_MAX_RETRIES 10
51 51
52static int __vnet_tx_trigger(struct vnet_port *port, u32 start); 52static int __vnet_tx_trigger(struct vnet_port *port, u32 start);
53static void vnet_port_reset(struct vnet_port *port);
53 54
54/* Ordered from largest major to lowest */ 55/* Ordered from largest major to lowest */
55static struct vio_version vnet_versions[] = { 56static struct vio_version vnet_versions[] = {
@@ -351,10 +352,15 @@ static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc)
351 unsigned int len = desc->size; 352 unsigned int len = desc->size;
352 unsigned int copy_len; 353 unsigned int copy_len;
353 struct sk_buff *skb; 354 struct sk_buff *skb;
355 int maxlen;
354 int err; 356 int err;
355 357
356 err = -EMSGSIZE; 358 err = -EMSGSIZE;
357 if (unlikely(len < ETH_ZLEN || len > port->rmtu)) { 359 if (port->tso && port->tsolen > port->rmtu)
360 maxlen = port->tsolen;
361 else
362 maxlen = port->rmtu;
363 if (unlikely(len < ETH_ZLEN || len > maxlen)) {
358 dev->stats.rx_length_errors++; 364 dev->stats.rx_length_errors++;
359 goto out_dropped; 365 goto out_dropped;
360 } 366 }
@@ -731,9 +737,7 @@ ldc_ctrl:
731 vio_link_state_change(vio, event); 737 vio_link_state_change(vio, event);
732 738
733 if (event == LDC_EVENT_RESET) { 739 if (event == LDC_EVENT_RESET) {
734 port->rmtu = 0; 740 vnet_port_reset(port);
735 port->tso = true;
736 port->tsolen = 0;
737 vio_port_up(vio); 741 vio_port_up(vio);
738 } 742 }
739 port->rx_event = 0; 743 port->rx_event = 0;
@@ -929,36 +933,36 @@ static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port,
929 933
930 *pending = 0; 934 *pending = 0;
931 935
932 txi = dr->prod-1; 936 txi = dr->prod;
933 if (txi < 0)
934 txi = VNET_TX_RING_SIZE-1;
935
936 for (i = 0; i < VNET_TX_RING_SIZE; ++i) { 937 for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
937 struct vio_net_desc *d; 938 struct vio_net_desc *d;
938 939
939 d = vio_dring_entry(dr, txi); 940 --txi;
940 941 if (txi < 0)
941 if (d->hdr.state == VIO_DESC_DONE) { 942 txi = VNET_TX_RING_SIZE-1;
942 if (port->tx_bufs[txi].skb) {
943 BUG_ON(port->tx_bufs[txi].skb->next);
944 943
945 port->tx_bufs[txi].skb->next = skb; 944 d = vio_dring_entry(dr, txi);
946 skb = port->tx_bufs[txi].skb;
947 port->tx_bufs[txi].skb = NULL;
948 945
949 ldc_unmap(port->vio.lp, 946 if (d->hdr.state == VIO_DESC_READY) {
950 port->tx_bufs[txi].cookies,
951 port->tx_bufs[txi].ncookies);
952 }
953 d->hdr.state = VIO_DESC_FREE;
954 } else if (d->hdr.state == VIO_DESC_READY) {
955 (*pending)++; 947 (*pending)++;
956 } else if (d->hdr.state == VIO_DESC_FREE) { 948 continue;
957 break;
958 } 949 }
959 --txi; 950 if (port->tx_bufs[txi].skb) {
960 if (txi < 0) 951 if (d->hdr.state != VIO_DESC_DONE)
961 txi = VNET_TX_RING_SIZE-1; 952 pr_notice("invalid ring buffer state %d\n",
953 d->hdr.state);
954 BUG_ON(port->tx_bufs[txi].skb->next);
955
956 port->tx_bufs[txi].skb->next = skb;
957 skb = port->tx_bufs[txi].skb;
958 port->tx_bufs[txi].skb = NULL;
959
960 ldc_unmap(port->vio.lp,
961 port->tx_bufs[txi].cookies,
962 port->tx_bufs[txi].ncookies);
963 } else if (d->hdr.state == VIO_DESC_FREE)
964 break;
965 d->hdr.state = VIO_DESC_FREE;
962 } 966 }
963 return skb; 967 return skb;
964} 968}
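The rewritten vnet_clean_tx_ring() loop above starts at dr->prod and walks the TX ring backwards, wrapping at the start, so READY descriptors are simply counted as pending while any entry that still has an skb attached is unmapped, chained for freeing and marked FREE. A condensed sketch of the traversal, assuming the same ring layout:

    txi = dr->prod;
    for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
            if (--txi < 0)
                    txi = VNET_TX_RING_SIZE - 1;    /* wrap to the last slot */
            d = vio_dring_entry(dr, txi);
            /* READY: count as pending; otherwise reclaim the skb and mark FREE */
    }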
@@ -1633,16 +1637,9 @@ static void vnet_port_free_tx_bufs(struct vnet_port *port)
1633 int i; 1637 int i;
1634 1638
1635 dr = &port->vio.drings[VIO_DRIVER_TX_RING]; 1639 dr = &port->vio.drings[VIO_DRIVER_TX_RING];
1636 if (dr->base) { 1640
1637 ldc_free_exp_dring(port->vio.lp, dr->base, 1641 if (dr->base == NULL)
1638 (dr->entry_size * dr->num_entries), 1642 return;
1639 dr->cookies, dr->ncookies);
1640 dr->base = NULL;
1641 dr->entry_size = 0;
1642 dr->num_entries = 0;
1643 dr->pending = 0;
1644 dr->ncookies = 0;
1645 }
1646 1643
1647 for (i = 0; i < VNET_TX_RING_SIZE; i++) { 1644 for (i = 0; i < VNET_TX_RING_SIZE; i++) {
1648 struct vio_net_desc *d; 1645 struct vio_net_desc *d;
@@ -1652,8 +1649,6 @@ static void vnet_port_free_tx_bufs(struct vnet_port *port)
1652 continue; 1649 continue;
1653 1650
1654 d = vio_dring_entry(dr, i); 1651 d = vio_dring_entry(dr, i);
1655 if (d->hdr.state == VIO_DESC_READY)
1656 pr_warn("active transmit buffers freed\n");
1657 1652
1658 ldc_unmap(port->vio.lp, 1653 ldc_unmap(port->vio.lp,
1659 port->tx_bufs[i].cookies, 1654 port->tx_bufs[i].cookies,
@@ -1662,6 +1657,23 @@ static void vnet_port_free_tx_bufs(struct vnet_port *port)
1662 port->tx_bufs[i].skb = NULL; 1657 port->tx_bufs[i].skb = NULL;
1663 d->hdr.state = VIO_DESC_FREE; 1658 d->hdr.state = VIO_DESC_FREE;
1664 } 1659 }
1660 ldc_free_exp_dring(port->vio.lp, dr->base,
1661 (dr->entry_size * dr->num_entries),
1662 dr->cookies, dr->ncookies);
1663 dr->base = NULL;
1664 dr->entry_size = 0;
1665 dr->num_entries = 0;
1666 dr->pending = 0;
1667 dr->ncookies = 0;
1668}
1669
1670static void vnet_port_reset(struct vnet_port *port)
1671{
1672 del_timer(&port->clean_timer);
1673 vnet_port_free_tx_bufs(port);
1674 port->rmtu = 0;
1675 port->tso = true;
1676 port->tsolen = 0;
1665} 1677}
1666 1678
1667static int vnet_port_alloc_tx_ring(struct vnet_port *port) 1679static int vnet_port_alloc_tx_ring(struct vnet_port *port)
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 6ab36d9ff2ab..a9cac8413e49 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1650,9 +1650,9 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
1650 txd_mss); 1650 txd_mss);
1651 } 1651 }
1652 1652
1653 if (vlan_tx_tag_present(skb)) { 1653 if (skb_vlan_tag_present(skb)) {
1654 /*Cut VLAN ID to 12 bits */ 1654 /*Cut VLAN ID to 12 bits */
1655 txd_vlan_id = vlan_tx_tag_get(skb) & BITS_MASK(12); 1655 txd_vlan_id = skb_vlan_tag_get(skb) & BITS_MASK(12);
1656 txd_vtag = 1; 1656 txd_vtag = 1;
1657 } 1657 }
1658 1658
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 605dd909bcc3..3bc992cd70b7 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -56,12 +56,18 @@ config TI_CPSW_PHY_SEL
56 This driver supports configuring of the phy mode connected to 56 This driver supports configuring of the phy mode connected to
57 the CPSW. 57 the CPSW.
58 58
59config TI_CPSW_ALE
60 tristate "TI CPSW ALE Support"
61 ---help---
62 This driver supports TI's CPSW ALE module.
63
59config TI_CPSW 64config TI_CPSW
60 tristate "TI CPSW Switch Support" 65 tristate "TI CPSW Switch Support"
61 depends on ARCH_DAVINCI || ARCH_OMAP2PLUS 66 depends on ARCH_DAVINCI || ARCH_OMAP2PLUS
62 select TI_DAVINCI_CPDMA 67 select TI_DAVINCI_CPDMA
63 select TI_DAVINCI_MDIO 68 select TI_DAVINCI_MDIO
64 select TI_CPSW_PHY_SEL 69 select TI_CPSW_PHY_SEL
70 select TI_CPSW_ALE
65 select MFD_SYSCON 71 select MFD_SYSCON
66 select REGMAP 72 select REGMAP
67 ---help--- 73 ---help---
@@ -79,6 +85,25 @@ config TI_CPTS
79 the CPSW Ethernet Switch. The unit can time stamp PTP UDP/IPv4 85 the CPSW Ethernet Switch. The unit can time stamp PTP UDP/IPv4
80 and Layer 2 packets, and the driver offers a PTP Hardware Clock. 86 and Layer 2 packets, and the driver offers a PTP Hardware Clock.
81 87
88config TI_KEYSTONE_NETCP
89 tristate "TI Keystone NETCP Core Support"
90 select TI_CPSW_ALE
91 depends on OF
92 depends on KEYSTONE_NAVIGATOR_DMA && KEYSTONE_NAVIGATOR_QMSS
93 ---help---
94 This driver supports TI's Keystone NETCP Core.
95
96 To compile this driver as a module, choose M here: the module
97 will be called keystone_netcp.
98
99config TI_KEYSTONE_NETCP_ETHSS
100 depends on TI_KEYSTONE_NETCP
101 tristate "TI Keystone NETCP Ethernet subsystem Support"
102 ---help---
103
104 To compile this driver as a module, choose M here: the module
105 will be called keystone_netcp_ethss.
106
82config TLAN 107config TLAN
83 tristate "TI ThunderLAN support" 108 tristate "TI ThunderLAN support"
84 depends on (PCI || EISA) 109 depends on (PCI || EISA)
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index 9cfaab8152be..d420d9413e4a 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -2,11 +2,20 @@
2# Makefile for the TI network device drivers. 2# Makefile for the TI network device drivers.
3# 3#
4 4
5obj-$(CONFIG_TI_CPSW) += cpsw-common.o
6obj-$(CONFIG_TI_DAVINCI_EMAC) += cpsw-common.o
7
5obj-$(CONFIG_TLAN) += tlan.o 8obj-$(CONFIG_TLAN) += tlan.o
6obj-$(CONFIG_CPMAC) += cpmac.o 9obj-$(CONFIG_CPMAC) += cpmac.o
7obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o 10obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
8obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o 11obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
9obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o 12obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
10obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o 13obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o
14obj-$(CONFIG_TI_CPSW_ALE) += cpsw_ale.o
11obj-$(CONFIG_TI_CPSW) += ti_cpsw.o 15obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
12ti_cpsw-y := cpsw_ale.o cpsw.o cpts.o 16ti_cpsw-y := cpsw.o cpts.o
17
18obj-$(CONFIG_TI_KEYSTONE_NETCP) += keystone_netcp.o
19keystone_netcp-y := netcp_core.o
20obj-$(CONFIG_TI_KEYSTONE_NETCP_ETHSS) += keystone_netcp_ethss.o
21keystone_netcp_ethss-y := netcp_ethss.o netcp_sgmii.o netcp_xgbepcsr.o
diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c
new file mode 100644
index 000000000000..f59509486113
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw-common.c
@@ -0,0 +1,55 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/of.h>
16#include <linux/of_device.h>
17#include <linux/regmap.h>
18#include <linux/mfd/syscon.h>
19
20#include "cpsw.h"
21
22#define AM33XX_CTRL_MAC_LO_REG(offset, id) ((offset) + 0x8 * (id))
23#define AM33XX_CTRL_MAC_HI_REG(offset, id) ((offset) + 0x8 * (id) + 0x4)
24
25int cpsw_am33xx_cm_get_macid(struct device *dev, u16 offset, int slave,
26 u8 *mac_addr)
27{
28 u32 macid_lo;
29 u32 macid_hi;
30 struct regmap *syscon;
31
32 syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
33 if (IS_ERR(syscon)) {
34 if (PTR_ERR(syscon) == -ENODEV)
35 return 0;
36 return PTR_ERR(syscon);
37 }
38
39 regmap_read(syscon, AM33XX_CTRL_MAC_LO_REG(offset, slave),
40 &macid_lo);
41 regmap_read(syscon, AM33XX_CTRL_MAC_HI_REG(offset, slave),
42 &macid_hi);
43
44 mac_addr[5] = (macid_lo >> 8) & 0xff;
45 mac_addr[4] = macid_lo & 0xff;
46 mac_addr[3] = (macid_hi >> 24) & 0xff;
47 mac_addr[2] = (macid_hi >> 16) & 0xff;
48 mac_addr[1] = (macid_hi >> 8) & 0xff;
49 mac_addr[0] = macid_hi & 0xff;
50
51 return 0;
52}
53EXPORT_SYMBOL_GPL(cpsw_am33xx_cm_get_macid);
54
55MODULE_LICENSE("GPL");
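cpsw_am33xx_cm_get_macid() above reads the MAC address that the control module latches into two 32-bit registers, located at offset + 8 * slave (LO) and offset + 8 * slave + 4 (HI) inside the syscon region. A worked example with assumed register contents, for illustration only:

    /* offset = 0x630, slave = 1  ->  MAC_LO at 0x638, MAC_HI at 0x63c */
    macid_lo = 0x0000a1b2;          /* assumed value */
    macid_hi = 0x01020304;          /* assumed value */
    /* per the byte extraction above: mac_addr[] = 04:03:02:01:b2:a1 */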
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index a39131f494ec..7d8dd0d2182e 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -33,8 +33,6 @@
33#include <linux/of_net.h> 33#include <linux/of_net.h>
34#include <linux/of_device.h> 34#include <linux/of_device.h>
35#include <linux/if_vlan.h> 35#include <linux/if_vlan.h>
36#include <linux/mfd/syscon.h>
37#include <linux/regmap.h>
38 36
39#include <linux/pinctrl/consumer.h> 37#include <linux/pinctrl/consumer.h>
40 38
@@ -761,17 +759,25 @@ requeue:
761 dev_kfree_skb_any(new_skb); 759 dev_kfree_skb_any(new_skb);
762} 760}
763 761
764static irqreturn_t cpsw_interrupt(int irq, void *dev_id) 762static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
765{ 763{
766 struct cpsw_priv *priv = dev_id; 764 struct cpsw_priv *priv = dev_id;
767 int value = irq - priv->irqs_table[0];
768 765
769 /* NOTICE: Ending IRQ here. The trick with the 'value' variable above 766 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
770 * is to make sure we will always write the correct value to the EOI 767 cpdma_chan_process(priv->txch, 128);
771 * register. Namely 0 for RX_THRESH Interrupt, 1 for RX Interrupt, 2 768
772 * for TX Interrupt and 3 for MISC Interrupt. 769 priv = cpsw_get_slave_priv(priv, 1);
773 */ 770 if (priv)
774 cpdma_ctlr_eoi(priv->dma, value); 771 cpdma_chan_process(priv->txch, 128);
772
773 return IRQ_HANDLED;
774}
775
776static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
777{
778 struct cpsw_priv *priv = dev_id;
779
780 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
775 781
776 cpsw_intr_disable(priv); 782 cpsw_intr_disable(priv);
777 if (priv->irq_enabled == true) { 783 if (priv->irq_enabled == true) {
@@ -1624,7 +1630,8 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
1624 1630
1625 cpsw_intr_disable(priv); 1631 cpsw_intr_disable(priv);
1626 cpdma_ctlr_int_ctrl(priv->dma, false); 1632 cpdma_ctlr_int_ctrl(priv->dma, false);
1627 cpsw_interrupt(ndev->irq, priv); 1633 cpsw_rx_interrupt(priv->irqs_table[0], priv);
1634 cpsw_tx_interrupt(priv->irqs_table[1], priv);
1628 cpdma_ctlr_int_ctrl(priv->dma, true); 1635 cpdma_ctlr_int_ctrl(priv->dma, true);
1629 cpsw_intr_enable(priv); 1636 cpsw_intr_enable(priv);
1630} 1637}
@@ -1927,36 +1934,6 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
1927 slave->port_vlan = data->dual_emac_res_vlan; 1934 slave->port_vlan = data->dual_emac_res_vlan;
1928} 1935}
1929 1936
1930#define AM33XX_CTRL_MAC_LO_REG(id) (0x630 + 0x8 * id)
1931#define AM33XX_CTRL_MAC_HI_REG(id) (0x630 + 0x8 * id + 0x4)
1932
1933static int cpsw_am33xx_cm_get_macid(struct device *dev, int slave,
1934 u8 *mac_addr)
1935{
1936 u32 macid_lo;
1937 u32 macid_hi;
1938 struct regmap *syscon;
1939
1940 syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
1941 if (IS_ERR(syscon)) {
1942 if (PTR_ERR(syscon) == -ENODEV)
1943 return 0;
1944 return PTR_ERR(syscon);
1945 }
1946
1947 regmap_read(syscon, AM33XX_CTRL_MAC_LO_REG(slave), &macid_lo);
1948 regmap_read(syscon, AM33XX_CTRL_MAC_HI_REG(slave), &macid_hi);
1949
1950 mac_addr[5] = (macid_lo >> 8) & 0xff;
1951 mac_addr[4] = macid_lo & 0xff;
1952 mac_addr[3] = (macid_hi >> 24) & 0xff;
1953 mac_addr[2] = (macid_hi >> 16) & 0xff;
1954 mac_addr[1] = (macid_hi >> 8) & 0xff;
1955 mac_addr[0] = macid_hi & 0xff;
1956
1957 return 0;
1958}
1959
1960static int cpsw_probe_dt(struct cpsw_platform_data *data, 1937static int cpsw_probe_dt(struct cpsw_platform_data *data,
1961 struct platform_device *pdev) 1938 struct platform_device *pdev)
1962{ 1939{
@@ -2081,7 +2058,8 @@ no_phy_slave:
2081 memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); 2058 memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
2082 } else { 2059 } else {
2083 if (of_machine_is_compatible("ti,am33xx")) { 2060 if (of_machine_is_compatible("ti,am33xx")) {
2084 ret = cpsw_am33xx_cm_get_macid(&pdev->dev, i, 2061 ret = cpsw_am33xx_cm_get_macid(&pdev->dev,
2062 0x630, i,
2085 slave_data->mac_addr); 2063 slave_data->mac_addr);
2086 if (ret) 2064 if (ret)
2087 return ret; 2065 return ret;
@@ -2192,7 +2170,8 @@ static int cpsw_probe(struct platform_device *pdev)
2192 void __iomem *ss_regs; 2170 void __iomem *ss_regs;
2193 struct resource *res, *ss_res; 2171 struct resource *res, *ss_res;
2194 u32 slave_offset, sliver_offset, slave_size; 2172 u32 slave_offset, sliver_offset, slave_size;
2195 int ret = 0, i, k = 0; 2173 int ret = 0, i;
2174 int irq;
2196 2175
2197 ndev = alloc_etherdev(sizeof(struct cpsw_priv)); 2176 ndev = alloc_etherdev(sizeof(struct cpsw_priv));
2198 if (!ndev) { 2177 if (!ndev) {
@@ -2374,31 +2353,47 @@ static int cpsw_probe(struct platform_device *pdev)
2374 goto clean_dma_ret; 2353 goto clean_dma_ret;
2375 } 2354 }
2376 2355
2377 ndev->irq = platform_get_irq(pdev, 0); 2356 ndev->irq = platform_get_irq(pdev, 1);
2378 if (ndev->irq < 0) { 2357 if (ndev->irq < 0) {
2379 dev_err(priv->dev, "error getting irq resource\n"); 2358 dev_err(priv->dev, "error getting irq resource\n");
2380 ret = -ENOENT; 2359 ret = -ENOENT;
2381 goto clean_ale_ret; 2360 goto clean_ale_ret;
2382 } 2361 }
2383 2362
2384 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { 2363 /* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
2385 if (k >= ARRAY_SIZE(priv->irqs_table)) { 2364 * MISC IRQs which are always kept disabled with this driver so
2386 ret = -EINVAL; 2365 * we will not request them.
2387 goto clean_ale_ret; 2366 *
2388 } 2367 * If anyone wants to implement support for those, make sure to
2368 * first request and append them to irqs_table array.
2369 */
2389 2370
2390 ret = devm_request_irq(&pdev->dev, res->start, cpsw_interrupt, 2371 /* RX IRQ */
2391 0, dev_name(&pdev->dev), priv); 2372 irq = platform_get_irq(pdev, 1);
2392 if (ret < 0) { 2373 if (irq < 0)
2393 dev_err(priv->dev, "error attaching irq (%d)\n", ret); 2374 goto clean_ale_ret;
2394 goto clean_ale_ret;
2395 }
2396 2375
2397 priv->irqs_table[k] = res->start; 2376 priv->irqs_table[0] = irq;
2398 k++; 2377 ret = devm_request_irq(&pdev->dev, irq, cpsw_rx_interrupt,
2378 0, dev_name(&pdev->dev), priv);
2379 if (ret < 0) {
2380 dev_err(priv->dev, "error attaching irq (%d)\n", ret);
2381 goto clean_ale_ret;
2399 } 2382 }
2400 2383
2401 priv->num_irqs = k; 2384 /* TX IRQ */
2385 irq = platform_get_irq(pdev, 2);
2386 if (irq < 0)
2387 goto clean_ale_ret;
2388
2389 priv->irqs_table[1] = irq;
2390 ret = devm_request_irq(&pdev->dev, irq, cpsw_tx_interrupt,
2391 0, dev_name(&pdev->dev), priv);
2392 if (ret < 0) {
2393 dev_err(priv->dev, "error attaching irq (%d)\n", ret);
2394 goto clean_ale_ret;
2395 }
2396 priv->num_irqs = 2;
2402 2397
2403 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 2398 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2404 2399
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
index 1b710674630c..ca90efafd156 100644
--- a/drivers/net/ethernet/ti/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -41,5 +41,7 @@ struct cpsw_platform_data {
41}; 41};
42 42
43void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave); 43void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave);
44int cpsw_am33xx_cm_get_macid(struct device *dev, u16 offset, int slave,
45 u8 *mac_addr);
44 46
45#endif /* __CPSW_H__ */ 47#endif /* __CPSW_H__ */
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 5246b3a18ff8..6e927b4583aa 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -13,6 +13,7 @@
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 */ 14 */
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/module.h>
16#include <linux/platform_device.h> 17#include <linux/platform_device.h>
17#include <linux/seq_file.h> 18#include <linux/seq_file.h>
18#include <linux/slab.h> 19#include <linux/slab.h>
@@ -146,7 +147,7 @@ static int cpsw_ale_write(struct cpsw_ale *ale, int idx, u32 *ale_entry)
146 return idx; 147 return idx;
147} 148}
148 149
149int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid) 150static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid)
150{ 151{
151 u32 ale_entry[ALE_ENTRY_WORDS]; 152 u32 ale_entry[ALE_ENTRY_WORDS];
152 int type, idx; 153 int type, idx;
@@ -167,7 +168,7 @@ int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid)
167 return -ENOENT; 168 return -ENOENT;
168} 169}
169 170
170int cpsw_ale_match_vlan(struct cpsw_ale *ale, u16 vid) 171static int cpsw_ale_match_vlan(struct cpsw_ale *ale, u16 vid)
171{ 172{
172 u32 ale_entry[ALE_ENTRY_WORDS]; 173 u32 ale_entry[ALE_ENTRY_WORDS];
173 int type, idx; 174 int type, idx;
@@ -265,6 +266,7 @@ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
265 } 266 }
266 return 0; 267 return 0;
267} 268}
269EXPORT_SYMBOL_GPL(cpsw_ale_flush_multicast);
268 270
269static void cpsw_ale_flush_ucast(struct cpsw_ale *ale, u32 *ale_entry, 271static void cpsw_ale_flush_ucast(struct cpsw_ale *ale, u32 *ale_entry,
270 int port_mask) 272 int port_mask)
@@ -297,6 +299,7 @@ int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask)
297 } 299 }
298 return 0; 300 return 0;
299} 301}
302EXPORT_SYMBOL_GPL(cpsw_ale_flush);
300 303
301static inline void cpsw_ale_set_vlan_entry_type(u32 *ale_entry, 304static inline void cpsw_ale_set_vlan_entry_type(u32 *ale_entry,
302 int flags, u16 vid) 305 int flags, u16 vid)
@@ -334,6 +337,7 @@ int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
334 cpsw_ale_write(ale, idx, ale_entry); 337 cpsw_ale_write(ale, idx, ale_entry);
335 return 0; 338 return 0;
336} 339}
340EXPORT_SYMBOL_GPL(cpsw_ale_add_ucast);
337 341
338int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port, 342int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
339 int flags, u16 vid) 343 int flags, u16 vid)
@@ -349,6 +353,7 @@ int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
349 cpsw_ale_write(ale, idx, ale_entry); 353 cpsw_ale_write(ale, idx, ale_entry);
350 return 0; 354 return 0;
351} 355}
356EXPORT_SYMBOL_GPL(cpsw_ale_del_ucast);
352 357
353int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, 358int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
354 int flags, u16 vid, int mcast_state) 359 int flags, u16 vid, int mcast_state)
@@ -380,6 +385,7 @@ int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
380 cpsw_ale_write(ale, idx, ale_entry); 385 cpsw_ale_write(ale, idx, ale_entry);
381 return 0; 386 return 0;
382} 387}
388EXPORT_SYMBOL_GPL(cpsw_ale_add_mcast);
383 389
384int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, 390int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
385 int flags, u16 vid) 391 int flags, u16 vid)
@@ -401,6 +407,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
401 cpsw_ale_write(ale, idx, ale_entry); 407 cpsw_ale_write(ale, idx, ale_entry);
402 return 0; 408 return 0;
403} 409}
410EXPORT_SYMBOL_GPL(cpsw_ale_del_mcast);
404 411
405int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, 412int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
406 int reg_mcast, int unreg_mcast) 413 int reg_mcast, int unreg_mcast)
@@ -430,6 +437,7 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
430 cpsw_ale_write(ale, idx, ale_entry); 437 cpsw_ale_write(ale, idx, ale_entry);
431 return 0; 438 return 0;
432} 439}
440EXPORT_SYMBOL_GPL(cpsw_ale_add_vlan);
433 441
434int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask) 442int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
435{ 443{
@@ -450,6 +458,7 @@ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
450 cpsw_ale_write(ale, idx, ale_entry); 458 cpsw_ale_write(ale, idx, ale_entry);
451 return 0; 459 return 0;
452} 460}
461EXPORT_SYMBOL_GPL(cpsw_ale_del_vlan);
453 462
454void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti) 463void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti)
455{ 464{
@@ -479,6 +488,7 @@ void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti)
479 cpsw_ale_write(ale, idx, ale_entry); 488 cpsw_ale_write(ale, idx, ale_entry);
480 } 489 }
481} 490}
491EXPORT_SYMBOL_GPL(cpsw_ale_set_allmulti);
482 492
483struct ale_control_info { 493struct ale_control_info {
484 const char *name; 494 const char *name;
@@ -704,6 +714,7 @@ int cpsw_ale_control_set(struct cpsw_ale *ale, int port, int control,
704 714
705 return 0; 715 return 0;
706} 716}
717EXPORT_SYMBOL_GPL(cpsw_ale_control_set);
707 718
708int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control) 719int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control)
709{ 720{
@@ -727,6 +738,7 @@ int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control)
727 tmp = __raw_readl(ale->params.ale_regs + offset) >> shift; 738 tmp = __raw_readl(ale->params.ale_regs + offset) >> shift;
728 return tmp & BITMASK(info->bits); 739 return tmp & BITMASK(info->bits);
729} 740}
741EXPORT_SYMBOL_GPL(cpsw_ale_control_get);
730 742
731static void cpsw_ale_timer(unsigned long arg) 743static void cpsw_ale_timer(unsigned long arg)
732{ 744{
@@ -750,6 +762,7 @@ int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout)
750 } 762 }
751 return 0; 763 return 0;
752} 764}
765EXPORT_SYMBOL_GPL(cpsw_ale_set_ageout);
753 766
754void cpsw_ale_start(struct cpsw_ale *ale) 767void cpsw_ale_start(struct cpsw_ale *ale)
755{ 768{
@@ -769,11 +782,13 @@ void cpsw_ale_start(struct cpsw_ale *ale)
769 add_timer(&ale->timer); 782 add_timer(&ale->timer);
770 } 783 }
771} 784}
785EXPORT_SYMBOL_GPL(cpsw_ale_start);
772 786
773void cpsw_ale_stop(struct cpsw_ale *ale) 787void cpsw_ale_stop(struct cpsw_ale *ale)
774{ 788{
775 del_timer_sync(&ale->timer); 789 del_timer_sync(&ale->timer);
776} 790}
791EXPORT_SYMBOL_GPL(cpsw_ale_stop);
777 792
778struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params) 793struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
779{ 794{
@@ -788,6 +803,7 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
788 803
789 return ale; 804 return ale;
790} 805}
806EXPORT_SYMBOL_GPL(cpsw_ale_create);
791 807
792int cpsw_ale_destroy(struct cpsw_ale *ale) 808int cpsw_ale_destroy(struct cpsw_ale *ale)
793{ 809{
@@ -797,6 +813,7 @@ int cpsw_ale_destroy(struct cpsw_ale *ale)
797 kfree(ale); 813 kfree(ale);
798 return 0; 814 return 0;
799} 815}
816EXPORT_SYMBOL_GPL(cpsw_ale_destroy);
800 817
801void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data) 818void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data)
802{ 819{
@@ -807,3 +824,8 @@ void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data)
807 data += ALE_ENTRY_WORDS; 824 data += ALE_ENTRY_WORDS;
808 } 825 }
809} 826}
827EXPORT_SYMBOL_GPL(cpsw_ale_dump);
828
829MODULE_LICENSE("GPL v2");
830MODULE_DESCRIPTION("TI CPSW ALE driver");
831MODULE_AUTHOR("Texas Instruments");
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 4a4388b813ac..fbe42cb107ec 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -157,14 +157,11 @@ static int cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
157 157
158static int cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) 158static int cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
159{ 159{
160 s64 now;
161 unsigned long flags; 160 unsigned long flags;
162 struct cpts *cpts = container_of(ptp, struct cpts, info); 161 struct cpts *cpts = container_of(ptp, struct cpts, info);
163 162
164 spin_lock_irqsave(&cpts->lock, flags); 163 spin_lock_irqsave(&cpts->lock, flags);
165 now = timecounter_read(&cpts->tc); 164 timecounter_adjtime(&cpts->tc, delta);
166 now += delta;
167 timecounter_init(&cpts->tc, &cpts->cc, now);
168 spin_unlock_irqrestore(&cpts->lock, flags); 165 spin_unlock_irqrestore(&cpts->lock, flags);
169 166
170 return 0; 167 return 0;
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
index 1a581ef7eee8..69a46b92c7d6 100644
--- a/drivers/net/ethernet/ti/cpts.h
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -27,6 +27,7 @@
27#include <linux/list.h> 27#include <linux/list.h>
28#include <linux/ptp_clock_kernel.h> 28#include <linux/ptp_clock_kernel.h>
29#include <linux/skbuff.h> 29#include <linux/skbuff.h>
30#include <linux/timecounter.h>
30 31
31struct cpsw_cpts { 32struct cpsw_cpts {
32 u32 idver; /* Identification and version */ 33 u32 idver; /* Identification and version */
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 5fae4354722c..aeebc0a7bf47 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -52,6 +52,7 @@
52#include <linux/dma-mapping.h> 52#include <linux/dma-mapping.h>
53#include <linux/clk.h> 53#include <linux/clk.h>
54#include <linux/platform_device.h> 54#include <linux/platform_device.h>
55#include <linux/regmap.h>
55#include <linux/semaphore.h> 56#include <linux/semaphore.h>
56#include <linux/phy.h> 57#include <linux/phy.h>
57#include <linux/bitops.h> 58#include <linux/bitops.h>
@@ -65,10 +66,12 @@
65#include <linux/of_mdio.h> 66#include <linux/of_mdio.h>
66#include <linux/of_irq.h> 67#include <linux/of_irq.h>
67#include <linux/of_net.h> 68#include <linux/of_net.h>
69#include <linux/mfd/syscon.h>
68 70
69#include <asm/irq.h> 71#include <asm/irq.h>
70#include <asm/page.h> 72#include <asm/page.h>
71 73
74#include "cpsw.h"
72#include "davinci_cpdma.h" 75#include "davinci_cpdma.h"
73 76
74static int debug_level; 77static int debug_level;
@@ -1838,7 +1841,7 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
1838 if (!is_valid_ether_addr(pdata->mac_addr)) { 1841 if (!is_valid_ether_addr(pdata->mac_addr)) {
1839 mac_addr = of_get_mac_address(np); 1842 mac_addr = of_get_mac_address(np);
1840 if (mac_addr) 1843 if (mac_addr)
1841 memcpy(pdata->mac_addr, mac_addr, ETH_ALEN); 1844 ether_addr_copy(pdata->mac_addr, mac_addr);
1842 } 1845 }
1843 1846
1844 of_property_read_u32(np, "ti,davinci-ctrl-reg-offset", 1847 of_property_read_u32(np, "ti,davinci-ctrl-reg-offset",
@@ -1879,6 +1882,53 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
1879 return pdata; 1882 return pdata;
1880} 1883}
1881 1884
1885static int davinci_emac_3517_get_macid(struct device *dev, u16 offset,
1886 int slave, u8 *mac_addr)
1887{
1888 u32 macid_lsb;
1889 u32 macid_msb;
1890 struct regmap *syscon;
1891
1892 syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
1893 if (IS_ERR(syscon)) {
1894 if (PTR_ERR(syscon) == -ENODEV)
1895 return 0;
1896 return PTR_ERR(syscon);
1897 }
1898
1899 regmap_read(syscon, offset, &macid_lsb);
1900 regmap_read(syscon, offset + 4, &macid_msb);
1901
1902 mac_addr[0] = (macid_msb >> 16) & 0xff;
1903 mac_addr[1] = (macid_msb >> 8) & 0xff;
1904 mac_addr[2] = macid_msb & 0xff;
1905 mac_addr[3] = (macid_lsb >> 16) & 0xff;
1906 mac_addr[4] = (macid_lsb >> 8) & 0xff;
1907 mac_addr[5] = macid_lsb & 0xff;
1908
1909 return 0;
1910}
1911
1912static int davinci_emac_try_get_mac(struct platform_device *pdev,
1913 int instance, u8 *mac_addr)
1914{
1915 int error = -EINVAL;
1916
1917 if (!pdev->dev.of_node)
1918 return error;
1919
1920 if (of_device_is_compatible(pdev->dev.of_node, "ti,am3517-emac"))
1921 error = davinci_emac_3517_get_macid(&pdev->dev, 0x110,
1922 0, mac_addr);
1923 else if (of_device_is_compatible(pdev->dev.of_node,
1924 "ti,dm816-emac"))
1925 error = cpsw_am33xx_cm_get_macid(&pdev->dev, 0x30,
1926 instance,
1927 mac_addr);
1928
1929 return error;
1930}
1931
1882/** 1932/**
1883 * davinci_emac_probe - EMAC device probe 1933 * davinci_emac_probe - EMAC device probe
1884 * @pdev: The DaVinci EMAC device that we are removing 1934 * @pdev: The DaVinci EMAC device that we are removing
@@ -2009,6 +2059,10 @@ static int davinci_emac_probe(struct platform_device *pdev)
2009 } 2059 }
2010 ndev->irq = res->start; 2060 ndev->irq = res->start;
2011 2061
2062 rc = davinci_emac_try_get_mac(pdev, res_ctrl ? 0 : 1, priv->mac_addr);
2063 if (!rc)
2064 ether_addr_copy(ndev->dev_addr, priv->mac_addr);
2065
2012 if (!is_valid_ether_addr(priv->mac_addr)) { 2066 if (!is_valid_ether_addr(priv->mac_addr)) {
2013 /* Use random MAC if none passed */ 2067 /* Use random MAC if none passed */
2014 eth_hw_addr_random(ndev); 2068 eth_hw_addr_random(ndev);
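davinci_emac_try_get_mac() above picks the MAC-ID source by compatible string: "ti,am3517-emac" reads two words from the syscon region at offsets 0x110 and 0x114 via davinci_emac_3517_get_macid(), while "ti,dm816-emac" reuses cpsw_am33xx_cm_get_macid() with offset 0x30. A worked example for the 3517 path with assumed register contents, for illustration only:

    macid_lsb = 0x00a1b2c3;         /* assumed value, read from offset 0x110 */
    macid_msb = 0x00010203;         /* assumed value, read from offset 0x114 */
    /* per the byte extraction above: mac_addr[] = 01:02:03:a1:b2:c3 */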
diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h
new file mode 100644
index 000000000000..906e9bc412f5
--- /dev/null
+++ b/drivers/net/ethernet/ti/netcp.h
@@ -0,0 +1,229 @@
1/*
2 * NetCP driver local header
3 *
4 * Copyright (C) 2014 Texas Instruments Incorporated
5 * Authors: Sandeep Nair <sandeep_n@ti.com>
6 * Sandeep Paulraj <s-paulraj@ti.com>
7 * Cyril Chemparathy <cyril@ti.com>
8 * Santosh Shilimkar <santosh.shilimkar@ti.com>
9 * Wingman Kwok <w-kwok2@ti.com>
10 * Murali Karicheri <m-karicheri2@ti.com>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License as
14 * published by the Free Software Foundation version 2.
15 *
16 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
17 * kind, whether express or implied; without even the implied warranty
18 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 */
21#ifndef __NETCP_H__
22#define __NETCP_H__
23
24#include <linux/netdevice.h>
25#include <linux/soc/ti/knav_dma.h>
26
27/* Maximum Ethernet frame size supported by Keystone switch */
28#define NETCP_MAX_FRAME_SIZE 9504
29
30#define SGMII_LINK_MAC_MAC_AUTONEG 0
31#define SGMII_LINK_MAC_PHY 1
32#define SGMII_LINK_MAC_MAC_FORCED 2
33#define SGMII_LINK_MAC_FIBER 3
34#define SGMII_LINK_MAC_PHY_NO_MDIO 4
35#define XGMII_LINK_MAC_PHY 10
36#define XGMII_LINK_MAC_MAC_FORCED 11
37
38struct netcp_device;
39
40struct netcp_tx_pipe {
41 struct netcp_device *netcp_device;
42 void *dma_queue;
43 unsigned int dma_queue_id;
44 u8 dma_psflags;
45 void *dma_channel;
46 const char *dma_chan_name;
47};
48
49#define ADDR_NEW BIT(0)
50#define ADDR_VALID BIT(1)
51
52enum netcp_addr_type {
53 ADDR_ANY,
54 ADDR_DEV,
55 ADDR_UCAST,
56 ADDR_MCAST,
57 ADDR_BCAST
58};
59
60struct netcp_addr {
61 struct netcp_intf *netcp;
62 unsigned char addr[ETH_ALEN];
63 enum netcp_addr_type type;
64 unsigned int flags;
65 struct list_head node;
66};
67
68struct netcp_intf {
69 struct device *dev;
70 struct device *ndev_dev;
71 struct net_device *ndev;
72 bool big_endian;
73 unsigned int tx_compl_qid;
74 void *tx_pool;
75 struct list_head txhook_list_head;
76 unsigned int tx_pause_threshold;
77 void *tx_compl_q;
78
79 unsigned int tx_resume_threshold;
80 void *rx_queue;
81 void *rx_pool;
82 struct list_head rxhook_list_head;
83 unsigned int rx_queue_id;
84 void *rx_fdq[KNAV_DMA_FDQ_PER_CHAN];
85 u32 rx_buffer_sizes[KNAV_DMA_FDQ_PER_CHAN];
86 struct napi_struct rx_napi;
87 struct napi_struct tx_napi;
88
89 void *rx_channel;
90 const char *dma_chan_name;
91 u32 rx_pool_size;
92 u32 rx_pool_region_id;
93 u32 tx_pool_size;
94 u32 tx_pool_region_id;
95 struct list_head module_head;
96 struct list_head interface_list;
97 struct list_head addr_list;
98 bool netdev_registered;
99 bool primary_module_attached;
100
101 /* Lock used for protecting Rx/Tx hook list management */
102 spinlock_t lock;
103 struct netcp_device *netcp_device;
104 struct device_node *node_interface;
105
106 /* DMA configuration data */
107 u32 msg_enable;
108 u32 rx_queue_depths[KNAV_DMA_FDQ_PER_CHAN];
109};
110
111#define NETCP_PSDATA_LEN KNAV_DMA_NUM_PS_WORDS
112struct netcp_packet {
113 struct sk_buff *skb;
114 u32 *epib;
115 u32 *psdata;
116 unsigned int psdata_len;
117 struct netcp_intf *netcp;
118 struct netcp_tx_pipe *tx_pipe;
119 bool rxtstamp_complete;
120 void *ts_context;
121
122 int (*txtstamp_complete)(void *ctx, struct netcp_packet *pkt);
123};
124
125static inline u32 *netcp_push_psdata(struct netcp_packet *p_info,
126 unsigned int bytes)
127{
128 u32 *buf;
129 unsigned int words;
130
131 if ((bytes & 0x03) != 0)
132 return NULL;
133 words = bytes >> 2;
134
135 if ((p_info->psdata_len + words) > NETCP_PSDATA_LEN)
136 return NULL;
137
138 p_info->psdata_len += words;
139 buf = &p_info->psdata[NETCP_PSDATA_LEN - p_info->psdata_len];
140 return buf;
141}
142
143static inline int netcp_align_psdata(struct netcp_packet *p_info,
144 unsigned int byte_align)
145{
146 int padding;
147
148 switch (byte_align) {
149 case 0:
150 padding = -EINVAL;
151 break;
152 case 1:
153 case 2:
154 case 4:
155 padding = 0;
156 break;
157 case 8:
158 padding = (p_info->psdata_len << 2) % 8;
159 break;
160 case 16:
161 padding = (p_info->psdata_len << 2) % 16;
162 break;
163 default:
164 padding = (p_info->psdata_len << 2) % byte_align;
165 break;
166 }
167 return padding;
168}
169
170struct netcp_module {
171 const char *name;
172 struct module *owner;
173 bool primary;
174
175 /* probe/remove: called once per NETCP instance */
176 int (*probe)(struct netcp_device *netcp_device,
177 struct device *device, struct device_node *node,
178 void **inst_priv);
179 int (*remove)(struct netcp_device *netcp_device, void *inst_priv);
180
181 /* attach/release: called once per network interface */
182 int (*attach)(void *inst_priv, struct net_device *ndev,
183 struct device_node *node, void **intf_priv);
184 int (*release)(void *intf_priv);
185 int (*open)(void *intf_priv, struct net_device *ndev);
186 int (*close)(void *intf_priv, struct net_device *ndev);
187 int (*add_addr)(void *intf_priv, struct netcp_addr *naddr);
188 int (*del_addr)(void *intf_priv, struct netcp_addr *naddr);
189 int (*add_vid)(void *intf_priv, int vid);
190 int (*del_vid)(void *intf_priv, int vid);
191 int (*ioctl)(void *intf_priv, struct ifreq *req, int cmd);
192
193 /* used internally */
194 struct list_head module_list;
195 struct list_head interface_list;
196};
197
198int netcp_register_module(struct netcp_module *module);
199void netcp_unregister_module(struct netcp_module *module);
200void *netcp_module_get_intf_data(struct netcp_module *module,
201 struct netcp_intf *intf);
202
203int netcp_txpipe_init(struct netcp_tx_pipe *tx_pipe,
204 struct netcp_device *netcp_device,
205 const char *dma_chan_name, unsigned int dma_queue_id);
206int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe);
207int netcp_txpipe_close(struct netcp_tx_pipe *tx_pipe);
208
209typedef int netcp_hook_rtn(int order, void *data, struct netcp_packet *packet);
210int netcp_register_txhook(struct netcp_intf *netcp_priv, int order,
211 netcp_hook_rtn *hook_rtn, void *hook_data);
212int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order,
213 netcp_hook_rtn *hook_rtn, void *hook_data);
214int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order,
215 netcp_hook_rtn *hook_rtn, void *hook_data);
216int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
217 netcp_hook_rtn *hook_rtn, void *hook_data);
218void *netcp_device_find_module(struct netcp_device *netcp_device,
219 const char *name);
220
221/* SGMII functions */
222int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port);
223int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port);
224int netcp_sgmii_config(void __iomem *sgmii_ofs, int port, u32 interface);
225
226/* XGBE SERDES init functions */
227int netcp_xgbe_serdes_init(void __iomem *serdes_regs, void __iomem *xgbe_regs);
228
229#endif /* __NETCP_H__ */
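The psdata helpers in netcp.h reserve protocol-specific words from the end of the descriptor's PS area: netcp_push_psdata() only accepts whole 32-bit words and returns a pointer counted back from NETCP_PSDATA_LEN, while netcp_align_psdata() reports how many padding bytes the current psdata length leaves relative to the requested alignment. A worked example with an assumed starting state:

    /* assume p_info->psdata_len == 3, i.e. 12 bytes already pushed */
    pad  = netcp_align_psdata(p_info, 8);   /* (3 << 2) % 8 == 4 bytes of padding */
    buf  = netcp_push_psdata(p_info, 8);    /* reserves 2 more words and returns
                                             * &p_info->psdata[NETCP_PSDATA_LEN - 5] */
    buf2 = netcp_push_psdata(p_info, 6);    /* rejected: not a multiple of 4 -> NULL */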
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
new file mode 100644
index 000000000000..a31a8c3c8e7c
--- /dev/null
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -0,0 +1,2149 @@
1/*
2 * Keystone NetCP Core driver
3 *
4 * Copyright (C) 2014 Texas Instruments Incorporated
5 * Authors: Sandeep Nair <sandeep_n@ti.com>
6 * Sandeep Paulraj <s-paulraj@ti.com>
7 * Cyril Chemparathy <cyril@ti.com>
8 * Santosh Shilimkar <santosh.shilimkar@ti.com>
9 * Murali Karicheri <m-karicheri2@ti.com>
10 * Wingman Kwok <w-kwok2@ti.com>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License as
14 * published by the Free Software Foundation version 2.
15 *
16 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
17 * kind, whether express or implied; without even the implied warranty
18 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 */
21
22#include <linux/io.h>
23#include <linux/module.h>
24#include <linux/of_net.h>
25#include <linux/of_address.h>
26#include <linux/if_vlan.h>
27#include <linux/pm_runtime.h>
28#include <linux/platform_device.h>
29#include <linux/soc/ti/knav_qmss.h>
30#include <linux/soc/ti/knav_dma.h>
31
32#include "netcp.h"
33
34#define NETCP_SOP_OFFSET (NET_IP_ALIGN + NET_SKB_PAD)
35#define NETCP_NAPI_WEIGHT 64
36#define NETCP_TX_TIMEOUT (5 * HZ)
37#define NETCP_MIN_PACKET_SIZE ETH_ZLEN
38#define NETCP_MAX_MCAST_ADDR 16
39
40#define NETCP_EFUSE_REG_INDEX 0
41
42#define NETCP_MOD_PROBE_SKIPPED 1
43#define NETCP_MOD_PROBE_FAILED 2
44
45#define NETCP_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \
46 NETIF_MSG_DRV | NETIF_MSG_LINK | \
47 NETIF_MSG_IFUP | NETIF_MSG_INTR | \
48 NETIF_MSG_PROBE | NETIF_MSG_TIMER | \
49 NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | \
50 NETIF_MSG_TX_ERR | NETIF_MSG_TX_DONE | \
51 NETIF_MSG_PKTDATA | NETIF_MSG_TX_QUEUED | \
52 NETIF_MSG_RX_STATUS)
53
54#define knav_queue_get_id(q) knav_queue_device_control(q, \
55 KNAV_QUEUE_GET_ID, (unsigned long)NULL)
56
57#define knav_queue_enable_notify(q) knav_queue_device_control(q, \
58 KNAV_QUEUE_ENABLE_NOTIFY, \
59 (unsigned long)NULL)
60
61#define knav_queue_disable_notify(q) knav_queue_device_control(q, \
62 KNAV_QUEUE_DISABLE_NOTIFY, \
63 (unsigned long)NULL)
64
65#define knav_queue_get_count(q) knav_queue_device_control(q, \
66 KNAV_QUEUE_GET_COUNT, (unsigned long)NULL)
67
68#define for_each_netcp_module(module) \
69 list_for_each_entry(module, &netcp_modules, module_list)
70
71#define for_each_netcp_device_module(netcp_device, inst_modpriv) \
72 list_for_each_entry(inst_modpriv, \
73 &((netcp_device)->modpriv_head), inst_list)
74
75#define for_each_module(netcp, intf_modpriv) \
76 list_for_each_entry(intf_modpriv, &netcp->module_head, intf_list)
77
78/* Module management structures */
79struct netcp_device {
80 struct list_head device_list;
81 struct list_head interface_head;
82 struct list_head modpriv_head;
83 struct device *device;
84};
85
86struct netcp_inst_modpriv {
87 struct netcp_device *netcp_device;
88 struct netcp_module *netcp_module;
89 struct list_head inst_list;
90 void *module_priv;
91};
92
93struct netcp_intf_modpriv {
94 struct netcp_intf *netcp_priv;
95 struct netcp_module *netcp_module;
96 struct list_head intf_list;
97 void *module_priv;
98};
99
100static LIST_HEAD(netcp_devices);
101static LIST_HEAD(netcp_modules);
102static DEFINE_MUTEX(netcp_modules_lock);
103
104static int netcp_debug_level = -1;
105module_param(netcp_debug_level, int, 0);
106MODULE_PARM_DESC(netcp_debug_level, "Netcp debug level (NETIF_MSG bits) (0=none,...,16=all)");
107
108/* Helper functions - Get/Set */
109static void get_pkt_info(u32 *buff, u32 *buff_len, u32 *ndesc,
110 struct knav_dma_desc *desc)
111{
112 *buff_len = desc->buff_len;
113 *buff = desc->buff;
114 *ndesc = desc->next_desc;
115}
116
117static void get_pad_info(u32 *pad0, u32 *pad1, struct knav_dma_desc *desc)
118{
119 *pad0 = desc->pad[0];
120 *pad1 = desc->pad[1];
121}
122
123static void get_org_pkt_info(u32 *buff, u32 *buff_len,
124 struct knav_dma_desc *desc)
125{
126 *buff = desc->orig_buff;
127 *buff_len = desc->orig_len;
128}
129
130static void get_words(u32 *words, int num_words, u32 *desc)
131{
132 int i;
133
134 for (i = 0; i < num_words; i++)
135 words[i] = desc[i];
136}
137
138static void set_pkt_info(u32 buff, u32 buff_len, u32 ndesc,
139 struct knav_dma_desc *desc)
140{
141 desc->buff_len = buff_len;
142 desc->buff = buff;
143 desc->next_desc = ndesc;
144}
145
146static void set_desc_info(u32 desc_info, u32 pkt_info,
147 struct knav_dma_desc *desc)
148{
149 desc->desc_info = desc_info;
150 desc->packet_info = pkt_info;
151}
152
153static void set_pad_info(u32 pad0, u32 pad1, struct knav_dma_desc *desc)
154{
155 desc->pad[0] = pad0;
156 desc->pad[1] = pad1;
157}
158
159static void set_org_pkt_info(u32 buff, u32 buff_len,
160 struct knav_dma_desc *desc)
161{
162 desc->orig_buff = buff;
163 desc->orig_len = buff_len;
164}
165
166static void set_words(u32 *words, int num_words, u32 *desc)
167{
168 int i;
169
170 for (i = 0; i < num_words; i++)
171 desc[i] = words[i];
172}
173
174/* Read the e-fuse value as 32 bit values to be endian independent */
175static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac)
176{
177 unsigned int addr0, addr1;
178
179 addr1 = readl(efuse_mac + 4);
180 addr0 = readl(efuse_mac);
181
182 x[0] = (addr1 & 0x0000ff00) >> 8;
183 x[1] = addr1 & 0x000000ff;
184 x[2] = (addr0 & 0xff000000) >> 24;
185 x[3] = (addr0 & 0x00ff0000) >> 16;
186 x[4] = (addr0 & 0x0000ff00) >> 8;
187 x[5] = addr0 & 0x000000ff;
188
189 return 0;
190}
191
192static const char *netcp_node_name(struct device_node *node)
193{
194 const char *name;
195
196 if (of_property_read_string(node, "label", &name) < 0)
197 name = node->name;
198 if (!name)
199 name = "unknown";
200 return name;
201}
202
203/* Module management routines */
204static int netcp_register_interface(struct netcp_intf *netcp)
205{
206 int ret;
207
208 ret = register_netdev(netcp->ndev);
209 if (!ret)
210 netcp->netdev_registered = true;
211 return ret;
212}
213
214static int netcp_module_probe(struct netcp_device *netcp_device,
215 struct netcp_module *module)
216{
217 struct device *dev = netcp_device->device;
218 struct device_node *devices, *interface, *node = dev->of_node;
219 struct device_node *child;
220 struct netcp_inst_modpriv *inst_modpriv;
221 struct netcp_intf *netcp_intf;
222 struct netcp_module *tmp;
223 bool primary_module_registered = false;
224 int ret;
225
226 /* Find this module in the sub-tree for this device */
227 devices = of_get_child_by_name(node, "netcp-devices");
228 if (!devices) {
229 dev_err(dev, "could not find netcp-devices node\n");
230 return NETCP_MOD_PROBE_SKIPPED;
231 }
232
233 for_each_available_child_of_node(devices, child) {
234 const char *name = netcp_node_name(child);
235
236 if (!strcasecmp(module->name, name))
237 break;
238 }
239
240 of_node_put(devices);
241 /* If module not used for this device, skip it */
242 if (!child) {
243 dev_warn(dev, "module(%s) not used for device\n", module->name);
244 return NETCP_MOD_PROBE_SKIPPED;
245 }
246
247 inst_modpriv = devm_kzalloc(dev, sizeof(*inst_modpriv), GFP_KERNEL);
248 if (!inst_modpriv) {
249 of_node_put(child);
250 return -ENOMEM;
251 }
252
253 inst_modpriv->netcp_device = netcp_device;
254 inst_modpriv->netcp_module = module;
255 list_add_tail(&inst_modpriv->inst_list, &netcp_device->modpriv_head);
256
257 ret = module->probe(netcp_device, dev, child,
258 &inst_modpriv->module_priv);
259 of_node_put(child);
260 if (ret) {
261 dev_err(dev, "Probe of module(%s) failed with %d\n",
262 module->name, ret);
263 list_del(&inst_modpriv->inst_list);
264 devm_kfree(dev, inst_modpriv);
265 return NETCP_MOD_PROBE_FAILED;
266 }
267
268 /* Attach modules only if the primary module is probed */
269 for_each_netcp_module(tmp) {
270 if (tmp->primary)
271 primary_module_registered = true;
272 }
273
274 if (!primary_module_registered)
275 return 0;
276
277 /* Attach module to interfaces */
278 list_for_each_entry(netcp_intf, &netcp_device->interface_head,
279 interface_list) {
280 struct netcp_intf_modpriv *intf_modpriv;
281
282 /* If interface not registered then register now */
283 if (!netcp_intf->netdev_registered)
284 ret = netcp_register_interface(netcp_intf);
285
286 if (ret)
287 return -ENODEV;
288
289 intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv),
290 GFP_KERNEL);
291 if (!intf_modpriv)
292 return -ENOMEM;
293
294 interface = of_parse_phandle(netcp_intf->node_interface,
295 module->name, 0);
296
297 intf_modpriv->netcp_priv = netcp_intf;
298 intf_modpriv->netcp_module = module;
299 list_add_tail(&intf_modpriv->intf_list,
300 &netcp_intf->module_head);
301
302 ret = module->attach(inst_modpriv->module_priv,
303 netcp_intf->ndev, interface,
304 &intf_modpriv->module_priv);
305 of_node_put(interface);
306 if (ret) {
307 dev_dbg(dev, "Attach of module %s declined with %d\n",
308 module->name, ret);
309 list_del(&intf_modpriv->intf_list);
310 devm_kfree(dev, intf_modpriv);
311 continue;
312 }
313 }
314 return 0;
315}
316
317int netcp_register_module(struct netcp_module *module)
318{
319 struct netcp_device *netcp_device;
320 struct netcp_module *tmp;
321 int ret;
322
323 if (!module->name) {
324 WARN(1, "error registering netcp module: no name\n");
325 return -EINVAL;
326 }
327
328 if (!module->probe) {
329 WARN(1, "error registering netcp module: no probe\n");
330 return -EINVAL;
331 }
332
333 mutex_lock(&netcp_modules_lock);
334
335 for_each_netcp_module(tmp) {
336 if (!strcasecmp(tmp->name, module->name)) {
337 mutex_unlock(&netcp_modules_lock);
338 return -EEXIST;
339 }
340 }
341 list_add_tail(&module->module_list, &netcp_modules);
342
343 list_for_each_entry(netcp_device, &netcp_devices, device_list) {
344 ret = netcp_module_probe(netcp_device, module);
345 if (ret < 0)
346 goto fail;
347 }
348
349 mutex_unlock(&netcp_modules_lock);
350 return 0;
351
352fail:
353 mutex_unlock(&netcp_modules_lock);
354 netcp_unregister_module(module);
355 return ret;
356}
357EXPORT_SYMBOL_GPL(netcp_register_module);
358
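/* Usage sketch (illustrative only): the exact layout of struct netcp_module
 * lives in netcp.h; the callback signatures and field names below are
 * inferred from the calls made in this file, and the my_* names are
 * hypothetical. A hardware module would typically do something like:
 *
 *	static int my_probe(struct netcp_device *netcp_device,
 *			    struct device *dev, struct device_node *node,
 *			    void **inst_priv)
 *	{
 *		// allocate per-device state into *inst_priv, return 0 on success
 *	}
 *
 *	static int my_attach(void *inst_priv, struct net_device *ndev,
 *			     struct device_node *node, void **intf_priv)
 *	{
 *		// per-interface setup; register TX/RX hooks here if needed
 *	}
 *
 *	static struct netcp_module my_module = {
 *		.name		= "my-module",
 *		.primary	= true,
 *		.probe		= my_probe,
 *		.attach		= my_attach,
 *	};
 *
 *	ret = netcp_register_module(&my_module);
 */
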
359static void netcp_release_module(struct netcp_device *netcp_device,
360 struct netcp_module *module)
361{
362 struct netcp_inst_modpriv *inst_modpriv, *inst_tmp;
363 struct netcp_intf *netcp_intf, *netcp_tmp;
364 struct device *dev = netcp_device->device;
365
366 /* Release the module from each interface */
367 list_for_each_entry_safe(netcp_intf, netcp_tmp,
368 &netcp_device->interface_head,
369 interface_list) {
370 struct netcp_intf_modpriv *intf_modpriv, *intf_tmp;
371
372 list_for_each_entry_safe(intf_modpriv, intf_tmp,
373 &netcp_intf->module_head,
374 intf_list) {
375 if (intf_modpriv->netcp_module == module) {
376 module->release(intf_modpriv->module_priv);
377 list_del(&intf_modpriv->intf_list);
378 devm_kfree(dev, intf_modpriv);
379 break;
380 }
381 }
382 }
383
384 /* Remove the module from each instance */
385 list_for_each_entry_safe(inst_modpriv, inst_tmp,
386 &netcp_device->modpriv_head, inst_list) {
387 if (inst_modpriv->netcp_module == module) {
388 module->remove(netcp_device,
389 inst_modpriv->module_priv);
390 list_del(&inst_modpriv->inst_list);
391 devm_kfree(dev, inst_modpriv);
392 break;
393 }
394 }
395}
396
397void netcp_unregister_module(struct netcp_module *module)
398{
399 struct netcp_device *netcp_device;
400 struct netcp_module *module_tmp;
401
402 mutex_lock(&netcp_modules_lock);
403
404 list_for_each_entry(netcp_device, &netcp_devices, device_list) {
405 netcp_release_module(netcp_device, module);
406 }
407
408 /* Remove the module from the module list */
409 for_each_netcp_module(module_tmp) {
410 if (module == module_tmp) {
411 list_del(&module->module_list);
412 break;
413 }
414 }
415
416 mutex_unlock(&netcp_modules_lock);
417}
418EXPORT_SYMBOL_GPL(netcp_unregister_module);
419
420void *netcp_module_get_intf_data(struct netcp_module *module,
421 struct netcp_intf *intf)
422{
423 struct netcp_intf_modpriv *intf_modpriv;
424
425 list_for_each_entry(intf_modpriv, &intf->module_head, intf_list)
426 if (intf_modpriv->netcp_module == module)
427 return intf_modpriv->module_priv;
428 return NULL;
429}
430EXPORT_SYMBOL_GPL(netcp_module_get_intf_data);
431
432/* Module TX and RX Hook management */
433struct netcp_hook_list {
434 struct list_head list;
435 netcp_hook_rtn *hook_rtn;
436 void *hook_data;
437 int order;
438};
439
440int netcp_register_txhook(struct netcp_intf *netcp_priv, int order,
441 netcp_hook_rtn *hook_rtn, void *hook_data)
442{
443 struct netcp_hook_list *entry;
444 struct netcp_hook_list *next;
445 unsigned long flags;
446
447 entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
448 if (!entry)
449 return -ENOMEM;
450
451 entry->hook_rtn = hook_rtn;
452 entry->hook_data = hook_data;
453 entry->order = order;
454
455 spin_lock_irqsave(&netcp_priv->lock, flags);
456 list_for_each_entry(next, &netcp_priv->txhook_list_head, list) {
457 if (next->order > order)
458 break;
459 }
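 /* Keep the hook list sorted by ascending order; if no entry has a higher order, next aliases the list head and the new hook is appended at the tail */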
460 __list_add(&entry->list, next->list.prev, &next->list);
461 spin_unlock_irqrestore(&netcp_priv->lock, flags);
462
463 return 0;
464}
465EXPORT_SYMBOL_GPL(netcp_register_txhook);
466
467int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order,
468 netcp_hook_rtn *hook_rtn, void *hook_data)
469{
470 struct netcp_hook_list *next, *n;
471 unsigned long flags;
472
473 spin_lock_irqsave(&netcp_priv->lock, flags);
474 list_for_each_entry_safe(next, n, &netcp_priv->txhook_list_head, list) {
475 if ((next->order == order) &&
476 (next->hook_rtn == hook_rtn) &&
477 (next->hook_data == hook_data)) {
478 list_del(&next->list);
479 spin_unlock_irqrestore(&netcp_priv->lock, flags);
480 devm_kfree(netcp_priv->dev, next);
481 return 0;
482 }
483 }
484 spin_unlock_irqrestore(&netcp_priv->lock, flags);
485 return -ENOENT;
486}
487EXPORT_SYMBOL_GPL(netcp_unregister_txhook);
488
489int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order,
490 netcp_hook_rtn *hook_rtn, void *hook_data)
491{
492 struct netcp_hook_list *entry;
493 struct netcp_hook_list *next;
494 unsigned long flags;
495
496 entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
497 if (!entry)
498 return -ENOMEM;
499
500 entry->hook_rtn = hook_rtn;
501 entry->hook_data = hook_data;
502 entry->order = order;
503
504 spin_lock_irqsave(&netcp_priv->lock, flags);
505 list_for_each_entry(next, &netcp_priv->rxhook_list_head, list) {
506 if (next->order > order)
507 break;
508 }
509 __list_add(&entry->list, next->list.prev, &next->list);
510 spin_unlock_irqrestore(&netcp_priv->lock, flags);
511
512 return 0;
513}
514
515int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
516 netcp_hook_rtn *hook_rtn, void *hook_data)
517{
518 struct netcp_hook_list *next, *n;
519 unsigned long flags;
520
521 spin_lock_irqsave(&netcp_priv->lock, flags);
522 list_for_each_entry_safe(next, n, &netcp_priv->rxhook_list_head, list) {
523 if ((next->order == order) &&
524 (next->hook_rtn == hook_rtn) &&
525 (next->hook_data == hook_data)) {
526 list_del(&next->list);
527 spin_unlock_irqrestore(&netcp_priv->lock, flags);
528 devm_kfree(netcp_priv->dev, next);
529 return 0;
530 }
531 }
532 spin_unlock_irqrestore(&netcp_priv->lock, flags);
533
534 return -ENOENT;
535}
536
537static void netcp_frag_free(bool is_frag, void *ptr)
538{
539 if (is_frag)
540 put_page(virt_to_head_page(ptr));
541 else
542 kfree(ptr);
543}
544
545static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
546 struct knav_dma_desc *desc)
547{
548 struct knav_dma_desc *ndesc;
549 dma_addr_t dma_desc, dma_buf;
550 unsigned int buf_len, dma_sz = sizeof(*ndesc);
551 void *buf_ptr;
552 u32 tmp;
553
554 get_words(&dma_desc, 1, &desc->next_desc);
555
556 while (dma_desc) {
557 ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
558 if (unlikely(!ndesc)) {
559 dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
560 break;
561 }
562 get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc);
563 get_pad_info((u32 *)&buf_ptr, &tmp, ndesc);
564 dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
565 __free_page(buf_ptr);
566 knav_pool_desc_put(netcp->rx_pool, ndesc);
567 }
568
569 get_pad_info((u32 *)&buf_ptr, &buf_len, desc);
570 if (buf_ptr)
571 netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
572 knav_pool_desc_put(netcp->rx_pool, desc);
573}
574
575static void netcp_empty_rx_queue(struct netcp_intf *netcp)
576{
577 struct knav_dma_desc *desc;
578 unsigned int dma_sz;
579 dma_addr_t dma;
580
581 for (;;) {
582 dma = knav_queue_pop(netcp->rx_queue, &dma_sz);
583 if (!dma)
584 break;
585
586 desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
587 if (unlikely(!desc)) {
588 dev_err(netcp->ndev_dev, "%s: failed to unmap Rx desc\n",
589 __func__);
590 netcp->ndev->stats.rx_errors++;
591 continue;
592 }
593 netcp_free_rx_desc_chain(netcp, desc);
594 netcp->ndev->stats.rx_dropped++;
595 }
596}
597
598static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
599{
600 unsigned int dma_sz, buf_len, org_buf_len;
601 struct knav_dma_desc *desc, *ndesc;
602 unsigned int pkt_sz = 0, accum_sz;
603 struct netcp_hook_list *rx_hook;
604 dma_addr_t dma_desc, dma_buff;
605 struct netcp_packet p_info;
606 struct sk_buff *skb;
607 void *org_buf_ptr;
608 u32 tmp;
609
610 dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
611 if (!dma_desc)
612 return -1;
613
614 desc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
615 if (unlikely(!desc)) {
616 dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
617 return 0;
618 }
619
620 get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc);
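 /* The descriptor pad words carry the buffer's virtual address and original length stashed by netcp_allocate_rx_buf() */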
621 get_pad_info((u32 *)&org_buf_ptr, &org_buf_len, desc);
622
623 if (unlikely(!org_buf_ptr)) {
624 dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
625 goto free_desc;
626 }
627
628 pkt_sz = desc->desc_info & KNAV_DMA_DESC_PKT_LEN_MASK;
629 accum_sz = buf_len;
630 dma_unmap_single(netcp->dev, dma_buff, buf_len, DMA_FROM_DEVICE);
631
632 /* Build a new sk_buff for the primary buffer */
633 skb = build_skb(org_buf_ptr, org_buf_len);
634 if (unlikely(!skb)) {
635 dev_err(netcp->ndev_dev, "build_skb() failed\n");
636 goto free_desc;
637 }
638
639 /* update data, tail and len */
640 skb_reserve(skb, NETCP_SOP_OFFSET);
641 __skb_put(skb, buf_len);
642
643 /* Fill in the page fragment list */
644 while (dma_desc) {
645 struct page *page;
646
647 ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
648 if (unlikely(!ndesc)) {
649 dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
650 goto free_desc;
651 }
652
653 get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc);
654 get_pad_info((u32 *)&page, &tmp, ndesc);
655
656 if (likely(dma_buff && buf_len && page)) {
657 dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
658 DMA_FROM_DEVICE);
659 } else {
660 dev_err(netcp->ndev_dev, "Bad Rx desc dma_buff(%p), len(%d), page(%p)\n",
661 (void *)dma_buff, buf_len, page);
662 goto free_desc;
663 }
664
665 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
666 offset_in_page(dma_buff), buf_len, PAGE_SIZE);
667 accum_sz += buf_len;
668
669 /* Free the descriptor */
670 knav_pool_desc_put(netcp->rx_pool, ndesc);
671 }
672
673 /* Free the primary descriptor */
674 knav_pool_desc_put(netcp->rx_pool, desc);
675
676 /* check for packet len and warn */
677 if (unlikely(pkt_sz != accum_sz))
678 dev_dbg(netcp->ndev_dev, "mismatch in packet size(%d) & sum of fragments(%d)\n",
679 pkt_sz, accum_sz);
680
681 /* Remove ethernet FCS from the packet */
682 __pskb_trim(skb, skb->len - ETH_FCS_LEN);
683
684 /* Call each of the RX hooks */
685 p_info.skb = skb;
686 p_info.rxtstamp_complete = false;
687 list_for_each_entry(rx_hook, &netcp->rxhook_list_head, list) {
688 int ret;
689
690 ret = rx_hook->hook_rtn(rx_hook->order, rx_hook->hook_data,
691 &p_info);
692 if (unlikely(ret)) {
693 dev_err(netcp->ndev_dev, "RX hook %d failed: %d\n",
694 rx_hook->order, ret);
695 netcp->ndev->stats.rx_errors++;
696 dev_kfree_skb(skb);
697 return 0;
698 }
699 }
700
701 netcp->ndev->last_rx = jiffies;
702 netcp->ndev->stats.rx_packets++;
703 netcp->ndev->stats.rx_bytes += skb->len;
704
705 /* push skb up the stack */
706 skb->protocol = eth_type_trans(skb, netcp->ndev);
707 netif_receive_skb(skb);
708 return 0;
709
710free_desc:
711 netcp_free_rx_desc_chain(netcp, desc);
712 netcp->ndev->stats.rx_errors++;
713 return 0;
714}
715
716static int netcp_process_rx_packets(struct netcp_intf *netcp,
717 unsigned int budget)
718{
719 int i;
720
721 for (i = 0; (i < budget) && !netcp_process_one_rx_packet(netcp); i++)
722 ;
723 return i;
724}
725
726/* Release descriptors and attached buffers from Rx FDQ */
727static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
728{
729 struct knav_dma_desc *desc;
730 unsigned int buf_len, dma_sz;
731 dma_addr_t dma;
732 void *buf_ptr;
733 u32 tmp;
734
735 /* Pop descriptors off the FDQ and release the attached buffers */
736 while ((dma = knav_queue_pop(netcp->rx_fdq[fdq], &dma_sz))) {
737 desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
738 if (unlikely(!desc)) {
739 dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
740 continue;
741 }
742
743 get_org_pkt_info(&dma, &buf_len, desc);
744 get_pad_info((u32 *)&buf_ptr, &tmp, desc);
745
746 if (unlikely(!dma)) {
747 dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
748 knav_pool_desc_put(netcp->rx_pool, desc);
749 continue;
750 }
751
752 if (unlikely(!buf_ptr)) {
753 dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
754 knav_pool_desc_put(netcp->rx_pool, desc);
755 continue;
756 }
757
758 if (fdq == 0) {
759 dma_unmap_single(netcp->dev, dma, buf_len,
760 DMA_FROM_DEVICE);
761 netcp_frag_free((buf_len <= PAGE_SIZE), buf_ptr);
762 } else {
763 dma_unmap_page(netcp->dev, dma, buf_len,
764 DMA_FROM_DEVICE);
765 __free_page(buf_ptr);
766 }
767
768 knav_pool_desc_put(netcp->rx_pool, desc);
769 }
770}
771
772static void netcp_rxpool_free(struct netcp_intf *netcp)
773{
774 int i;
775
776 for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
777 !IS_ERR_OR_NULL(netcp->rx_fdq[i]); i++)
778 netcp_free_rx_buf(netcp, i);
779
780 if (knav_pool_count(netcp->rx_pool) != netcp->rx_pool_size)
781 dev_err(netcp->ndev_dev, "Lost Rx (%d) descriptors\n",
782 netcp->rx_pool_size - knav_pool_count(netcp->rx_pool));
783
784 knav_pool_destroy(netcp->rx_pool);
785 netcp->rx_pool = NULL;
786}
787
788static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
789{
790 struct knav_dma_desc *hwdesc;
791 unsigned int buf_len, dma_sz;
792 u32 desc_info, pkt_info;
793 struct page *page;
794 dma_addr_t dma;
795 void *bufptr;
796 u32 pad[2];
797
798 /* Allocate descriptor */
799 hwdesc = knav_pool_desc_get(netcp->rx_pool);
800 if (IS_ERR_OR_NULL(hwdesc)) {
801 dev_dbg(netcp->ndev_dev, "out of rx pool desc\n");
802 return;
803 }
804
805 if (likely(fdq == 0)) {
806 unsigned int primary_buf_len;
807 /* Allocate a primary receive queue entry */
808 buf_len = netcp->rx_buffer_sizes[0] + NETCP_SOP_OFFSET;
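 /* Leave room for skb_shared_info so the buffer can later be handed to build_skb() on receive */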
809 primary_buf_len = SKB_DATA_ALIGN(buf_len) +
810 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
811
812 if (primary_buf_len <= PAGE_SIZE) {
813 bufptr = netdev_alloc_frag(primary_buf_len);
814 pad[1] = primary_buf_len;
815 } else {
816 bufptr = kmalloc(primary_buf_len, GFP_ATOMIC |
817 GFP_DMA32 | __GFP_COLD);
818 pad[1] = 0;
819 }
820
821 if (unlikely(!bufptr)) {
822 dev_warn_ratelimited(netcp->ndev_dev, "Primary RX buffer alloc failed\n");
823 goto fail;
824 }
825 dma = dma_map_single(netcp->dev, bufptr, buf_len,
826 DMA_FROM_DEVICE);
827 pad[0] = (u32)bufptr;
828
829 } else {
830 /* Allocate a secondary receive queue entry */
831 page = alloc_page(GFP_ATOMIC | GFP_DMA32 | __GFP_COLD);
832 if (unlikely(!page)) {
833 dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
834 goto fail;
835 }
836 buf_len = PAGE_SIZE;
837 dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_FROM_DEVICE);
838 pad[0] = (u32)page;
839 pad[1] = 0;
840 }
841
842 desc_info = KNAV_DMA_DESC_PS_INFO_IN_DESC;
843 desc_info |= buf_len & KNAV_DMA_DESC_PKT_LEN_MASK;
844 pkt_info = KNAV_DMA_DESC_HAS_EPIB;
845 pkt_info |= KNAV_DMA_NUM_PS_WORDS << KNAV_DMA_DESC_PSLEN_SHIFT;
846 pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) <<
847 KNAV_DMA_DESC_RETQ_SHIFT;
848 set_org_pkt_info(dma, buf_len, hwdesc);
849 set_pad_info(pad[0], pad[1], hwdesc);
850 set_desc_info(desc_info, pkt_info, hwdesc);
851
852 /* Push to FDQs */
853 knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma,
854 &dma_sz);
855 knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0);
856 return;
857
858fail:
859 knav_pool_desc_put(netcp->rx_pool, hwdesc);
860}
861
862/* Refill Rx FDQ with descriptors & attached buffers */
863static void netcp_rxpool_refill(struct netcp_intf *netcp)
864{
865 u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0};
866 int i;
867
868 /* Calculate the FDQ deficit and refill */
869 for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) {
870 fdq_deficit[i] = netcp->rx_queue_depths[i] -
871 knav_queue_get_count(netcp->rx_fdq[i]);
872
873 while (fdq_deficit[i]--)
874 netcp_allocate_rx_buf(netcp, i);
875 } /* end for fdqs */
876}
877
878/* NAPI poll */
879static int netcp_rx_poll(struct napi_struct *napi, int budget)
880{
881 struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
882 rx_napi);
883 unsigned int packets;
884
885 packets = netcp_process_rx_packets(netcp, budget);
886
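 /* All pending work fit within the budget: finish NAPI and re-arm queue notifications */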
887 if (packets < budget) {
888 napi_complete(&netcp->rx_napi);
889 knav_queue_enable_notify(netcp->rx_queue);
890 }
891
892 netcp_rxpool_refill(netcp);
893 return packets;
894}
895
896static void netcp_rx_notify(void *arg)
897{
898 struct netcp_intf *netcp = arg;
899
900 knav_queue_disable_notify(netcp->rx_queue);
901 napi_schedule(&netcp->rx_napi);
902}
903
904static void netcp_free_tx_desc_chain(struct netcp_intf *netcp,
905 struct knav_dma_desc *desc,
906 unsigned int desc_sz)
907{
908 struct knav_dma_desc *ndesc = desc;
909 dma_addr_t dma_desc, dma_buf;
910 unsigned int buf_len;
911
912 while (ndesc) {
913 get_pkt_info(&dma_buf, &buf_len, &dma_desc, ndesc);
914
915 if (dma_buf && buf_len)
916 dma_unmap_single(netcp->dev, dma_buf, buf_len,
917 DMA_TO_DEVICE);
918 else
919 dev_warn(netcp->ndev_dev, "bad Tx desc buf(%p), len(%d)\n",
920 (void *)dma_buf, buf_len);
921
922 knav_pool_desc_put(netcp->tx_pool, ndesc);
923 ndesc = NULL;
924 if (dma_desc) {
925 ndesc = knav_pool_desc_unmap(netcp->tx_pool, dma_desc,
926 desc_sz);
927 if (!ndesc)
928 dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
929 }
930 }
931}
932
933static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
934 unsigned int budget)
935{
936 struct knav_dma_desc *desc;
937 struct sk_buff *skb;
938 unsigned int dma_sz;
939 dma_addr_t dma;
940 int pkts = 0;
941 u32 tmp;
942
943 while (budget--) {
944 dma = knav_queue_pop(netcp->tx_compl_q, &dma_sz);
945 if (!dma)
946 break;
947 desc = knav_pool_desc_unmap(netcp->tx_pool, dma, dma_sz);
948 if (unlikely(!desc)) {
949 dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
950 netcp->ndev->stats.tx_errors++;
951 continue;
952 }
953
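 /* The transmitted skb pointer was stashed in the descriptor pad words by netcp_tx_submit_skb() */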
954 get_pad_info((u32 *)&skb, &tmp, desc);
955 netcp_free_tx_desc_chain(netcp, desc, dma_sz);
956 if (!skb) {
957 dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
958 netcp->ndev->stats.tx_errors++;
959 continue;
960 }
961
962 if (netif_subqueue_stopped(netcp->ndev, skb) &&
963 netif_running(netcp->ndev) &&
964 (knav_pool_count(netcp->tx_pool) >
965 netcp->tx_resume_threshold)) {
966 u16 subqueue = skb_get_queue_mapping(skb);
967
968 netif_wake_subqueue(netcp->ndev, subqueue);
969 }
970
971 netcp->ndev->stats.tx_packets++;
972 netcp->ndev->stats.tx_bytes += skb->len;
973 dev_kfree_skb(skb);
974 pkts++;
975 }
976 return pkts;
977}
978
979static int netcp_tx_poll(struct napi_struct *napi, int budget)
980{
981 int packets;
982 struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
983 tx_napi);
984
985 packets = netcp_process_tx_compl_packets(netcp, budget);
986 if (packets < budget) {
987 napi_complete(&netcp->tx_napi);
988 knav_queue_enable_notify(netcp->tx_compl_q);
989 }
990
991 return packets;
992}
993
994static void netcp_tx_notify(void *arg)
995{
996 struct netcp_intf *netcp = arg;
997
998 knav_queue_disable_notify(netcp->tx_compl_q);
999 napi_schedule(&netcp->tx_napi);
1000}
1001
1002static struct knav_dma_desc*
1003netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
1004{
1005 struct knav_dma_desc *desc, *ndesc, *pdesc;
1006 unsigned int pkt_len = skb_headlen(skb);
1007 struct device *dev = netcp->dev;
1008 dma_addr_t dma_addr;
1009 unsigned int dma_sz;
1010 int i;
1011
1012 /* Map the linear buffer */
1013 dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
1014 if (unlikely(!dma_addr)) {
1015 dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
1016 return NULL;
1017 }
1018
1019 desc = knav_pool_desc_get(netcp->tx_pool);
1020 if (unlikely(IS_ERR_OR_NULL(desc))) {
1021 dev_err(netcp->ndev_dev, "out of TX desc\n");
1022 dma_unmap_single(dev, dma_addr, pkt_len, DMA_TO_DEVICE);
1023 return NULL;
1024 }
1025
1026 set_pkt_info(dma_addr, pkt_len, 0, desc);
1027 if (skb_is_nonlinear(skb)) {
1028 prefetchw(skb_shinfo(skb));
1029 } else {
1030 desc->next_desc = 0;
1031 goto upd_pkt_len;
1032 }
1033
1034 pdesc = desc;
1035
1036 /* Handle the case where skb is fragmented in pages */
1037 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1038 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1039 struct page *page = skb_frag_page(frag);
1040 u32 page_offset = frag->page_offset;
1041 u32 buf_len = skb_frag_size(frag);
1042 dma_addr_t desc_dma;
1043 u32 pkt_info;
1044
1045 dma_addr = dma_map_page(dev, page, page_offset, buf_len,
1046 DMA_TO_DEVICE);
1047 if (unlikely(!dma_addr)) {
1048 dev_err(netcp->ndev_dev, "Failed to map skb page\n");
1049 goto free_descs;
1050 }
1051
1052 ndesc = knav_pool_desc_get(netcp->tx_pool);
1053 if (unlikely(IS_ERR_OR_NULL(ndesc))) {
1054 dev_err(netcp->ndev_dev, "out of TX desc for frags\n");
1055 dma_unmap_page(dev, dma_addr, buf_len, DMA_TO_DEVICE);
1056 goto free_descs;
1057 }
1058
1059 desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool,
1060 (void *)ndesc);
1061 pkt_info =
1062 (netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
1063 KNAV_DMA_DESC_RETQ_SHIFT;
1064 set_pkt_info(dma_addr, buf_len, 0, ndesc);
1065 set_words(&desc_dma, 1, &pdesc->next_desc);
1066 pkt_len += buf_len;
1067 if (pdesc != desc)
1068 knav_pool_desc_map(netcp->tx_pool, pdesc,
1069 sizeof(*pdesc), &desc_dma, &dma_sz);
1070 pdesc = ndesc;
1071 }
1072 if (pdesc != desc)
1073 knav_pool_desc_map(netcp->tx_pool, pdesc, sizeof(*pdesc),
1074 &dma_addr, &dma_sz);
1075
1076 /* frag list based linkage is not supported for now. */
1077 if (skb_shinfo(skb)->frag_list) {
1078 dev_err_ratelimited(netcp->ndev_dev, "NETIF_F_FRAGLIST not supported\n");
1079 goto free_descs;
1080 }
1081
1082upd_pkt_len:
1083 WARN_ON(pkt_len != skb->len);
1084
1085 pkt_len &= KNAV_DMA_DESC_PKT_LEN_MASK;
1086 set_words(&pkt_len, 1, &desc->desc_info);
1087 return desc;
1088
1089free_descs:
1090 netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
1091 return NULL;
1092}
1093
1094static int netcp_tx_submit_skb(struct netcp_intf *netcp,
1095 struct sk_buff *skb,
1096 struct knav_dma_desc *desc)
1097{
1098 struct netcp_tx_pipe *tx_pipe = NULL;
1099 struct netcp_hook_list *tx_hook;
1100 struct netcp_packet p_info;
1101 u32 packet_info = 0;
1102 unsigned int dma_sz;
1103 dma_addr_t dma;
1104 int ret = 0;
1105
1106 p_info.netcp = netcp;
1107 p_info.skb = skb;
1108 p_info.tx_pipe = NULL;
1109 p_info.psdata_len = 0;
1110 p_info.ts_context = NULL;
1111 p_info.txtstamp_complete = NULL;
1112 p_info.epib = desc->epib;
1113 p_info.psdata = desc->psdata;
1114 memset(p_info.epib, 0, KNAV_DMA_NUM_EPIB_WORDS * sizeof(u32));
1115
1116 /* Find out where to inject the packet for transmission */
1117 list_for_each_entry(tx_hook, &netcp->txhook_list_head, list) {
1118 ret = tx_hook->hook_rtn(tx_hook->order, tx_hook->hook_data,
1119 &p_info);
1120 if (unlikely(ret != 0)) {
1121 dev_err(netcp->ndev_dev, "TX hook %d rejected the packet with reason(%d)\n",
1122 tx_hook->order, ret);
1123 ret = (ret < 0) ? ret : NETDEV_TX_OK;
1124 goto out;
1125 }
1126 }
1127
1128 /* Make sure some TX hook claimed the packet */
1129 tx_pipe = p_info.tx_pipe;
1130 if (!tx_pipe) {
1131 dev_err(netcp->ndev_dev, "No TX hook claimed the packet!\n");
1132 ret = -ENXIO;
1133 goto out;
1134 }
1135
1136 /* update descriptor */
1137 if (p_info.psdata_len) {
1138 u32 *psdata = p_info.psdata;
1139
1140 memmove(p_info.psdata, p_info.psdata + p_info.psdata_len,
1141 p_info.psdata_len);
1142 set_words(psdata, p_info.psdata_len, psdata);
1143 packet_info |=
1144 (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) <<
1145 KNAV_DMA_DESC_PSLEN_SHIFT;
1146 }
1147
1148 packet_info |= KNAV_DMA_DESC_HAS_EPIB |
1149 ((netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
1150 KNAV_DMA_DESC_RETQ_SHIFT) |
1151 ((tx_pipe->dma_psflags & KNAV_DMA_DESC_PSFLAG_MASK) <<
1152 KNAV_DMA_DESC_PSFLAG_SHIFT);
1153
1154 set_words(&packet_info, 1, &desc->packet_info);
1155 set_words((u32 *)&skb, 1, &desc->pad[0]);
1156
1157 /* submit packet descriptor */
1158 ret = knav_pool_desc_map(netcp->tx_pool, desc, sizeof(*desc), &dma,
1159 &dma_sz);
1160 if (unlikely(ret)) {
1161 dev_err(netcp->ndev_dev, "%s() failed to map desc\n", __func__);
1162 ret = -ENOMEM;
1163 goto out;
1164 }
1165 skb_tx_timestamp(skb);
1166 knav_queue_push(tx_pipe->dma_queue, dma, dma_sz, 0);
1167
1168out:
1169 return ret;
1170}
1171
1172/* Submit the packet */
1173static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1174{
1175 struct netcp_intf *netcp = netdev_priv(ndev);
1176 int subqueue = skb_get_queue_mapping(skb);
1177 struct knav_dma_desc *desc;
1178 int desc_count, ret = 0;
1179
1180 if (unlikely(skb->len <= 0)) {
1181 dev_kfree_skb(skb);
1182 return NETDEV_TX_OK;
1183 }
1184
1185 if (unlikely(skb->len < NETCP_MIN_PACKET_SIZE)) {
1186 ret = skb_padto(skb, NETCP_MIN_PACKET_SIZE);
1187 if (ret < 0) {
1188 /* If we get here, the skb has already been dropped */
1189 dev_warn(netcp->ndev_dev, "padding failed (%d), packet dropped\n",
1190 ret);
1191 ndev->stats.tx_dropped++;
1192 return ret;
1193 }
1194 skb->len = NETCP_MIN_PACKET_SIZE;
1195 }
1196
1197 desc = netcp_tx_map_skb(skb, netcp);
1198 if (unlikely(!desc)) {
1199 netif_stop_subqueue(ndev, subqueue);
1200 ret = -ENOBUFS;
1201 goto drop;
1202 }
1203
1204 ret = netcp_tx_submit_skb(netcp, skb, desc);
1205 if (ret)
1206 goto drop;
1207
1208 ndev->trans_start = jiffies;
1209
1210 /* Check Tx pool count & stop subqueue if needed */
1211 desc_count = knav_pool_count(netcp->tx_pool);
1212 if (desc_count < netcp->tx_pause_threshold) {
1213 dev_dbg(netcp->ndev_dev, "pausing tx, count(%d)\n", desc_count);
1214 netif_stop_subqueue(ndev, subqueue);
1215 }
1216 return NETDEV_TX_OK;
1217
1218drop:
1219 ndev->stats.tx_dropped++;
1220 if (desc)
1221 netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
1222 dev_kfree_skb(skb);
1223 return ret;
1224}
1225
1226int netcp_txpipe_close(struct netcp_tx_pipe *tx_pipe)
1227{
1228 if (tx_pipe->dma_channel) {
1229 knav_dma_close_channel(tx_pipe->dma_channel);
1230 tx_pipe->dma_channel = NULL;
1231 }
1232 return 0;
1233}
1234EXPORT_SYMBOL_GPL(netcp_txpipe_close);
1235
1236int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
1237{
1238 struct device *dev = tx_pipe->netcp_device->device;
1239 struct knav_dma_cfg config;
1240 int ret = 0;
1241 u8 name[16];
1242
1243 memset(&config, 0, sizeof(config));
1244 config.direction = DMA_MEM_TO_DEV;
1245 config.u.tx.filt_einfo = false;
1246 config.u.tx.filt_pswords = false;
1247 config.u.tx.priority = DMA_PRIO_MED_L;
1248
1249 tx_pipe->dma_channel = knav_dma_open_channel(dev,
1250 tx_pipe->dma_chan_name, &config);
1251 if (IS_ERR_OR_NULL(tx_pipe->dma_channel)) {
1252 dev_err(dev, "failed opening tx chan(%s)\n",
1253 tx_pipe->dma_chan_name);
1254 ret = -ENODEV;
 goto err;
1255 }
1256
1257 snprintf(name, sizeof(name), "tx-pipe-%s", dev_name(dev));
1258 tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id,
1259 KNAV_QUEUE_SHARED);
1260 if (IS_ERR(tx_pipe->dma_queue)) {
1261 ret = PTR_ERR(tx_pipe->dma_queue);
1262 dev_err(dev, "Could not open DMA queue for channel \"%s\": %d\n",
1263 name, ret);
1264 goto err;
1265 }
1266
1267 dev_dbg(dev, "opened tx pipe %s\n", name);
1268 return 0;
1269
1270err:
1271 if (!IS_ERR_OR_NULL(tx_pipe->dma_channel))
1272 knav_dma_close_channel(tx_pipe->dma_channel);
1273 tx_pipe->dma_channel = NULL;
1274 return ret;
1275}
1276EXPORT_SYMBOL_GPL(netcp_txpipe_open);
1277
1278int netcp_txpipe_init(struct netcp_tx_pipe *tx_pipe,
1279 struct netcp_device *netcp_device,
1280 const char *dma_chan_name, unsigned int dma_queue_id)
1281{
1282 memset(tx_pipe, 0, sizeof(*tx_pipe));
1283 tx_pipe->netcp_device = netcp_device;
1284 tx_pipe->dma_chan_name = dma_chan_name;
1285 tx_pipe->dma_queue_id = dma_queue_id;
1286 return 0;
1287}
1288EXPORT_SYMBOL_GPL(netcp_txpipe_init);
1289
1290static struct netcp_addr *netcp_addr_find(struct netcp_intf *netcp,
1291 const u8 *addr,
1292 enum netcp_addr_type type)
1293{
1294 struct netcp_addr *naddr;
1295
1296 list_for_each_entry(naddr, &netcp->addr_list, node) {
1297 if (naddr->type != type)
1298 continue;
1299 if (addr && memcmp(addr, naddr->addr, ETH_ALEN))
1300 continue;
1301 return naddr;
1302 }
1303
1304 return NULL;
1305}
1306
1307static struct netcp_addr *netcp_addr_add(struct netcp_intf *netcp,
1308 const u8 *addr,
1309 enum netcp_addr_type type)
1310{
1311 struct netcp_addr *naddr;
1312
1313 naddr = devm_kmalloc(netcp->dev, sizeof(*naddr), GFP_ATOMIC);
1314 if (!naddr)
1315 return NULL;
1316
1317 naddr->type = type;
1318 naddr->flags = 0;
1319 naddr->netcp = netcp;
1320 if (addr)
1321 ether_addr_copy(naddr->addr, addr);
1322 else
1323 memset(naddr->addr, 0, ETH_ALEN);
1324 list_add_tail(&naddr->node, &netcp->addr_list);
1325
1326 return naddr;
1327}
1328
1329static void netcp_addr_del(struct netcp_intf *netcp, struct netcp_addr *naddr)
1330{
1331 list_del(&naddr->node);
1332 devm_kfree(netcp->dev, naddr);
1333}
1334
1335static void netcp_addr_clear_mark(struct netcp_intf *netcp)
1336{
1337 struct netcp_addr *naddr;
1338
1339 list_for_each_entry(naddr, &netcp->addr_list, node)
1340 naddr->flags = 0;
1341}
1342
1343static void netcp_addr_add_mark(struct netcp_intf *netcp, const u8 *addr,
1344 enum netcp_addr_type type)
1345{
1346 struct netcp_addr *naddr;
1347
1348 naddr = netcp_addr_find(netcp, addr, type);
1349 if (naddr) {
1350 naddr->flags |= ADDR_VALID;
1351 return;
1352 }
1353
1354 naddr = netcp_addr_add(netcp, addr, type);
1355 if (!WARN_ON(!naddr))
1356 naddr->flags |= ADDR_NEW;
1357}
1358
1359static void netcp_addr_sweep_del(struct netcp_intf *netcp)
1360{
1361 struct netcp_addr *naddr, *tmp;
1362 struct netcp_intf_modpriv *priv;
1363 struct netcp_module *module;
1364 int error;
1365
1366 list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
1367 if (naddr->flags & (ADDR_VALID | ADDR_NEW))
1368 continue;
1369 dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n",
1370 naddr->addr, naddr->type);
1371 mutex_lock(&netcp_modules_lock);
1372 for_each_module(netcp, priv) {
1373 module = priv->netcp_module;
1374 if (!module->del_addr)
1375 continue;
1376 error = module->del_addr(priv->module_priv,
1377 naddr);
1378 WARN_ON(error);
1379 }
1380 mutex_unlock(&netcp_modules_lock);
1381 netcp_addr_del(netcp, naddr);
1382 }
1383}
1384
1385static void netcp_addr_sweep_add(struct netcp_intf *netcp)
1386{
1387 struct netcp_addr *naddr, *tmp;
1388 struct netcp_intf_modpriv *priv;
1389 struct netcp_module *module;
1390 int error;
1391
1392 list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
1393 if (!(naddr->flags & ADDR_NEW))
1394 continue;
1395 dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n",
1396 naddr->addr, naddr->type);
1397 mutex_lock(&netcp_modules_lock);
1398 for_each_module(netcp, priv) {
1399 module = priv->netcp_module;
1400 if (!module->add_addr)
1401 continue;
1402 error = module->add_addr(priv->module_priv, naddr);
1403 WARN_ON(error);
1404 }
1405 mutex_unlock(&netcp_modules_lock);
1406 }
1407}
1408
1409static void netcp_set_rx_mode(struct net_device *ndev)
1410{
1411 struct netcp_intf *netcp = netdev_priv(ndev);
1412 struct netdev_hw_addr *ndev_addr;
1413 bool promisc;
1414
1415 promisc = (ndev->flags & IFF_PROMISC ||
1416 ndev->flags & IFF_ALLMULTI ||
1417 netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR);
1418
1419 /* first clear all marks */
1420 netcp_addr_clear_mark(netcp);
1421
1422 /* next add new entries, mark existing ones */
1423 netcp_addr_add_mark(netcp, ndev->broadcast, ADDR_BCAST);
1424 for_each_dev_addr(ndev, ndev_addr)
1425 netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_DEV);
1426 netdev_for_each_uc_addr(ndev_addr, ndev)
1427 netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_UCAST);
1428 netdev_for_each_mc_addr(ndev_addr, ndev)
1429 netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_MCAST);
1430
1431 if (promisc)
1432 netcp_addr_add_mark(netcp, NULL, ADDR_ANY);
1433
1434 /* finally sweep and callout into modules */
1435 netcp_addr_sweep_del(netcp);
1436 netcp_addr_sweep_add(netcp);
1437}
1438
1439static void netcp_free_navigator_resources(struct netcp_intf *netcp)
1440{
1441 int i;
1442
1443 if (netcp->rx_channel) {
1444 knav_dma_close_channel(netcp->rx_channel);
1445 netcp->rx_channel = NULL;
1446 }
1447
1448 if (!IS_ERR_OR_NULL(netcp->rx_pool))
1449 netcp_rxpool_free(netcp);
1450
1451 if (!IS_ERR_OR_NULL(netcp->rx_queue)) {
1452 knav_queue_close(netcp->rx_queue);
1453 netcp->rx_queue = NULL;
1454 }
1455
1456 for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
1457 !IS_ERR_OR_NULL(netcp->rx_fdq[i]) ; ++i) {
1458 knav_queue_close(netcp->rx_fdq[i]);
1459 netcp->rx_fdq[i] = NULL;
1460 }
1461
1462 if (!IS_ERR_OR_NULL(netcp->tx_compl_q)) {
1463 knav_queue_close(netcp->tx_compl_q);
1464 netcp->tx_compl_q = NULL;
1465 }
1466
1467 if (!IS_ERR_OR_NULL(netcp->tx_pool)) {
1468 knav_pool_destroy(netcp->tx_pool);
1469 netcp->tx_pool = NULL;
1470 }
1471}
1472
1473static int netcp_setup_navigator_resources(struct net_device *ndev)
1474{
1475 struct netcp_intf *netcp = netdev_priv(ndev);
1476 struct knav_queue_notify_config notify_cfg;
1477 struct knav_dma_cfg config;
1478 u32 last_fdq = 0;
1479 u8 name[16];
1480 int ret;
1481 int i;
1482
1483 /* Create Rx/Tx descriptor pools */
1484 snprintf(name, sizeof(name), "rx-pool-%s", ndev->name);
1485 netcp->rx_pool = knav_pool_create(name, netcp->rx_pool_size,
1486 netcp->rx_pool_region_id);
1487 if (IS_ERR_OR_NULL(netcp->rx_pool)) {
1488 dev_err(netcp->ndev_dev, "Couldn't create rx pool\n");
1489 ret = PTR_ERR(netcp->rx_pool);
1490 goto fail;
1491 }
1492
1493 snprintf(name, sizeof(name), "tx-pool-%s", ndev->name);
1494 netcp->tx_pool = knav_pool_create(name, netcp->tx_pool_size,
1495 netcp->tx_pool_region_id);
1496 if (IS_ERR_OR_NULL(netcp->tx_pool)) {
1497 dev_err(netcp->ndev_dev, "Couldn't create tx pool\n");
1498 ret = PTR_ERR(netcp->tx_pool);
1499 goto fail;
1500 }
1501
1502 /* open Tx completion queue */
1503 snprintf(name, sizeof(name), "tx-compl-%s", ndev->name);
1504 netcp->tx_compl_q = knav_queue_open(name, netcp->tx_compl_qid, 0);
1505 if (IS_ERR_OR_NULL(netcp->tx_compl_q)) {
1506 ret = PTR_ERR(netcp->tx_compl_q);
1507 goto fail;
1508 }
1509 netcp->tx_compl_qid = knav_queue_get_id(netcp->tx_compl_q);
1510
1511 /* Set notification for Tx completion */
1512 notify_cfg.fn = netcp_tx_notify;
1513 notify_cfg.fn_arg = netcp;
1514 ret = knav_queue_device_control(netcp->tx_compl_q,
1515 KNAV_QUEUE_SET_NOTIFIER,
1516 (unsigned long)&notify_cfg);
1517 if (ret)
1518 goto fail;
1519
1520 knav_queue_disable_notify(netcp->tx_compl_q);
1521
1522 /* open Rx completion queue */
1523 snprintf(name, sizeof(name), "rx-compl-%s", ndev->name);
1524 netcp->rx_queue = knav_queue_open(name, netcp->rx_queue_id, 0);
1525 if (IS_ERR_OR_NULL(netcp->rx_queue)) {
1526 ret = PTR_ERR(netcp->rx_queue);
1527 goto fail;
1528 }
1529 netcp->rx_queue_id = knav_queue_get_id(netcp->rx_queue);
1530
1531 /* Set notification for Rx completion */
1532 notify_cfg.fn = netcp_rx_notify;
1533 notify_cfg.fn_arg = netcp;
1534 ret = knav_queue_device_control(netcp->rx_queue,
1535 KNAV_QUEUE_SET_NOTIFIER,
1536 (unsigned long)&notify_cfg);
1537 if (ret)
1538 goto fail;
1539
1540 knav_queue_disable_notify(netcp->rx_queue);
1541
1542 /* open Rx FDQs */
1543 for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
1544 netcp->rx_queue_depths[i] && netcp->rx_buffer_sizes[i]; ++i) {
1545 snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
1546 netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
1547 if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) {
1548 ret = PTR_ERR(netcp->rx_fdq[i]);
1549 goto fail;
1550 }
1551 }
1552
1553 memset(&config, 0, sizeof(config));
1554 config.direction = DMA_DEV_TO_MEM;
1555 config.u.rx.einfo_present = true;
1556 config.u.rx.psinfo_present = true;
1557 config.u.rx.err_mode = DMA_DROP;
1558 config.u.rx.desc_type = DMA_DESC_HOST;
1559 config.u.rx.psinfo_at_sop = false;
1560 config.u.rx.sop_offset = NETCP_SOP_OFFSET;
1561 config.u.rx.dst_q = netcp->rx_queue_id;
1562 config.u.rx.thresh = DMA_THRESH_NONE;
1563
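 /* Program every FDQ slot; unused slots repeat the id of the last valid FDQ */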
1564 for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; ++i) {
1565 if (netcp->rx_fdq[i])
1566 last_fdq = knav_queue_get_id(netcp->rx_fdq[i]);
1567 config.u.rx.fdq[i] = last_fdq;
1568 }
1569
1570 netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device,
1571 netcp->dma_chan_name, &config);
1572 if (IS_ERR_OR_NULL(netcp->rx_channel)) {
1573 dev_err(netcp->ndev_dev, "failed opening rx chan(%s)\n",
1574 netcp->dma_chan_name);
 ret = -ENODEV;
1575 goto fail;
1576 }
1577
1578 dev_dbg(netcp->ndev_dev, "opened RX channel: %p\n", netcp->rx_channel);
1579 return 0;
1580
1581fail:
1582 netcp_free_navigator_resources(netcp);
1583 return ret;
1584}
1585
1586/* Open the device */
1587static int netcp_ndo_open(struct net_device *ndev)
1588{
1589 struct netcp_intf *netcp = netdev_priv(ndev);
1590 struct netcp_intf_modpriv *intf_modpriv;
1591 struct netcp_module *module;
1592 int ret;
1593
1594 netif_carrier_off(ndev);
1595 ret = netcp_setup_navigator_resources(ndev);
1596 if (ret) {
1597 dev_err(netcp->ndev_dev, "Failed to setup navigator resources\n");
1598 goto fail;
1599 }
1600
1601 mutex_lock(&netcp_modules_lock);
1602 for_each_module(netcp, intf_modpriv) {
1603 module = intf_modpriv->netcp_module;
1604 if (module->open) {
1605 ret = module->open(intf_modpriv->module_priv, ndev);
1606 if (ret != 0) {
1607 dev_err(netcp->ndev_dev, "module open failed\n");
1608 goto fail_open;
1609 }
1610 }
1611 }
1612 mutex_unlock(&netcp_modules_lock);
1613
1614 netcp_rxpool_refill(netcp);
1615 napi_enable(&netcp->rx_napi);
1616 napi_enable(&netcp->tx_napi);
1617 knav_queue_enable_notify(netcp->tx_compl_q);
1618 knav_queue_enable_notify(netcp->rx_queue);
1619 netif_tx_wake_all_queues(ndev);
1620 dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name);
1621 return 0;
1622
1623fail_open:
1624 for_each_module(netcp, intf_modpriv) {
1625 module = intf_modpriv->netcp_module;
1626 if (module->close)
1627 module->close(intf_modpriv->module_priv, ndev);
1628 }
1629 mutex_unlock(&netcp_modules_lock);
1630
1631fail:
1632 netcp_free_navigator_resources(netcp);
1633 return ret;
1634}
1635
1636/* Close the device */
1637static int netcp_ndo_stop(struct net_device *ndev)
1638{
1639 struct netcp_intf *netcp = netdev_priv(ndev);
1640 struct netcp_intf_modpriv *intf_modpriv;
1641 struct netcp_module *module;
1642 int err = 0;
1643
1644 netif_tx_stop_all_queues(ndev);
1645 netif_carrier_off(ndev);
1646 netcp_addr_clear_mark(netcp);
1647 netcp_addr_sweep_del(netcp);
1648 knav_queue_disable_notify(netcp->rx_queue);
1649 knav_queue_disable_notify(netcp->tx_compl_q);
1650 napi_disable(&netcp->rx_napi);
1651 napi_disable(&netcp->tx_napi);
1652
1653 mutex_lock(&netcp_modules_lock);
1654 for_each_module(netcp, intf_modpriv) {
1655 module = intf_modpriv->netcp_module;
1656 if (module->close) {
1657 err = module->close(intf_modpriv->module_priv, ndev);
1658 if (err != 0)
1659 dev_err(netcp->ndev_dev, "Close failed\n");
1660 }
1661 }
1662 mutex_unlock(&netcp_modules_lock);
1663
1664 /* Recycle Rx descriptors from completion queue */
1665 netcp_empty_rx_queue(netcp);
1666
1667 /* Recycle Tx descriptors from completion queue */
1668 netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);
1669
1670 if (knav_pool_count(netcp->tx_pool) != netcp->tx_pool_size)
1671 dev_err(netcp->ndev_dev, "Lost (%d) Tx descs\n",
1672 netcp->tx_pool_size - knav_pool_count(netcp->tx_pool));
1673
1674 netcp_free_navigator_resources(netcp);
1675 dev_dbg(netcp->ndev_dev, "netcp device %s stopped\n", ndev->name);
1676 return 0;
1677}
1678
1679static int netcp_ndo_ioctl(struct net_device *ndev,
1680 struct ifreq *req, int cmd)
1681{
1682 struct netcp_intf *netcp = netdev_priv(ndev);
1683 struct netcp_intf_modpriv *intf_modpriv;
1684 struct netcp_module *module;
1685 int ret = -1, err = -EOPNOTSUPP;
1686
1687 if (!netif_running(ndev))
1688 return -EINVAL;
1689
1690 mutex_lock(&netcp_modules_lock);
1691 for_each_module(netcp, intf_modpriv) {
1692 module = intf_modpriv->netcp_module;
1693 if (!module->ioctl)
1694 continue;
1695
1696 err = module->ioctl(intf_modpriv->module_priv, req, cmd);
1697 if ((err < 0) && (err != -EOPNOTSUPP)) {
1698 ret = err;
1699 goto out;
1700 }
1701 if (err == 0)
1702 ret = err;
1703 }
1704
1705out:
1706 mutex_unlock(&netcp_modules_lock);
1707 return (ret == 0) ? 0 : err;
1708}
1709
1710static int netcp_ndo_change_mtu(struct net_device *ndev, int new_mtu)
1711{
1712 struct netcp_intf *netcp = netdev_priv(ndev);
1713
1714 /* MTU < 68 is an error for IPv4 traffic */
1715 if ((new_mtu < 68) ||
1716 (new_mtu > (NETCP_MAX_FRAME_SIZE - ETH_HLEN - ETH_FCS_LEN))) {
1717 dev_err(netcp->ndev_dev, "Invalid mtu size = %d\n", new_mtu);
1718 return -EINVAL;
1719 }
1720
1721 ndev->mtu = new_mtu;
1722 return 0;
1723}
1724
1725static void netcp_ndo_tx_timeout(struct net_device *ndev)
1726{
1727 struct netcp_intf *netcp = netdev_priv(ndev);
1728 unsigned int descs = knav_pool_count(netcp->tx_pool);
1729
1730 dev_err(netcp->ndev_dev, "transmit timed out tx descs(%d)\n", descs);
1731 netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);
1732 ndev->trans_start = jiffies;
1733 netif_tx_wake_all_queues(ndev);
1734}
1735
1736static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
1737{
1738 struct netcp_intf *netcp = netdev_priv(ndev);
1739 struct netcp_intf_modpriv *intf_modpriv;
1740 struct netcp_module *module;
1741 int err = 0;
1742
1743 dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid);
1744
1745 mutex_lock(&netcp_modules_lock);
1746 for_each_module(netcp, intf_modpriv) {
1747 module = intf_modpriv->netcp_module;
1748 if ((module->add_vid) && (vid != 0)) {
1749 err = module->add_vid(intf_modpriv->module_priv, vid);
1750 if (err != 0) {
1751 dev_err(netcp->ndev_dev, "Could not add vlan id = %d\n",
1752 vid);
1753 break;
1754 }
1755 }
1756 }
1757 mutex_unlock(&netcp_modules_lock);
1758 return err;
1759}
1760
1761static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
1762{
1763 struct netcp_intf *netcp = netdev_priv(ndev);
1764 struct netcp_intf_modpriv *intf_modpriv;
1765 struct netcp_module *module;
1766 int err = 0;
1767
1768 dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid);
1769
1770 mutex_lock(&netcp_modules_lock);
1771 for_each_module(netcp, intf_modpriv) {
1772 module = intf_modpriv->netcp_module;
1773 if (module->del_vid) {
1774 err = module->del_vid(intf_modpriv->module_priv, vid);
1775 if (err != 0) {
1776 dev_err(netcp->ndev_dev, "Could not delete vlan id = %d\n",
1777 vid);
1778 break;
1779 }
1780 }
1781 }
1782 mutex_unlock(&netcp_modules_lock);
1783 return err;
1784}
1785
1786static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb,
1787 void *accel_priv,
1788 select_queue_fallback_t fallback)
1789{
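 /* All traffic is currently transmitted on queue 0 */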
1790 return 0;
1791}
1792
1793static int netcp_setup_tc(struct net_device *dev, u8 num_tc)
1794{
1795 int i;
1796
1797 /* setup tc must be called under rtnl lock */
1798 ASSERT_RTNL();
1799
1800 /* Sanity-check the number of traffic classes requested */
1801 if ((dev->real_num_tx_queues <= 1) ||
1802 (dev->real_num_tx_queues < num_tc))
1803 return -EINVAL;
1804
1805 /* Configure traffic class to queue mappings */
1806 if (num_tc) {
1807 netdev_set_num_tc(dev, num_tc);
1808 for (i = 0; i < num_tc; i++)
1809 netdev_set_tc_queue(dev, i, 1, i);
1810 } else {
1811 netdev_reset_tc(dev);
1812 }
1813
1814 return 0;
1815}
1816
1817static const struct net_device_ops netcp_netdev_ops = {
1818 .ndo_open = netcp_ndo_open,
1819 .ndo_stop = netcp_ndo_stop,
1820 .ndo_start_xmit = netcp_ndo_start_xmit,
1821 .ndo_set_rx_mode = netcp_set_rx_mode,
1822 .ndo_do_ioctl = netcp_ndo_ioctl,
1823 .ndo_change_mtu = netcp_ndo_change_mtu,
1824 .ndo_set_mac_address = eth_mac_addr,
1825 .ndo_validate_addr = eth_validate_addr,
1826 .ndo_vlan_rx_add_vid = netcp_rx_add_vid,
1827 .ndo_vlan_rx_kill_vid = netcp_rx_kill_vid,
1828 .ndo_tx_timeout = netcp_ndo_tx_timeout,
1829 .ndo_select_queue = netcp_select_queue,
1830 .ndo_setup_tc = netcp_setup_tc,
1831};
1832
1833static int netcp_create_interface(struct netcp_device *netcp_device,
1834 struct device_node *node_interface)
1835{
1836 struct device *dev = netcp_device->device;
1837 struct device_node *node = dev->of_node;
1838 struct netcp_intf *netcp;
1839 struct net_device *ndev;
1840 resource_size_t size;
1841 struct resource res;
1842 void __iomem *efuse = NULL;
1843 u32 efuse_mac = 0;
1844 const void *mac_addr;
1845 u8 efuse_mac_addr[6];
1846 u32 temp[2];
1847 int ret = 0;
1848
1849 ndev = alloc_etherdev_mqs(sizeof(*netcp), 1, 1);
1850 if (!ndev) {
1851 dev_err(dev, "Error allocating netdev\n");
1852 return -ENOMEM;
1853 }
1854
1855 ndev->features |= NETIF_F_SG;
1856 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1857 ndev->hw_features = ndev->features;
1858 ndev->vlan_features |= NETIF_F_SG;
1859
1860 netcp = netdev_priv(ndev);
1861 spin_lock_init(&netcp->lock);
1862 INIT_LIST_HEAD(&netcp->module_head);
1863 INIT_LIST_HEAD(&netcp->txhook_list_head);
1864 INIT_LIST_HEAD(&netcp->rxhook_list_head);
1865 INIT_LIST_HEAD(&netcp->addr_list);
1866 netcp->netcp_device = netcp_device;
1867 netcp->dev = netcp_device->device;
1868 netcp->ndev = ndev;
1869 netcp->ndev_dev = &ndev->dev;
1870 netcp->msg_enable = netif_msg_init(netcp_debug_level, NETCP_DEBUG);
1871 netcp->tx_pause_threshold = MAX_SKB_FRAGS;
1872 netcp->tx_resume_threshold = netcp->tx_pause_threshold;
1873 netcp->node_interface = node_interface;
1874
1875 ret = of_property_read_u32(node_interface, "efuse-mac", &efuse_mac);
1876 if (efuse_mac) {
1877 if (of_address_to_resource(node, NETCP_EFUSE_REG_INDEX, &res)) {
1878 dev_err(dev, "could not find efuse-mac reg resource\n");
1879 ret = -ENODEV;
1880 goto quit;
1881 }
1882 size = resource_size(&res);
1883
1884 if (!devm_request_mem_region(dev, res.start, size,
1885 dev_name(dev))) {
1886 dev_err(dev, "could not reserve resource\n");
1887 ret = -ENOMEM;
1888 goto quit;
1889 }
1890
1891 efuse = devm_ioremap_nocache(dev, res.start, size);
1892 if (!efuse) {
1893 dev_err(dev, "could not map resource\n");
1894 devm_release_mem_region(dev, res.start, size);
1895 ret = -ENOMEM;
1896 goto quit;
1897 }
1898
1899 emac_arch_get_mac_addr(efuse_mac_addr, efuse);
1900 if (is_valid_ether_addr(efuse_mac_addr))
1901 ether_addr_copy(ndev->dev_addr, efuse_mac_addr);
1902 else
1903 random_ether_addr(ndev->dev_addr);
1904
1905 devm_iounmap(dev, efuse);
1906 devm_release_mem_region(dev, res.start, size);
1907 } else {
1908 mac_addr = of_get_mac_address(node_interface);
1909 if (mac_addr)
1910 ether_addr_copy(ndev->dev_addr, mac_addr);
1911 else
1912 random_ether_addr(ndev->dev_addr);
1913 }
1914
1915 ret = of_property_read_string(node_interface, "rx-channel",
1916 &netcp->dma_chan_name);
1917 if (ret < 0) {
1918 dev_err(dev, "missing \"rx-channel\" parameter\n");
1919 ret = -ENODEV;
1920 goto quit;
1921 }
1922
1923 ret = of_property_read_u32(node_interface, "rx-queue",
1924 &netcp->rx_queue_id);
1925 if (ret < 0) {
1926 dev_warn(dev, "missing \"rx-queue\" parameter\n");
1927 netcp->rx_queue_id = KNAV_QUEUE_QPEND;
1928 }
1929
1930 ret = of_property_read_u32_array(node_interface, "rx-queue-depth",
1931 netcp->rx_queue_depths,
1932 KNAV_DMA_FDQ_PER_CHAN);
1933 if (ret < 0) {
1934 dev_err(dev, "missing \"rx-queue-depth\" parameter\n");
1935 netcp->rx_queue_depths[0] = 128;
1936 }
1937
1938 ret = of_property_read_u32_array(node_interface, "rx-buffer-size",
1939 netcp->rx_buffer_sizes,
1940 KNAV_DMA_FDQ_PER_CHAN);
1941 if (ret) {
1942 dev_err(dev, "missing \"rx-buffer-size\" parameter\n");
1943 netcp->rx_buffer_sizes[0] = 1536;
1944 }
1945
1946 ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
1947 if (ret < 0) {
1948 dev_err(dev, "missing \"rx-pool\" parameter\n");
1949 ret = -ENODEV;
1950 goto quit;
1951 }
1952 netcp->rx_pool_size = temp[0];
1953 netcp->rx_pool_region_id = temp[1];
1954
1955 ret = of_property_read_u32_array(node_interface, "tx-pool", temp, 2);
1956 if (ret < 0) {
1957 dev_err(dev, "missing \"tx-pool\" parameter\n");
1958 ret = -ENODEV;
1959 goto quit;
1960 }
1961 netcp->tx_pool_size = temp[0];
1962 netcp->tx_pool_region_id = temp[1];
1963
1964 if (netcp->tx_pool_size < MAX_SKB_FRAGS) {
1965 dev_err(dev, "tx-pool size too small, must be at least %ld\n",
1966 MAX_SKB_FRAGS);
1967 ret = -ENODEV;
1968 goto quit;
1969 }
1970
1971 ret = of_property_read_u32(node_interface, "tx-completion-queue",
1972 &netcp->tx_compl_qid);
1973 if (ret < 0) {
1974 dev_warn(dev, "missing \"tx-completion-queue\" parameter\n");
1975 netcp->tx_compl_qid = KNAV_QUEUE_QPEND;
1976 }
1977
1978 /* NAPI register */
1979 netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll, NETCP_NAPI_WEIGHT);
1980 netif_napi_add(ndev, &netcp->tx_napi, netcp_tx_poll, NETCP_NAPI_WEIGHT);
1981
1982 /* Register the network device */
1983 ndev->dev_id = 0;
1984 ndev->watchdog_timeo = NETCP_TX_TIMEOUT;
1985 ndev->netdev_ops = &netcp_netdev_ops;
1986 SET_NETDEV_DEV(ndev, dev);
1987
1988 list_add_tail(&netcp->interface_list, &netcp_device->interface_head);
1989 return 0;
1990
1991quit:
1992 free_netdev(ndev);
1993 return ret;
1994}
1995
1996static void netcp_delete_interface(struct netcp_device *netcp_device,
1997 struct net_device *ndev)
1998{
1999 struct netcp_intf_modpriv *intf_modpriv, *tmp;
2000 struct netcp_intf *netcp = netdev_priv(ndev);
2001 struct netcp_module *module;
2002
2003 dev_dbg(netcp_device->device, "Removing interface \"%s\"\n",
2004 ndev->name);
2005
2006 /* Notify each of the modules that the interface is going away */
2007 list_for_each_entry_safe(intf_modpriv, tmp, &netcp->module_head,
2008 intf_list) {
2009 module = intf_modpriv->netcp_module;
2010 dev_dbg(netcp_device->device, "Releasing module \"%s\"\n",
2011 module->name);
2012 if (module->release)
2013 module->release(intf_modpriv->module_priv);
2014 list_del(&intf_modpriv->intf_list);
2015 devm_kfree(netcp_device->device, intf_modpriv);
2016 }
2017 WARN(!list_empty(&netcp->module_head), "%s interface module list is not empty!\n",
2018 ndev->name);
2019
2020 list_del(&netcp->interface_list);
2021
2022 of_node_put(netcp->node_interface);
2023 unregister_netdev(ndev);
2024 netif_napi_del(&netcp->rx_napi);
2025 free_netdev(ndev);
2026}
2027
2028static int netcp_probe(struct platform_device *pdev)
2029{
2030 struct device_node *node = pdev->dev.of_node;
2031 struct netcp_intf *netcp_intf, *netcp_tmp;
2032 struct device_node *child, *interfaces;
2033 struct netcp_device *netcp_device;
2034 struct device *dev = &pdev->dev;
2035 struct netcp_module *module;
2036 int ret;
2037
2038 if (!node) {
2039 dev_err(dev, "could not find device info\n");
2040 return -ENODEV;
2041 }
2042
2043 /* Allocate a new NETCP device instance */
2044 netcp_device = devm_kzalloc(dev, sizeof(*netcp_device), GFP_KERNEL);
2045 if (!netcp_device)
2046 return -ENOMEM;
2047
2048 pm_runtime_enable(&pdev->dev);
2049 ret = pm_runtime_get_sync(&pdev->dev);
2050 if (ret < 0) {
2051 dev_err(dev, "Failed to enable NETCP power-domain\n");
2052 pm_runtime_disable(&pdev->dev);
2053 return ret;
2054 }
2055
2056 /* Initialize the NETCP device instance */
2057 INIT_LIST_HEAD(&netcp_device->interface_head);
2058 INIT_LIST_HEAD(&netcp_device->modpriv_head);
2059 netcp_device->device = dev;
2060 platform_set_drvdata(pdev, netcp_device);
2061
2062 /* create interfaces */
2063 interfaces = of_get_child_by_name(node, "netcp-interfaces");
2064 if (!interfaces) {
2065 dev_err(dev, "could not find netcp-interfaces node\n");
2066 ret = -ENODEV;
2067 goto probe_quit;
2068 }
2069
2070 for_each_available_child_of_node(interfaces, child) {
2071 ret = netcp_create_interface(netcp_device, child);
2072 if (ret) {
2073 dev_err(dev, "could not create interface(%s)\n",
2074 child->name);
2075 goto probe_quit_interface;
2076 }
2077 }
2078
2079 /* Add the device instance to the list */
2080 list_add_tail(&netcp_device->device_list, &netcp_devices);
2081
2082 /* Probe & attach any modules already registered */
2083 mutex_lock(&netcp_modules_lock);
2084 for_each_netcp_module(module) {
2085 ret = netcp_module_probe(netcp_device, module);
2086 if (ret < 0)
2087 dev_err(dev, "module(%s) probe failed\n", module->name);
2088 }
2089 mutex_unlock(&netcp_modules_lock);
2090 return 0;
2091
2092probe_quit_interface:
2093 list_for_each_entry_safe(netcp_intf, netcp_tmp,
2094 &netcp_device->interface_head,
2095 interface_list) {
2096 netcp_delete_interface(netcp_device, netcp_intf->ndev);
2097 }
2098
2099probe_quit:
2100 pm_runtime_put_sync(&pdev->dev);
2101 pm_runtime_disable(&pdev->dev);
2102 platform_set_drvdata(pdev, NULL);
2103 return ret;
2104}
2105
2106static int netcp_remove(struct platform_device *pdev)
2107{
2108 struct netcp_device *netcp_device = platform_get_drvdata(pdev);
2109 struct netcp_inst_modpriv *inst_modpriv, *tmp;
2110 struct netcp_module *module;
2111
2112 list_for_each_entry_safe(inst_modpriv, tmp, &netcp_device->modpriv_head,
2113 inst_list) {
2114 module = inst_modpriv->netcp_module;
2115 dev_dbg(&pdev->dev, "Removing module \"%s\"\n", module->name);
2116 module->remove(netcp_device, inst_modpriv->module_priv);
2117 list_del(&inst_modpriv->inst_list);
2118 devm_kfree(&pdev->dev, inst_modpriv);
2119 }
2120 WARN(!list_empty(&netcp_device->interface_head), "%s interface list not empty!\n",
2121 pdev->name);
2122
2123 devm_kfree(&pdev->dev, netcp_device);
2124 pm_runtime_put_sync(&pdev->dev);
2125 pm_runtime_disable(&pdev->dev);
2126 platform_set_drvdata(pdev, NULL);
2127 return 0;
2128}
2129
2130static const struct of_device_id of_match[] = {
2131 { .compatible = "ti,netcp-1.0", },
2132 {},
2133};
2134MODULE_DEVICE_TABLE(of, of_match);
2135
2136static struct platform_driver netcp_driver = {
2137 .driver = {
2138 .name = "netcp-1.0",
2139 .owner = THIS_MODULE,
2140 .of_match_table = of_match,
2141 },
2142 .probe = netcp_probe,
2143 .remove = netcp_remove,
2144};
2145module_platform_driver(netcp_driver);
2146
2147MODULE_LICENSE("GPL v2");
2148MODULE_DESCRIPTION("TI NETCP driver for Keystone SOCs");
2149MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
new file mode 100644
index 000000000000..84f5ce525750
--- /dev/null
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -0,0 +1,2159 @@
1/*
2 * Keystone GBE and XGBE subsystem code
3 *
4 * Copyright (C) 2014 Texas Instruments Incorporated
5 * Authors: Sandeep Nair <sandeep_n@ti.com>
6 * Sandeep Paulraj <s-paulraj@ti.com>
7 * Cyril Chemparathy <cyril@ti.com>
8 * Santosh Shilimkar <santosh.shilimkar@ti.com>
9 * Wingman Kwok <w-kwok2@ti.com>
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation version 2.
14 *
15 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
16 * kind, whether express or implied; without even the implied warranty
17 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 */
20
21#include <linux/io.h>
22#include <linux/module.h>
23#include <linux/of_mdio.h>
24#include <linux/of_address.h>
25#include <linux/if_vlan.h>
26#include <linux/ethtool.h>
27
28#include "cpsw_ale.h"
29#include "netcp.h"
30
31#define NETCP_DRIVER_NAME "TI KeyStone Ethernet Driver"
32#define NETCP_DRIVER_VERSION "v1.0"
33
34#define GBE_IDENT(reg) ((reg >> 16) & 0xffff)
35#define GBE_MAJOR_VERSION(reg) (reg >> 8 & 0x7)
36#define GBE_MINOR_VERSION(reg) (reg & 0xff)
37#define GBE_RTL_VERSION(reg) ((reg >> 11) & 0x1f)
38
39/* 1G Ethernet SS defines */
40#define GBE_MODULE_NAME "netcp-gbe"
41#define GBE_SS_VERSION_14 0x4ed21104
42
43#define GBE13_SGMII_MODULE_OFFSET 0x100
44#define GBE13_SGMII34_MODULE_OFFSET 0x400
45#define GBE13_SWITCH_MODULE_OFFSET 0x800
46#define GBE13_HOST_PORT_OFFSET 0x834
47#define GBE13_SLAVE_PORT_OFFSET 0x860
48#define GBE13_EMAC_OFFSET 0x900
49#define GBE13_SLAVE_PORT2_OFFSET 0xa00
50#define GBE13_HW_STATS_OFFSET 0xb00
51#define GBE13_ALE_OFFSET 0xe00
52#define GBE13_HOST_PORT_NUM 0
53#define GBE13_NUM_SLAVES 4
54#define GBE13_NUM_ALE_PORTS (GBE13_NUM_SLAVES + 1)
55#define GBE13_NUM_ALE_ENTRIES 1024
56
57/* 10G Ethernet SS defines */
58#define XGBE_MODULE_NAME "netcp-xgbe"
59#define XGBE_SS_VERSION_10 0x4ee42100
60
61#define XGBE_SERDES_REG_INDEX 1
62#define XGBE10_SGMII_MODULE_OFFSET 0x100
63#define XGBE10_SWITCH_MODULE_OFFSET 0x1000
64#define XGBE10_HOST_PORT_OFFSET 0x1034
65#define XGBE10_SLAVE_PORT_OFFSET 0x1064
66#define XGBE10_EMAC_OFFSET 0x1400
67#define XGBE10_ALE_OFFSET 0x1700
68#define XGBE10_HW_STATS_OFFSET 0x1800
69#define XGBE10_HOST_PORT_NUM 0
70#define XGBE10_NUM_SLAVES 2
71#define XGBE10_NUM_ALE_PORTS (XGBE10_NUM_SLAVES + 1)
72#define XGBE10_NUM_ALE_ENTRIES 1024
73
74#define GBE_TIMER_INTERVAL (HZ / 2)
75
76/* Soft reset register values */
77#define SOFT_RESET_MASK BIT(0)
78#define SOFT_RESET BIT(0)
79#define DEVICE_EMACSL_RESET_POLL_COUNT 100
80#define GMACSL_RET_WARN_RESET_INCOMPLETE -2
81
82#define MACSL_RX_ENABLE_CSF BIT(23)
83#define MACSL_ENABLE_EXT_CTL BIT(18)
84#define MACSL_XGMII_ENABLE BIT(13)
85#define MACSL_XGIG_MODE BIT(8)
86#define MACSL_GIG_MODE BIT(7)
87#define MACSL_GMII_ENABLE BIT(5)
88#define MACSL_FULLDUPLEX BIT(0)
89
90#define GBE_CTL_P0_ENABLE BIT(2)
91#define GBE_REG_VAL_STAT_ENABLE_ALL 0xff
92#define XGBE_REG_VAL_STAT_ENABLE_ALL 0xf
93#define GBE_STATS_CD_SEL BIT(28)
94
95#define GBE_PORT_MASK(x) (BIT(x) - 1)
96#define GBE_MASK_NO_PORTS 0
97
98#define GBE_DEF_1G_MAC_CONTROL \
99 (MACSL_GIG_MODE | MACSL_GMII_ENABLE | \
100 MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)
101
102#define GBE_DEF_10G_MAC_CONTROL \
103 (MACSL_XGIG_MODE | MACSL_XGMII_ENABLE | \
104 MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)
105
106#define GBE_STATSA_MODULE 0
107#define GBE_STATSB_MODULE 1
108#define GBE_STATSC_MODULE 2
109#define GBE_STATSD_MODULE 3
110
111#define XGBE_STATS0_MODULE 0
112#define XGBE_STATS1_MODULE 1
113#define XGBE_STATS2_MODULE 2
114
115#define MAX_SLAVES GBE13_NUM_SLAVES
116/* s: 0-based slave_port */
117#define SGMII_BASE(s) \
118 (((s) < 2) ? gbe_dev->sgmii_port_regs : gbe_dev->sgmii_port34_regs)
119
120#define GBE_TX_QUEUE 648
121#define GBE_TXHOOK_ORDER 0
122#define GBE_DEFAULT_ALE_AGEOUT 30
123#define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY)
124#define NETCP_LINK_STATE_INVALID -1
125
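/* GBE and XGBE place the same registers at different offsets within each
 * block; these helpers record the per-variant offsets so GBE_REG_ADDR()
 * can resolve a register address from the block base at run time.
 */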
126#define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
127 offsetof(struct gbe##_##rb, rn)
128#define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
129 offsetof(struct xgbe##_##rb, rn)
130#define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)
131
132struct xgbe_ss_regs {
133 u32 id_ver;
134 u32 synce_count;
135 u32 synce_mux;
136 u32 control;
137};
138
139struct xgbe_switch_regs {
140 u32 id_ver;
141 u32 control;
142 u32 emcontrol;
143 u32 stat_port_en;
144 u32 ptype;
145 u32 soft_idle;
146 u32 thru_rate;
147 u32 gap_thresh;
148 u32 tx_start_wds;
149 u32 flow_control;
150 u32 cppi_thresh;
151};
152
153struct xgbe_port_regs {
154 u32 blk_cnt;
155 u32 port_vlan;
156 u32 tx_pri_map;
157 u32 sa_lo;
158 u32 sa_hi;
159 u32 ts_ctl;
160 u32 ts_seq_ltype;
161 u32 ts_vlan;
162 u32 ts_ctl_ltype2;
163 u32 ts_ctl2;
164 u32 control;
165};
166
167struct xgbe_host_port_regs {
168 u32 blk_cnt;
169 u32 port_vlan;
170 u32 tx_pri_map;
171 u32 src_id;
172 u32 rx_pri_map;
173 u32 rx_maxlen;
174};
175
176struct xgbe_emac_regs {
177 u32 id_ver;
178 u32 mac_control;
179 u32 mac_status;
180 u32 soft_reset;
181 u32 rx_maxlen;
182 u32 __reserved_0;
183 u32 rx_pause;
184 u32 tx_pause;
185 u32 em_control;
186 u32 __reserved_1;
187 u32 tx_gap;
188 u32 rsvd[4];
189};
190
191struct xgbe_host_hw_stats {
192 u32 rx_good_frames;
193 u32 rx_broadcast_frames;
194 u32 rx_multicast_frames;
195 u32 __rsvd_0[3];
196 u32 rx_oversized_frames;
197 u32 __rsvd_1;
198 u32 rx_undersized_frames;
199 u32 __rsvd_2;
200 u32 overrun_type4;
201 u32 overrun_type5;
202 u32 rx_bytes;
203 u32 tx_good_frames;
204 u32 tx_broadcast_frames;
205 u32 tx_multicast_frames;
206 u32 __rsvd_3[9];
207 u32 tx_bytes;
208 u32 tx_64byte_frames;
209 u32 tx_65_to_127byte_frames;
210 u32 tx_128_to_255byte_frames;
211 u32 tx_256_to_511byte_frames;
212 u32 tx_512_to_1023byte_frames;
213 u32 tx_1024byte_frames;
214 u32 net_bytes;
215 u32 rx_sof_overruns;
216 u32 rx_mof_overruns;
217 u32 rx_dma_overruns;
218};
219
220struct xgbe_hw_stats {
221 u32 rx_good_frames;
222 u32 rx_broadcast_frames;
223 u32 rx_multicast_frames;
224 u32 rx_pause_frames;
225 u32 rx_crc_errors;
226 u32 rx_align_code_errors;
227 u32 rx_oversized_frames;
228 u32 rx_jabber_frames;
229 u32 rx_undersized_frames;
230 u32 rx_fragments;
231 u32 overrun_type4;
232 u32 overrun_type5;
233 u32 rx_bytes;
234 u32 tx_good_frames;
235 u32 tx_broadcast_frames;
236 u32 tx_multicast_frames;
237 u32 tx_pause_frames;
238 u32 tx_deferred_frames;
239 u32 tx_collision_frames;
240 u32 tx_single_coll_frames;
241 u32 tx_mult_coll_frames;
242 u32 tx_excessive_collisions;
243 u32 tx_late_collisions;
244 u32 tx_underrun;
245 u32 tx_carrier_sense_errors;
246 u32 tx_bytes;
247 u32 tx_64byte_frames;
248 u32 tx_65_to_127byte_frames;
249 u32 tx_128_to_255byte_frames;
250 u32 tx_256_to_511byte_frames;
251 u32 tx_512_to_1023byte_frames;
252 u32 tx_1024byte_frames;
253 u32 net_bytes;
254 u32 rx_sof_overruns;
255 u32 rx_mof_overruns;
256 u32 rx_dma_overruns;
257};
258
259#define XGBE10_NUM_STAT_ENTRIES (sizeof(struct xgbe_hw_stats)/sizeof(u32))
260
261struct gbe_ss_regs {
262 u32 id_ver;
263 u32 synce_count;
264 u32 synce_mux;
265};
266
267struct gbe_ss_regs_ofs {
268 u16 id_ver;
269 u16 control;
270};
271
272struct gbe_switch_regs {
273 u32 id_ver;
274 u32 control;
275 u32 soft_reset;
276 u32 stat_port_en;
277 u32 ptype;
278 u32 soft_idle;
279 u32 thru_rate;
280 u32 gap_thresh;
281 u32 tx_start_wds;
282 u32 flow_control;
283};
284
285struct gbe_switch_regs_ofs {
286 u16 id_ver;
287 u16 control;
288 u16 soft_reset;
289 u16 emcontrol;
290 u16 stat_port_en;
291 u16 ptype;
292 u16 flow_control;
293};
294
295struct gbe_port_regs {
296 u32 max_blks;
297 u32 blk_cnt;
298 u32 port_vlan;
299 u32 tx_pri_map;
300 u32 sa_lo;
301 u32 sa_hi;
302 u32 ts_ctl;
303 u32 ts_seq_ltype;
304 u32 ts_vlan;
305 u32 ts_ctl_ltype2;
306 u32 ts_ctl2;
307};
308
309struct gbe_port_regs_ofs {
310 u16 port_vlan;
311 u16 tx_pri_map;
312 u16 sa_lo;
313 u16 sa_hi;
314 u16 ts_ctl;
315 u16 ts_seq_ltype;
316 u16 ts_vlan;
317 u16 ts_ctl_ltype2;
318 u16 ts_ctl2;
319};
320
321struct gbe_host_port_regs {
322 u32 src_id;
323 u32 port_vlan;
324 u32 rx_pri_map;
325 u32 rx_maxlen;
326};
327
328struct gbe_host_port_regs_ofs {
329 u16 port_vlan;
330 u16 tx_pri_map;
331 u16 rx_maxlen;
332};
333
334struct gbe_emac_regs {
335 u32 id_ver;
336 u32 mac_control;
337 u32 mac_status;
338 u32 soft_reset;
339 u32 rx_maxlen;
340 u32 __reserved_0;
341 u32 rx_pause;
342 u32 tx_pause;
343 u32 __reserved_1;
344 u32 rx_pri_map;
345 u32 rsvd[6];
346};
347
348struct gbe_emac_regs_ofs {
349 u16 mac_control;
350 u16 soft_reset;
351 u16 rx_maxlen;
352};
353
354struct gbe_hw_stats {
355 u32 rx_good_frames;
356 u32 rx_broadcast_frames;
357 u32 rx_multicast_frames;
358 u32 rx_pause_frames;
359 u32 rx_crc_errors;
360 u32 rx_align_code_errors;
361 u32 rx_oversized_frames;
362 u32 rx_jabber_frames;
363 u32 rx_undersized_frames;
364 u32 rx_fragments;
365 u32 __pad_0[2];
366 u32 rx_bytes;
367 u32 tx_good_frames;
368 u32 tx_broadcast_frames;
369 u32 tx_multicast_frames;
370 u32 tx_pause_frames;
371 u32 tx_deferred_frames;
372 u32 tx_collision_frames;
373 u32 tx_single_coll_frames;
374 u32 tx_mult_coll_frames;
375 u32 tx_excessive_collisions;
376 u32 tx_late_collisions;
377 u32 tx_underrun;
378 u32 tx_carrier_sense_errors;
379 u32 tx_bytes;
380 u32 tx_64byte_frames;
381 u32 tx_65_to_127byte_frames;
382 u32 tx_128_to_255byte_frames;
383 u32 tx_256_to_511byte_frames;
384 u32 tx_512_to_1023byte_frames;
385 u32 tx_1024byte_frames;
386 u32 net_bytes;
387 u32 rx_sof_overruns;
388 u32 rx_mof_overruns;
389 u32 rx_dma_overruns;
390};
391
392#define GBE13_NUM_HW_STAT_ENTRIES (sizeof(struct gbe_hw_stats)/sizeof(u32))
393#define GBE13_NUM_HW_STATS_MOD 2
394#define XGBE10_NUM_HW_STATS_MOD 3
395#define GBE_MAX_HW_STAT_MODS 3
396#define GBE_HW_STATS_REG_MAP_SZ 0x100
397
398struct gbe_slave {
399 void __iomem *port_regs;
400 void __iomem *emac_regs;
401 struct gbe_port_regs_ofs port_regs_ofs;
402 struct gbe_emac_regs_ofs emac_regs_ofs;
403 int slave_num; /* 0 based logical number */
404 int port_num; /* actual port number */
405 atomic_t link_state;
406 bool open;
407 struct phy_device *phy;
408 u32 link_interface;
409 u32 mac_control;
410 u8 phy_port_t;
411 struct device_node *phy_node;
412 struct list_head slave_list;
413};
414
415struct gbe_priv {
416 struct device *dev;
417 struct netcp_device *netcp_device;
418 struct timer_list timer;
419 u32 num_slaves;
420 u32 ale_entries;
421 u32 ale_ports;
422 bool enable_ale;
423 struct netcp_tx_pipe tx_pipe;
424
425 int host_port;
426 u32 rx_packet_max;
427 u32 ss_version;
428
429 void __iomem *ss_regs;
430 void __iomem *switch_regs;
431 void __iomem *host_port_regs;
432 void __iomem *ale_reg;
433 void __iomem *sgmii_port_regs;
434 void __iomem *sgmii_port34_regs;
435 void __iomem *xgbe_serdes_regs;
436 void __iomem *hw_stats_regs[GBE_MAX_HW_STAT_MODS];
437
438 struct gbe_ss_regs_ofs ss_regs_ofs;
439 struct gbe_switch_regs_ofs switch_regs_ofs;
440 struct gbe_host_port_regs_ofs host_port_regs_ofs;
441
442 struct cpsw_ale *ale;
443 unsigned int tx_queue_id;
444 const char *dma_chan_name;
445
446 struct list_head gbe_intf_head;
447 struct list_head secondary_slaves;
448 struct net_device *dummy_ndev;
449
450 u64 *hw_stats;
451 const struct netcp_ethtool_stat *et_stats;
452 int num_et_stats;
453 /* Lock for updating the hwstats */
454 spinlock_t hw_stats_lock;
455};
456
457struct gbe_intf {
458 struct net_device *ndev;
459 struct device *dev;
460 struct gbe_priv *gbe_dev;
461 struct netcp_tx_pipe tx_pipe;
462 struct gbe_slave *slave;
463 struct list_head gbe_intf_list;
464 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
465};
466
467static struct netcp_module gbe_module;
468static struct netcp_module xgbe_module;
469
470/* Statistic management */
471struct netcp_ethtool_stat {
472 char desc[ETH_GSTRING_LEN];
473 int type;
474 u32 size;
475 int offset;
476};
477
478#define GBE_STATSA_INFO(field) "GBE_A:"#field, GBE_STATSA_MODULE,\
479 FIELD_SIZEOF(struct gbe_hw_stats, field), \
480 offsetof(struct gbe_hw_stats, field)
481
482#define GBE_STATSB_INFO(field) "GBE_B:"#field, GBE_STATSB_MODULE,\
483 FIELD_SIZEOF(struct gbe_hw_stats, field), \
484 offsetof(struct gbe_hw_stats, field)
485
486#define GBE_STATSC_INFO(field) "GBE_C:"#field, GBE_STATSC_MODULE,\
487 FIELD_SIZEOF(struct gbe_hw_stats, field), \
488 offsetof(struct gbe_hw_stats, field)
489
490#define GBE_STATSD_INFO(field) "GBE_D:"#field, GBE_STATSD_MODULE,\
491 FIELD_SIZEOF(struct gbe_hw_stats, field), \
492 offsetof(struct gbe_hw_stats, field)
493
494static const struct netcp_ethtool_stat gbe13_et_stats[] = {
495 /* GBE module A */
496 {GBE_STATSA_INFO(rx_good_frames)},
497 {GBE_STATSA_INFO(rx_broadcast_frames)},
498 {GBE_STATSA_INFO(rx_multicast_frames)},
499 {GBE_STATSA_INFO(rx_pause_frames)},
500 {GBE_STATSA_INFO(rx_crc_errors)},
501 {GBE_STATSA_INFO(rx_align_code_errors)},
502 {GBE_STATSA_INFO(rx_oversized_frames)},
503 {GBE_STATSA_INFO(rx_jabber_frames)},
504 {GBE_STATSA_INFO(rx_undersized_frames)},
505 {GBE_STATSA_INFO(rx_fragments)},
506 {GBE_STATSA_INFO(rx_bytes)},
507 {GBE_STATSA_INFO(tx_good_frames)},
508 {GBE_STATSA_INFO(tx_broadcast_frames)},
509 {GBE_STATSA_INFO(tx_multicast_frames)},
510 {GBE_STATSA_INFO(tx_pause_frames)},
511 {GBE_STATSA_INFO(tx_deferred_frames)},
512 {GBE_STATSA_INFO(tx_collision_frames)},
513 {GBE_STATSA_INFO(tx_single_coll_frames)},
514 {GBE_STATSA_INFO(tx_mult_coll_frames)},
515 {GBE_STATSA_INFO(tx_excessive_collisions)},
516 {GBE_STATSA_INFO(tx_late_collisions)},
517 {GBE_STATSA_INFO(tx_underrun)},
518 {GBE_STATSA_INFO(tx_carrier_sense_errors)},
519 {GBE_STATSA_INFO(tx_bytes)},
520 {GBE_STATSA_INFO(tx_64byte_frames)},
521 {GBE_STATSA_INFO(tx_65_to_127byte_frames)},
522 {GBE_STATSA_INFO(tx_128_to_255byte_frames)},
523 {GBE_STATSA_INFO(tx_256_to_511byte_frames)},
524 {GBE_STATSA_INFO(tx_512_to_1023byte_frames)},
525 {GBE_STATSA_INFO(tx_1024byte_frames)},
526 {GBE_STATSA_INFO(net_bytes)},
527 {GBE_STATSA_INFO(rx_sof_overruns)},
528 {GBE_STATSA_INFO(rx_mof_overruns)},
529 {GBE_STATSA_INFO(rx_dma_overruns)},
530 /* GBE module B */
531 {GBE_STATSB_INFO(rx_good_frames)},
532 {GBE_STATSB_INFO(rx_broadcast_frames)},
533 {GBE_STATSB_INFO(rx_multicast_frames)},
534 {GBE_STATSB_INFO(rx_pause_frames)},
535 {GBE_STATSB_INFO(rx_crc_errors)},
536 {GBE_STATSB_INFO(rx_align_code_errors)},
537 {GBE_STATSB_INFO(rx_oversized_frames)},
538 {GBE_STATSB_INFO(rx_jabber_frames)},
539 {GBE_STATSB_INFO(rx_undersized_frames)},
540 {GBE_STATSB_INFO(rx_fragments)},
541 {GBE_STATSB_INFO(rx_bytes)},
542 {GBE_STATSB_INFO(tx_good_frames)},
543 {GBE_STATSB_INFO(tx_broadcast_frames)},
544 {GBE_STATSB_INFO(tx_multicast_frames)},
545 {GBE_STATSB_INFO(tx_pause_frames)},
546 {GBE_STATSB_INFO(tx_deferred_frames)},
547 {GBE_STATSB_INFO(tx_collision_frames)},
548 {GBE_STATSB_INFO(tx_single_coll_frames)},
549 {GBE_STATSB_INFO(tx_mult_coll_frames)},
550 {GBE_STATSB_INFO(tx_excessive_collisions)},
551 {GBE_STATSB_INFO(tx_late_collisions)},
552 {GBE_STATSB_INFO(tx_underrun)},
553 {GBE_STATSB_INFO(tx_carrier_sense_errors)},
554 {GBE_STATSB_INFO(tx_bytes)},
555 {GBE_STATSB_INFO(tx_64byte_frames)},
556 {GBE_STATSB_INFO(tx_65_to_127byte_frames)},
557 {GBE_STATSB_INFO(tx_128_to_255byte_frames)},
558 {GBE_STATSB_INFO(tx_256_to_511byte_frames)},
559 {GBE_STATSB_INFO(tx_512_to_1023byte_frames)},
560 {GBE_STATSB_INFO(tx_1024byte_frames)},
561 {GBE_STATSB_INFO(net_bytes)},
562 {GBE_STATSB_INFO(rx_sof_overruns)},
563 {GBE_STATSB_INFO(rx_mof_overruns)},
564 {GBE_STATSB_INFO(rx_dma_overruns)},
565 /* GBE module C */
566 {GBE_STATSC_INFO(rx_good_frames)},
567 {GBE_STATSC_INFO(rx_broadcast_frames)},
568 {GBE_STATSC_INFO(rx_multicast_frames)},
569 {GBE_STATSC_INFO(rx_pause_frames)},
570 {GBE_STATSC_INFO(rx_crc_errors)},
571 {GBE_STATSC_INFO(rx_align_code_errors)},
572 {GBE_STATSC_INFO(rx_oversized_frames)},
573 {GBE_STATSC_INFO(rx_jabber_frames)},
574 {GBE_STATSC_INFO(rx_undersized_frames)},
575 {GBE_STATSC_INFO(rx_fragments)},
576 {GBE_STATSC_INFO(rx_bytes)},
577 {GBE_STATSC_INFO(tx_good_frames)},
578 {GBE_STATSC_INFO(tx_broadcast_frames)},
579 {GBE_STATSC_INFO(tx_multicast_frames)},
580 {GBE_STATSC_INFO(tx_pause_frames)},
581 {GBE_STATSC_INFO(tx_deferred_frames)},
582 {GBE_STATSC_INFO(tx_collision_frames)},
583 {GBE_STATSC_INFO(tx_single_coll_frames)},
584 {GBE_STATSC_INFO(tx_mult_coll_frames)},
585 {GBE_STATSC_INFO(tx_excessive_collisions)},
586 {GBE_STATSC_INFO(tx_late_collisions)},
587 {GBE_STATSC_INFO(tx_underrun)},
588 {GBE_STATSC_INFO(tx_carrier_sense_errors)},
589 {GBE_STATSC_INFO(tx_bytes)},
590 {GBE_STATSC_INFO(tx_64byte_frames)},
591 {GBE_STATSC_INFO(tx_65_to_127byte_frames)},
592 {GBE_STATSC_INFO(tx_128_to_255byte_frames)},
593 {GBE_STATSC_INFO(tx_256_to_511byte_frames)},
594 {GBE_STATSC_INFO(tx_512_to_1023byte_frames)},
595 {GBE_STATSC_INFO(tx_1024byte_frames)},
596 {GBE_STATSC_INFO(net_bytes)},
597 {GBE_STATSC_INFO(rx_sof_overruns)},
598 {GBE_STATSC_INFO(rx_mof_overruns)},
599 {GBE_STATSC_INFO(rx_dma_overruns)},
600 /* GBE module D */
601 {GBE_STATSD_INFO(rx_good_frames)},
602 {GBE_STATSD_INFO(rx_broadcast_frames)},
603 {GBE_STATSD_INFO(rx_multicast_frames)},
604 {GBE_STATSD_INFO(rx_pause_frames)},
605 {GBE_STATSD_INFO(rx_crc_errors)},
606 {GBE_STATSD_INFO(rx_align_code_errors)},
607 {GBE_STATSD_INFO(rx_oversized_frames)},
608 {GBE_STATSD_INFO(rx_jabber_frames)},
609 {GBE_STATSD_INFO(rx_undersized_frames)},
610 {GBE_STATSD_INFO(rx_fragments)},
611 {GBE_STATSD_INFO(rx_bytes)},
612 {GBE_STATSD_INFO(tx_good_frames)},
613 {GBE_STATSD_INFO(tx_broadcast_frames)},
614 {GBE_STATSD_INFO(tx_multicast_frames)},
615 {GBE_STATSD_INFO(tx_pause_frames)},
616 {GBE_STATSD_INFO(tx_deferred_frames)},
617 {GBE_STATSD_INFO(tx_collision_frames)},
618 {GBE_STATSD_INFO(tx_single_coll_frames)},
619 {GBE_STATSD_INFO(tx_mult_coll_frames)},
620 {GBE_STATSD_INFO(tx_excessive_collisions)},
621 {GBE_STATSD_INFO(tx_late_collisions)},
622 {GBE_STATSD_INFO(tx_underrun)},
623 {GBE_STATSD_INFO(tx_carrier_sense_errors)},
624 {GBE_STATSD_INFO(tx_bytes)},
625 {GBE_STATSD_INFO(tx_64byte_frames)},
626 {GBE_STATSD_INFO(tx_65_to_127byte_frames)},
627 {GBE_STATSD_INFO(tx_128_to_255byte_frames)},
628 {GBE_STATSD_INFO(tx_256_to_511byte_frames)},
629 {GBE_STATSD_INFO(tx_512_to_1023byte_frames)},
630 {GBE_STATSD_INFO(tx_1024byte_frames)},
631 {GBE_STATSD_INFO(net_bytes)},
632 {GBE_STATSD_INFO(rx_sof_overruns)},
633 {GBE_STATSD_INFO(rx_mof_overruns)},
634 {GBE_STATSD_INFO(rx_dma_overruns)},
635};
636
637#define XGBE_STATS0_INFO(field) "GBE_0:"#field, XGBE_STATS0_MODULE, \
638 FIELD_SIZEOF(struct xgbe_hw_stats, field), \
639 offsetof(struct xgbe_hw_stats, field)
640
641#define XGBE_STATS1_INFO(field) "GBE_1:"#field, XGBE_STATS1_MODULE, \
642 FIELD_SIZEOF(struct xgbe_hw_stats, field), \
643 offsetof(struct xgbe_hw_stats, field)
644
645#define XGBE_STATS2_INFO(field) "GBE_2:"#field, XGBE_STATS2_MODULE, \
646 FIELD_SIZEOF(struct xgbe_hw_stats, field), \
647 offsetof(struct xgbe_hw_stats, field)
648
649static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
650 /* GBE module 0 */
651 {XGBE_STATS0_INFO(rx_good_frames)},
652 {XGBE_STATS0_INFO(rx_broadcast_frames)},
653 {XGBE_STATS0_INFO(rx_multicast_frames)},
654 {XGBE_STATS0_INFO(rx_oversized_frames)},
655 {XGBE_STATS0_INFO(rx_undersized_frames)},
656 {XGBE_STATS0_INFO(overrun_type4)},
657 {XGBE_STATS0_INFO(overrun_type5)},
658 {XGBE_STATS0_INFO(rx_bytes)},
659 {XGBE_STATS0_INFO(tx_good_frames)},
660 {XGBE_STATS0_INFO(tx_broadcast_frames)},
661 {XGBE_STATS0_INFO(tx_multicast_frames)},
662 {XGBE_STATS0_INFO(tx_bytes)},
663 {XGBE_STATS0_INFO(tx_64byte_frames)},
664 {XGBE_STATS0_INFO(tx_65_to_127byte_frames)},
665 {XGBE_STATS0_INFO(tx_128_to_255byte_frames)},
666 {XGBE_STATS0_INFO(tx_256_to_511byte_frames)},
667 {XGBE_STATS0_INFO(tx_512_to_1023byte_frames)},
668 {XGBE_STATS0_INFO(tx_1024byte_frames)},
669 {XGBE_STATS0_INFO(net_bytes)},
670 {XGBE_STATS0_INFO(rx_sof_overruns)},
671 {XGBE_STATS0_INFO(rx_mof_overruns)},
672 {XGBE_STATS0_INFO(rx_dma_overruns)},
673 /* XGBE module 1 */
674 {XGBE_STATS1_INFO(rx_good_frames)},
675 {XGBE_STATS1_INFO(rx_broadcast_frames)},
676 {XGBE_STATS1_INFO(rx_multicast_frames)},
677 {XGBE_STATS1_INFO(rx_pause_frames)},
678 {XGBE_STATS1_INFO(rx_crc_errors)},
679 {XGBE_STATS1_INFO(rx_align_code_errors)},
680 {XGBE_STATS1_INFO(rx_oversized_frames)},
681 {XGBE_STATS1_INFO(rx_jabber_frames)},
682 {XGBE_STATS1_INFO(rx_undersized_frames)},
683 {XGBE_STATS1_INFO(rx_fragments)},
684 {XGBE_STATS1_INFO(overrun_type4)},
685 {XGBE_STATS1_INFO(overrun_type5)},
686 {XGBE_STATS1_INFO(rx_bytes)},
687 {XGBE_STATS1_INFO(tx_good_frames)},
688 {XGBE_STATS1_INFO(tx_broadcast_frames)},
689 {XGBE_STATS1_INFO(tx_multicast_frames)},
690 {XGBE_STATS1_INFO(tx_pause_frames)},
691 {XGBE_STATS1_INFO(tx_deferred_frames)},
692 {XGBE_STATS1_INFO(tx_collision_frames)},
693 {XGBE_STATS1_INFO(tx_single_coll_frames)},
694 {XGBE_STATS1_INFO(tx_mult_coll_frames)},
695 {XGBE_STATS1_INFO(tx_excessive_collisions)},
696 {XGBE_STATS1_INFO(tx_late_collisions)},
697 {XGBE_STATS1_INFO(tx_underrun)},
698 {XGBE_STATS1_INFO(tx_carrier_sense_errors)},
699 {XGBE_STATS1_INFO(tx_bytes)},
700 {XGBE_STATS1_INFO(tx_64byte_frames)},
701 {XGBE_STATS1_INFO(tx_65_to_127byte_frames)},
702 {XGBE_STATS1_INFO(tx_128_to_255byte_frames)},
703 {XGBE_STATS1_INFO(tx_256_to_511byte_frames)},
704 {XGBE_STATS1_INFO(tx_512_to_1023byte_frames)},
705 {XGBE_STATS1_INFO(tx_1024byte_frames)},
706 {XGBE_STATS1_INFO(net_bytes)},
707 {XGBE_STATS1_INFO(rx_sof_overruns)},
708 {XGBE_STATS1_INFO(rx_mof_overruns)},
709 {XGBE_STATS1_INFO(rx_dma_overruns)},
710 /* XGBE module 2 */
711 {XGBE_STATS2_INFO(rx_good_frames)},
712 {XGBE_STATS2_INFO(rx_broadcast_frames)},
713 {XGBE_STATS2_INFO(rx_multicast_frames)},
714 {XGBE_STATS2_INFO(rx_pause_frames)},
715 {XGBE_STATS2_INFO(rx_crc_errors)},
716 {XGBE_STATS2_INFO(rx_align_code_errors)},
717 {XGBE_STATS2_INFO(rx_oversized_frames)},
718 {XGBE_STATS2_INFO(rx_jabber_frames)},
719 {XGBE_STATS2_INFO(rx_undersized_frames)},
720 {XGBE_STATS2_INFO(rx_fragments)},
721 {XGBE_STATS2_INFO(overrun_type4)},
722 {XGBE_STATS2_INFO(overrun_type5)},
723 {XGBE_STATS2_INFO(rx_bytes)},
724 {XGBE_STATS2_INFO(tx_good_frames)},
725 {XGBE_STATS2_INFO(tx_broadcast_frames)},
726 {XGBE_STATS2_INFO(tx_multicast_frames)},
727 {XGBE_STATS2_INFO(tx_pause_frames)},
728 {XGBE_STATS2_INFO(tx_deferred_frames)},
729 {XGBE_STATS2_INFO(tx_collision_frames)},
730 {XGBE_STATS2_INFO(tx_single_coll_frames)},
731 {XGBE_STATS2_INFO(tx_mult_coll_frames)},
732 {XGBE_STATS2_INFO(tx_excessive_collisions)},
733 {XGBE_STATS2_INFO(tx_late_collisions)},
734 {XGBE_STATS2_INFO(tx_underrun)},
735 {XGBE_STATS2_INFO(tx_carrier_sense_errors)},
736 {XGBE_STATS2_INFO(tx_bytes)},
737 {XGBE_STATS2_INFO(tx_64byte_frames)},
738 {XGBE_STATS2_INFO(tx_65_to_127byte_frames)},
739 {XGBE_STATS2_INFO(tx_128_to_255byte_frames)},
740 {XGBE_STATS2_INFO(tx_256_to_511byte_frames)},
741 {XGBE_STATS2_INFO(tx_512_to_1023byte_frames)},
742 {XGBE_STATS2_INFO(tx_1024byte_frames)},
743 {XGBE_STATS2_INFO(net_bytes)},
744 {XGBE_STATS2_INFO(rx_sof_overruns)},
745 {XGBE_STATS2_INFO(rx_mof_overruns)},
746 {XGBE_STATS2_INFO(rx_dma_overruns)},
747};
748
749#define for_each_intf(i, priv) \
750 list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list)
751
752#define for_each_sec_slave(slave, priv) \
753 list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list)
754
755#define first_sec_slave(priv) \
756 list_first_entry(&priv->secondary_slaves, \
757 struct gbe_slave, slave_list)
758
759static void keystone_get_drvinfo(struct net_device *ndev,
760 struct ethtool_drvinfo *info)
761{
762 strncpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver));
763 strncpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version));
764}
765
766static u32 keystone_get_msglevel(struct net_device *ndev)
767{
768 struct netcp_intf *netcp = netdev_priv(ndev);
769
770 return netcp->msg_enable;
771}
772
773static void keystone_set_msglevel(struct net_device *ndev, u32 value)
774{
775 struct netcp_intf *netcp = netdev_priv(ndev);
776
777 netcp->msg_enable = value;
778}
779
780static void keystone_get_stat_strings(struct net_device *ndev,
781 uint32_t stringset, uint8_t *data)
782{
783 struct netcp_intf *netcp = netdev_priv(ndev);
784 struct gbe_intf *gbe_intf;
785 struct gbe_priv *gbe_dev;
786 int i;
787
788 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
789 if (!gbe_intf)
790 return;
791 gbe_dev = gbe_intf->gbe_dev;
792
793 switch (stringset) {
794 case ETH_SS_STATS:
795 for (i = 0; i < gbe_dev->num_et_stats; i++) {
796 memcpy(data, gbe_dev->et_stats[i].desc,
797 ETH_GSTRING_LEN);
798 data += ETH_GSTRING_LEN;
799 }
800 break;
801 case ETH_SS_TEST:
802 break;
803 }
804}
805
806static int keystone_get_sset_count(struct net_device *ndev, int stringset)
807{
808 struct netcp_intf *netcp = netdev_priv(ndev);
809 struct gbe_intf *gbe_intf;
810 struct gbe_priv *gbe_dev;
811
812 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
813 if (!gbe_intf)
814 return -EINVAL;
815 gbe_dev = gbe_intf->gbe_dev;
816
817 switch (stringset) {
818 case ETH_SS_TEST:
819 return 0;
820 case ETH_SS_STATS:
821 return gbe_dev->num_et_stats;
822 default:
823 return -EINVAL;
824 }
825}
826
827static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
828{
829 void __iomem *base = NULL;
830 u32 __iomem *p;
831 u32 tmp = 0;
832 int i;
833
834 for (i = 0; i < gbe_dev->num_et_stats; i++) {
835 base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[i].type];
836 p = base + gbe_dev->et_stats[i].offset;
837 tmp = readl(p);
838 gbe_dev->hw_stats[i] = gbe_dev->hw_stats[i] + tmp;
839 if (data)
840 data[i] = gbe_dev->hw_stats[i];
841 /* write-to-decrement:
842 * new register value = old register value - write value
843 */
844 writel(tmp, p);
845 }
846}
847
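/* Version 1.4 hardware exposes only two of its four stats modules at a
 * time: GBE_STATS_CD_SEL in stat_port_en selects whether the A/B or the
 * C/D pair appears in the two hw_stats register windows.
 */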
848static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
849{
850 void __iomem *gbe_statsa = gbe_dev->hw_stats_regs[0];
851 void __iomem *gbe_statsb = gbe_dev->hw_stats_regs[1];
852 u64 *hw_stats = &gbe_dev->hw_stats[0];
853 void __iomem *base = NULL;
854 u32 __iomem *p;
855 u32 tmp = 0, val, pair_size = (gbe_dev->num_et_stats / 2);
856 int i, j, pair;
857
858 for (pair = 0; pair < 2; pair++) {
859 val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
860
861 if (pair == 0)
862 val &= ~GBE_STATS_CD_SEL;
863 else
864 val |= GBE_STATS_CD_SEL;
865
866 /* make the stat modules visible */
867 writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
868
869 for (i = 0; i < pair_size; i++) {
870 j = pair * pair_size + i;
871 switch (gbe_dev->et_stats[j].type) {
872 case GBE_STATSA_MODULE:
873 case GBE_STATSC_MODULE:
874 base = gbe_statsa;
875 break;
876 case GBE_STATSB_MODULE:
877 case GBE_STATSD_MODULE:
878 base = gbe_statsb;
879 break;
880 }
881
882 p = base + gbe_dev->et_stats[j].offset;
883 tmp = readl(p);
884 hw_stats[j] += tmp;
885 if (data)
886 data[j] = hw_stats[j];
887 /* write-to-decrement:
888 * new register value = old register value - write value
889 */
890 writel(tmp, p);
891 }
892 }
893}
894
895static void keystone_get_ethtool_stats(struct net_device *ndev,
896 struct ethtool_stats *stats,
897 uint64_t *data)
898{
899 struct netcp_intf *netcp = netdev_priv(ndev);
900 struct gbe_intf *gbe_intf;
901 struct gbe_priv *gbe_dev;
902
903 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
904 if (!gbe_intf)
905 return;
906
907 gbe_dev = gbe_intf->gbe_dev;
908 spin_lock_bh(&gbe_dev->hw_stats_lock);
909 if (gbe_dev->ss_version == GBE_SS_VERSION_14)
910 gbe_update_stats_ver14(gbe_dev, data);
911 else
912 gbe_update_stats(gbe_dev, data);
913 spin_unlock_bh(&gbe_dev->hw_stats_lock);
914}
915
916static int keystone_get_settings(struct net_device *ndev,
917 struct ethtool_cmd *cmd)
918{
919 struct netcp_intf *netcp = netdev_priv(ndev);
920 struct phy_device *phy = ndev->phydev;
921 struct gbe_intf *gbe_intf;
922 int ret;
923
924 if (!phy)
925 return -EINVAL;
926
927 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
928 if (!gbe_intf)
929 return -EINVAL;
930
931 if (!gbe_intf->slave)
932 return -EINVAL;
933
934 ret = phy_ethtool_gset(phy, cmd);
935 if (!ret)
936 cmd->port = gbe_intf->slave->phy_port_t;
937
938 return ret;
939}
940
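/* Only allow changing the reported port type when the PHY advertises the
 * corresponding medium; otherwise reject the request.
 */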
941static int keystone_set_settings(struct net_device *ndev,
942 struct ethtool_cmd *cmd)
943{
944 struct netcp_intf *netcp = netdev_priv(ndev);
945 struct phy_device *phy = ndev->phydev;
946 struct gbe_intf *gbe_intf;
947 u32 features = cmd->advertising & cmd->supported;
948
949 if (!phy)
950 return -EINVAL;
951
952 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
953 if (!gbe_intf)
954 return -EINVAL;
955
956 if (!gbe_intf->slave)
957 return -EINVAL;
958
959 if (cmd->port != gbe_intf->slave->phy_port_t) {
960 if ((cmd->port == PORT_TP) && !(features & ADVERTISED_TP))
961 return -EINVAL;
962
963 if ((cmd->port == PORT_AUI) && !(features & ADVERTISED_AUI))
964 return -EINVAL;
965
966 if ((cmd->port == PORT_BNC) && !(features & ADVERTISED_BNC))
967 return -EINVAL;
968
969 if ((cmd->port == PORT_MII) && !(features & ADVERTISED_MII))
970 return -EINVAL;
971
972 if ((cmd->port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE))
973 return -EINVAL;
974 }
975
976 gbe_intf->slave->phy_port_t = cmd->port;
977 return phy_ethtool_sset(phy, cmd);
978}
979
980static const struct ethtool_ops keystone_ethtool_ops = {
981 .get_drvinfo = keystone_get_drvinfo,
982 .get_link = ethtool_op_get_link,
983 .get_msglevel = keystone_get_msglevel,
984 .set_msglevel = keystone_set_msglevel,
985 .get_strings = keystone_get_stat_strings,
986 .get_sset_count = keystone_get_sset_count,
987 .get_ethtool_stats = keystone_get_ethtool_stats,
988 .get_settings = keystone_get_settings,
989 .set_settings = keystone_set_settings,
990};
991
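/* Split a six-byte MAC address across the sa_hi (bytes 0-3) and
 * sa_lo (bytes 4-5) slave port registers.
 */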
992#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
993 ((mac)[2] << 16) | ((mac)[3] << 24))
994#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))
995
996static void gbe_set_slave_mac(struct gbe_slave *slave,
997 struct gbe_intf *gbe_intf)
998{
999 struct net_device *ndev = gbe_intf->ndev;
1000
1001 writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi));
1002 writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo));
1003}
1004
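/* Switch port numbers: the host port comes first, so when the host port
 * is 0 a slave's switch port is its slave_num + 1.
 */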
1005static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num)
1006{
1007 if (priv->host_port == 0)
1008 return slave_num + 1;
1009
1010 return slave_num;
1011}
1012
1013static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
1014 struct net_device *ndev,
1015 struct gbe_slave *slave,
1016 int up)
1017{
1018 struct phy_device *phy = slave->phy;
1019 u32 mac_control = 0;
1020
1021 if (up) {
1022 mac_control = slave->mac_control;
1023 if (phy && (phy->speed == SPEED_1000)) {
1024 mac_control |= MACSL_GIG_MODE;
1025 mac_control &= ~MACSL_XGIG_MODE;
1026 } else if (phy && (phy->speed == SPEED_10000)) {
1027 mac_control |= MACSL_XGIG_MODE;
1028 mac_control &= ~MACSL_GIG_MODE;
1029 }
1030
1031 writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
1032 mac_control));
1033
1034 cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
1035 ALE_PORT_STATE,
1036 ALE_PORT_STATE_FORWARD);
1037
1038 if (ndev && slave->open)
1039 netif_carrier_on(ndev);
1040 } else {
1041 writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
1042 mac_control));
1043 cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
1044 ALE_PORT_STATE,
1045 ALE_PORT_STATE_DISABLE);
1046 if (ndev)
1047 netif_carrier_off(ndev);
1048 }
1049
1050 if (phy)
1051 phy_print_status(phy);
1052}
1053
1054static bool gbe_phy_link_status(struct gbe_slave *slave)
1055{
1056 return !slave->phy || slave->phy->link;
1057}
1058
1059static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
1060 struct gbe_slave *slave,
1061 struct net_device *ndev)
1062{
1063 int sp = slave->slave_num;
1064 int phy_link_state, sgmii_link_state = 1, link_state;
1065
1066 if (!slave->open)
1067 return;
1068
1069 if (!SLAVE_LINK_IS_XGMII(slave))
1070 sgmii_link_state = netcp_sgmii_get_port_link(SGMII_BASE(sp),
1071 sp);
1072 phy_link_state = gbe_phy_link_status(slave);
1073 link_state = phy_link_state & sgmii_link_state;
1074
1075 if (atomic_xchg(&slave->link_state, link_state) != link_state)
1076 netcp_ethss_link_state_action(gbe_dev, ndev, slave,
1077 link_state);
1078}
1079
1080static void xgbe_adjust_link(struct net_device *ndev)
1081{
1082 struct netcp_intf *netcp = netdev_priv(ndev);
1083 struct gbe_intf *gbe_intf;
1084
1085 gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
1086 if (!gbe_intf)
1087 return;
1088
1089 netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
1090 ndev);
1091}
1092
1093static void gbe_adjust_link(struct net_device *ndev)
1094{
1095 struct netcp_intf *netcp = netdev_priv(ndev);
1096 struct gbe_intf *gbe_intf;
1097
1098 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1099 if (!gbe_intf)
1100 return;
1101
1102 netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
1103 ndev);
1104}
1105
1106static void gbe_adjust_link_sec_slaves(struct net_device *ndev)
1107{
1108 struct gbe_priv *gbe_dev = netdev_priv(ndev);
1109 struct gbe_slave *slave;
1110
1111 for_each_sec_slave(slave, gbe_dev)
1112 netcp_ethss_update_link_state(gbe_dev, slave, NULL);
1113}
1114
1115/* Reset EMAC
1116 * Soft reset is set and polled until clear, or until a timeout occurs
1117 */
1118static int gbe_port_reset(struct gbe_slave *slave)
1119{
1120 u32 i, v;
1121
1122 /* Set the soft reset bit */
1123 writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset));
1124
1125 /* Wait for the bit to clear */
1126 for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
1127 v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset));
1128 if ((v & SOFT_RESET_MASK) != SOFT_RESET)
1129 return 0;
1130 }
1131
1132 /* Timeout on the reset */
1133 return GMACSL_RET_WARN_RESET_INCOMPLETE;
1134}
1135
1136/* Configure EMAC */
1137static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
1138 int max_rx_len)
1139{
1140 u32 xgmii_mode;
1141
1142 if (max_rx_len > NETCP_MAX_FRAME_SIZE)
1143 max_rx_len = NETCP_MAX_FRAME_SIZE;
1144
1145 /* Enable correct MII mode at SS level */
1146 if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) &&
1147 (slave->link_interface >= XGMII_LINK_MAC_PHY)) {
1148 xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control));
1149 xgmii_mode |= (1 << slave->slave_num);
1150 writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control));
1151 }
1152
1153 writel(max_rx_len, GBE_REG_ADDR(slave, emac_regs, rx_maxlen));
1154 writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
1155}
1156
1157static void gbe_slave_stop(struct gbe_intf *intf)
1158{
1159 struct gbe_priv *gbe_dev = intf->gbe_dev;
1160 struct gbe_slave *slave = intf->slave;
1161
1162 gbe_port_reset(slave);
1163 /* Disable forwarding */
1164 cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
1165 ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
1166 cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast,
1167 1 << slave->port_num, 0, 0);
1168
1169 if (!slave->phy)
1170 return;
1171
1172 phy_stop(slave->phy);
1173 phy_disconnect(slave->phy);
1174 slave->phy = NULL;
1175}
1176
1177static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
1178{
1179 void __iomem *sgmii_port_regs;
1180
1181 sgmii_port_regs = priv->sgmii_port_regs;
1182 if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2))
1183 sgmii_port_regs = priv->sgmii_port34_regs;
1184
1185 if (!SLAVE_LINK_IS_XGMII(slave)) {
1186 netcp_sgmii_reset(sgmii_port_regs, slave->slave_num);
1187 netcp_sgmii_config(sgmii_port_regs, slave->slave_num,
1188 slave->link_interface);
1189 }
1190}
1191
1192static int gbe_slave_open(struct gbe_intf *gbe_intf)
1193{
1194 struct gbe_priv *priv = gbe_intf->gbe_dev;
1195 struct gbe_slave *slave = gbe_intf->slave;
1196 phy_interface_t phy_mode;
1197 bool has_phy = false;
1198
1199 void (*hndlr)(struct net_device *) = gbe_adjust_link;
1200
1201 gbe_sgmii_config(priv, slave);
1202 gbe_port_reset(slave);
1203 gbe_port_config(priv, slave, priv->rx_packet_max);
1204 gbe_set_slave_mac(slave, gbe_intf);
1205 /* enable forwarding */
1206 cpsw_ale_control_set(priv->ale, slave->port_num,
1207 ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
1208 cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast,
1209 1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2);
1210
1211 if (slave->link_interface == SGMII_LINK_MAC_PHY) {
1212 has_phy = true;
1213 phy_mode = PHY_INTERFACE_MODE_SGMII;
1214 slave->phy_port_t = PORT_MII;
1215 } else if (slave->link_interface == XGMII_LINK_MAC_PHY) {
1216 has_phy = true;
1217 phy_mode = PHY_INTERFACE_MODE_NA;
1218 slave->phy_port_t = PORT_FIBRE;
1219 }
1220
1221 if (has_phy) {
1222 if (priv->ss_version == XGBE_SS_VERSION_10)
1223 hndlr = xgbe_adjust_link;
1224
1225 slave->phy = of_phy_connect(gbe_intf->ndev,
1226 slave->phy_node,
1227 hndlr, 0,
1228 phy_mode);
1229 if (!slave->phy) {
1230 dev_err(priv->dev, "phy not found on slave %d\n",
1231 slave->slave_num);
1232 return -ENODEV;
1233 }
1234 dev_dbg(priv->dev, "phy found: id is: 0x%s\n",
1235 dev_name(&slave->phy->dev));
1236 phy_start(slave->phy);
1237 phy_read_status(slave->phy);
1238 }
1239 return 0;
1240}
1241
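/* One-time host port setup: set the receive frame limit, start the ALE
 * and program its default port masks for VLAN membership, multicast
 * flooding and untagged egress.
 */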
1242static void gbe_init_host_port(struct gbe_priv *priv)
1243{
1244 int bypass_en = 1;
1245 /* Max length register */
1246 writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs,
1247 rx_maxlen));
1248
1249 cpsw_ale_start(priv->ale);
1250
1251 if (priv->enable_ale)
1252 bypass_en = 0;
1253
1254 cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en);
1255
1256 cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1);
1257
1258 cpsw_ale_control_set(priv->ale, priv->host_port,
1259 ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
1260
1261 cpsw_ale_control_set(priv->ale, 0,
1262 ALE_PORT_UNKNOWN_VLAN_MEMBER,
1263 GBE_PORT_MASK(priv->ale_ports));
1264
1265 cpsw_ale_control_set(priv->ale, 0,
1266 ALE_PORT_UNKNOWN_MCAST_FLOOD,
1267 GBE_PORT_MASK(priv->ale_ports - 1));
1268
1269 cpsw_ale_control_set(priv->ale, 0,
1270 ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
1271 GBE_PORT_MASK(priv->ale_ports));
1272
1273 cpsw_ale_control_set(priv->ale, 0,
1274 ALE_PORT_UNTAGGED_EGRESS,
1275 GBE_PORT_MASK(priv->ale_ports));
1276}
1277
1278static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
1279{
1280 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
1281 u16 vlan_id;
1282
1283 cpsw_ale_add_mcast(gbe_dev->ale, addr,
1284 GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0,
1285 ALE_MCAST_FWD_2);
1286 for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
1287 cpsw_ale_add_mcast(gbe_dev->ale, addr,
1288 GBE_PORT_MASK(gbe_dev->ale_ports),
1289 ALE_VLAN, vlan_id, ALE_MCAST_FWD_2);
1290 }
1291}
1292
1293static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
1294{
1295 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
1296 u16 vlan_id;
1297
1298 cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
1299
1300 for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID)
1301 cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
1302 ALE_VLAN, vlan_id);
1303}
1304
1305static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
1306{
1307 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
1308 u16 vlan_id;
1309
1310 cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0);
1311
1312 for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
1313 cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id);
1314 }
1315}
1316
1317static void gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
1318{
1319 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
1320 u16 vlan_id;
1321
1322 cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
1323
1324 for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
1325 cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
1326 ALE_VLAN, vlan_id);
1327 }
1328}
1329
1330static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr)
1331{
1332 struct gbe_intf *gbe_intf = intf_priv;
1333 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
1334
1335 dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n",
1336 naddr->addr, naddr->type);
1337
1338 switch (naddr->type) {
1339 case ADDR_MCAST:
1340 case ADDR_BCAST:
1341 gbe_add_mcast_addr(gbe_intf, naddr->addr);
1342 break;
1343 case ADDR_UCAST:
1344 case ADDR_DEV:
1345 gbe_add_ucast_addr(gbe_intf, naddr->addr);
1346 break;
1347 case ADDR_ANY:
1348 /* nothing to do for promiscuous */
1349 default:
1350 break;
1351 }
1352
1353 return 0;
1354}
1355
1356static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr)
1357{
1358 struct gbe_intf *gbe_intf = intf_priv;
1359 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
1360
1361 dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n",
1362 naddr->addr, naddr->type);
1363
1364 switch (naddr->type) {
1365 case ADDR_MCAST:
1366 case ADDR_BCAST:
1367 gbe_del_mcast_addr(gbe_intf, naddr->addr);
1368 break;
1369 case ADDR_UCAST:
1370 case ADDR_DEV:
1371 gbe_del_ucast_addr(gbe_intf, naddr->addr);
1372 break;
1373 case ADDR_ANY:
1374 /* nothing to do for promiscuous */
1375 default:
1376 break;
1377 }
1378
1379 return 0;
1380}
1381
1382static int gbe_add_vid(void *intf_priv, int vid)
1383{
1384 struct gbe_intf *gbe_intf = intf_priv;
1385 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
1386
1387 set_bit(vid, gbe_intf->active_vlans);
1388
1389 cpsw_ale_add_vlan(gbe_dev->ale, vid,
1390 GBE_PORT_MASK(gbe_dev->ale_ports),
1391 GBE_MASK_NO_PORTS,
1392 GBE_PORT_MASK(gbe_dev->ale_ports),
1393 GBE_PORT_MASK(gbe_dev->ale_ports - 1));
1394
1395 return 0;
1396}
1397
1398static int gbe_del_vid(void *intf_priv, int vid)
1399{
1400 struct gbe_intf *gbe_intf = intf_priv;
1401 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
1402
1403 cpsw_ale_del_vlan(gbe_dev->ale, vid, 0);
1404 clear_bit(vid, gbe_intf->active_vlans);
1405 return 0;
1406}
1407
1408static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
1409{
1410 struct gbe_intf *gbe_intf = intf_priv;
1411 struct phy_device *phy = gbe_intf->slave->phy;
1412 int ret = -EOPNOTSUPP;
1413
1414 if (phy)
1415 ret = phy_mii_ioctl(phy, req, cmd);
1416
1417 return ret;
1418}
1419
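/* Runs every GBE_TIMER_INTERVAL: refresh link state for all open
 * interfaces and secondary slaves, then fold the 32-bit hardware
 * counters into the 64-bit software totals.
 */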
1420static void netcp_ethss_timer(unsigned long arg)
1421{
1422 struct gbe_priv *gbe_dev = (struct gbe_priv *)arg;
1423 struct gbe_intf *gbe_intf;
1424 struct gbe_slave *slave;
1425
1426 /* Check & update SGMII link state of interfaces */
1427 for_each_intf(gbe_intf, gbe_dev) {
1428 if (!gbe_intf->slave->open)
1429 continue;
1430 netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave,
1431 gbe_intf->ndev);
1432 }
1433
1434 /* Check & update SGMII link state of secondary ports */
1435 for_each_sec_slave(slave, gbe_dev) {
1436 netcp_ethss_update_link_state(gbe_dev, slave, NULL);
1437 }
1438
1439 spin_lock_bh(&gbe_dev->hw_stats_lock);
1440
1441 if (gbe_dev->ss_version == GBE_SS_VERSION_14)
1442 gbe_update_stats_ver14(gbe_dev, NULL);
1443 else
1444 gbe_update_stats(gbe_dev, NULL);
1445
1446 spin_unlock_bh(&gbe_dev->hw_stats_lock);
1447
1448 gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL;
1449 add_timer(&gbe_dev->timer);
1450}
1451
1452static int gbe_tx_hook(int order, void *data, struct netcp_packet *p_info)
1453{
1454 struct gbe_intf *gbe_intf = data;
1455
1456 p_info->tx_pipe = &gbe_intf->tx_pipe;
1457 return 0;
1458}
1459
1460static int gbe_open(void *intf_priv, struct net_device *ndev)
1461{
1462 struct gbe_intf *gbe_intf = intf_priv;
1463 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
1464 struct netcp_intf *netcp = netdev_priv(ndev);
1465 struct gbe_slave *slave = gbe_intf->slave;
1466 int port_num = slave->port_num;
1467 u32 reg;
1468 int ret;
1469
1470 reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
1471 dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d) GBE identification value 0x%x\n",
1472 GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg),
1473 GBE_RTL_VERSION(reg), GBE_IDENT(reg));
1474
1475 if (gbe_dev->enable_ale)
1476 gbe_intf->tx_pipe.dma_psflags = 0;
1477 else
1478 gbe_intf->tx_pipe.dma_psflags = port_num;
1479
1480 dev_dbg(gbe_dev->dev, "opened TX channel %s: %p with psflags %d\n",
1481 gbe_intf->tx_pipe.dma_chan_name,
1482 gbe_intf->tx_pipe.dma_channel,
1483 gbe_intf->tx_pipe.dma_psflags);
1484
1485 gbe_slave_stop(gbe_intf);
1486
1487 /* disable priority elevation and enable statistics on all ports */
1488 writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));
1489
1490 /* Control register */
1491 writel(GBE_CTL_P0_ENABLE, GBE_REG_ADDR(gbe_dev, switch_regs, control));
1492
1493 /* All statistics enabled and STAT AB visible by default */
1494 writel(GBE_REG_VAL_STAT_ENABLE_ALL, GBE_REG_ADDR(gbe_dev, switch_regs,
1495 stat_port_en));
1496
1497 ret = gbe_slave_open(gbe_intf);
1498 if (ret)
1499 goto fail;
1500
1501 netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook,
1502 gbe_intf);
1503
1504 slave->open = true;
1505 netcp_ethss_update_link_state(gbe_dev, slave, ndev);
1506 return 0;
1507
1508fail:
1509 gbe_slave_stop(gbe_intf);
1510 return ret;
1511}
1512
1513static int gbe_close(void *intf_priv, struct net_device *ndev)
1514{
1515 struct gbe_intf *gbe_intf = intf_priv;
1516 struct netcp_intf *netcp = netdev_priv(ndev);
1517
1518 gbe_slave_stop(gbe_intf);
1519 netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook,
1520 gbe_intf);
1521
1522 gbe_intf->slave->open = false;
1523 atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID);
1524 return 0;
1525}
1526
1527static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
1528 struct device_node *node)
1529{
1530 int port_reg_num;
1531 u32 port_reg_ofs, emac_reg_ofs;
1532
1533 if (of_property_read_u32(node, "slave-port", &slave->slave_num)) {
1534 dev_err(gbe_dev->dev, "missing slave-port parameter\n");
1535 return -EINVAL;
1536 }
1537
1538 if (of_property_read_u32(node, "link-interface",
1539 &slave->link_interface)) {
1540 dev_warn(gbe_dev->dev,
1541 "missing link-interface value defaulting to 1G mac-phy link\n");
1542 slave->link_interface = SGMII_LINK_MAC_PHY;
1543 }
1544
1545 slave->open = false;
1546 slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
1547 slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);
1548
1549 if (slave->link_interface >= XGMII_LINK_MAC_PHY)
1550 slave->mac_control = GBE_DEF_10G_MAC_CONTROL;
1551 else
1552 slave->mac_control = GBE_DEF_1G_MAC_CONTROL;
1553
1554 /* EMAC register maps are contiguous but port regs are not */
1555 port_reg_num = slave->slave_num;
1556 if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
1557 if (slave->slave_num > 1) {
1558 port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET;
1559 port_reg_num -= 2;
1560 } else {
1561 port_reg_ofs = GBE13_SLAVE_PORT_OFFSET;
1562 }
1563 } else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
1564 port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET;
1565 } else {
1566 dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n",
1567 gbe_dev->ss_version);
1568 return -EINVAL;
1569 }
1570
1571 if (gbe_dev->ss_version == GBE_SS_VERSION_14)
1572 emac_reg_ofs = GBE13_EMAC_OFFSET;
1573 else if (gbe_dev->ss_version == XGBE_SS_VERSION_10)
1574 emac_reg_ofs = XGBE10_EMAC_OFFSET;
1575
1576 slave->port_regs = gbe_dev->ss_regs + port_reg_ofs +
1577 (0x30 * port_reg_num);
1578 slave->emac_regs = gbe_dev->ss_regs + emac_reg_ofs +
1579 (0x40 * slave->slave_num);
1580
1581 if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
1582 /* Initialize slave port register offsets */
1583 GBE_SET_REG_OFS(slave, port_regs, port_vlan);
1584 GBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
1585 GBE_SET_REG_OFS(slave, port_regs, sa_lo);
1586 GBE_SET_REG_OFS(slave, port_regs, sa_hi);
1587 GBE_SET_REG_OFS(slave, port_regs, ts_ctl);
1588 GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
1589 GBE_SET_REG_OFS(slave, port_regs, ts_vlan);
1590 GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
1591 GBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
1592
1593 /* Initialize EMAC register offsets */
1594 GBE_SET_REG_OFS(slave, emac_regs, mac_control);
1595 GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
1596 GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
1597
1598 } else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
1599 /* Initialize slave port register offsets */
1600 XGBE_SET_REG_OFS(slave, port_regs, port_vlan);
1601 XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
1602 XGBE_SET_REG_OFS(slave, port_regs, sa_lo);
1603 XGBE_SET_REG_OFS(slave, port_regs, sa_hi);
1604 XGBE_SET_REG_OFS(slave, port_regs, ts_ctl);
1605 XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
1606 XGBE_SET_REG_OFS(slave, port_regs, ts_vlan);
1607 XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
1608 XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
1609
1610 /* Initialize EMAC register offsets */
1611 XGBE_SET_REG_OFS(slave, emac_regs, mac_control);
1612 XGBE_SET_REG_OFS(slave, emac_regs, soft_reset);
1613 XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
1614 }
1615
1616 atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID);
1617 return 0;
1618}
1619
1620static void init_secondary_ports(struct gbe_priv *gbe_dev,
1621 struct device_node *node)
1622{
1623 struct device *dev = gbe_dev->dev;
1624 phy_interface_t phy_mode;
1625 struct gbe_priv **priv;
1626 struct device_node *port;
1627 struct gbe_slave *slave;
1628 bool mac_phy_link = false;
1629
1630 for_each_child_of_node(node, port) {
1631 slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL);
1632 if (!slave) {
1633 dev_err(dev,
1634 "memomry alloc failed for secondary port(%s), skipping...\n",
1635 port->name);
1636 continue;
1637 }
1638
1639 if (init_slave(gbe_dev, slave, port)) {
1640 dev_err(dev,
1641 "Failed to initialize secondary port(%s), skipping...\n",
1642 port->name);
1643 devm_kfree(dev, slave);
1644 continue;
1645 }
1646
1647 gbe_sgmii_config(gbe_dev, slave);
1648 gbe_port_reset(slave);
1649 gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max);
1650 list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves);
1651 gbe_dev->num_slaves++;
1652 if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
1653 (slave->link_interface == XGMII_LINK_MAC_PHY))
1654 mac_phy_link = true;
1655
1656 slave->open = true;
1657 }
1658
1659 /* of_phy_connect() is needed only for MAC-PHY interface */
1660 if (!mac_phy_link)
1661 return;
1662
1663 /* Allocate dummy netdev device for attaching to phy device */
1664 gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy",
1665 NET_NAME_UNKNOWN, ether_setup);
1666 if (!gbe_dev->dummy_ndev) {
1667 dev_err(dev,
1668 "Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n");
1669 return;
1670 }
1671 priv = netdev_priv(gbe_dev->dummy_ndev);
1672 *priv = gbe_dev;
1673
1674 if (slave->link_interface == SGMII_LINK_MAC_PHY) {
1675 phy_mode = PHY_INTERFACE_MODE_SGMII;
1676 slave->phy_port_t = PORT_MII;
1677 } else {
1678 phy_mode = PHY_INTERFACE_MODE_NA;
1679 slave->phy_port_t = PORT_FIBRE;
1680 }
1681
1682 for_each_sec_slave(slave, gbe_dev) {
1683 if ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
1684 (slave->link_interface != XGMII_LINK_MAC_PHY))
1685 continue;
1686 slave->phy =
1687 of_phy_connect(gbe_dev->dummy_ndev,
1688 slave->phy_node,
1689 gbe_adjust_link_sec_slaves,
1690 0, phy_mode);
1691 if (!slave->phy) {
1692 dev_err(dev, "phy not found for slave %d\n",
1693 slave->slave_num);
1694 slave->phy = NULL;
1695 } else {
1696 dev_dbg(dev, "phy found: id is: 0x%s\n",
1697 dev_name(&slave->phy->dev));
1698 phy_start(slave->phy);
1699 phy_read_status(slave->phy);
1700 }
1701 }
1702}
1703
1704static void free_secondary_ports(struct gbe_priv *gbe_dev)
1705{
1706 struct gbe_slave *slave;
1707
1708 for (;;) {
1709 slave = first_sec_slave(gbe_dev);
1710 if (!slave)
1711 break;
1712 if (slave->phy)
1713 phy_disconnect(slave->phy);
1714 list_del(&slave->slave_list);
1715 }
1716 if (gbe_dev->dummy_ndev)
1717 free_netdev(gbe_dev->dummy_ndev);
1718}
1719
1720static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
1721 struct device_node *node)
1722{
1723 struct resource res;
1724 void __iomem *regs;
1725 int ret, i;
1726
1727 ret = of_address_to_resource(node, 0, &res);
1728 if (ret) {
1729 dev_err(gbe_dev->dev, "Can't translate of node(%s) address for xgbe subsystem regs\n",
1730 node->name);
1731 return ret;
1732 }
1733
1734 regs = devm_ioremap_resource(gbe_dev->dev, &res);
1735 if (IS_ERR(regs)) {
1736 dev_err(gbe_dev->dev, "Failed to map xgbe register base\n");
1737 return PTR_ERR(regs);
1738 }
1739 gbe_dev->ss_regs = regs;
1740
1741 ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
1742 if (ret) {
1743 dev_err(gbe_dev->dev, "Can't translate of node(%s) address for xgbe serdes regs\n",
1744 node->name);
1745 return ret;
1746 }
1747
1748 regs = devm_ioremap_resource(gbe_dev->dev, &res);
1749 if (IS_ERR(regs)) {
1750 dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n");
1751 return PTR_ERR(regs);
1752 }
1753 gbe_dev->xgbe_serdes_regs = regs;
1754
1755 gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
1756 XGBE10_NUM_STAT_ENTRIES *
1757 (XGBE10_NUM_SLAVES + 1) * sizeof(u64),
1758 GFP_KERNEL);
1759 if (!gbe_dev->hw_stats) {
1760 dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
1761 return -ENOMEM;
1762 }
1763
1764 gbe_dev->ss_version = XGBE_SS_VERSION_10;
1765 gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
1766 XGBE10_SGMII_MODULE_OFFSET;
1767 gbe_dev->switch_regs = gbe_dev->ss_regs + XGBE10_SWITCH_MODULE_OFFSET;
1768 gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET;
1769
1770 for (i = 0; i < XGBE10_NUM_HW_STATS_MOD; i++)
1771 gbe_dev->hw_stats_regs[i] = gbe_dev->ss_regs +
1772 XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);
1773
1774 gbe_dev->ale_reg = gbe_dev->ss_regs + XGBE10_ALE_OFFSET;
1775 gbe_dev->ale_ports = XGBE10_NUM_ALE_PORTS;
1776 gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
1777 gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
1778 gbe_dev->et_stats = xgbe10_et_stats;
1779 gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
1780
1781 /* Subsystem registers */
1782 XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
1783 XGBE_SET_REG_OFS(gbe_dev, ss_regs, control);
1784
1785 /* Switch module registers */
1786 XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
1787 XGBE_SET_REG_OFS(gbe_dev, switch_regs, control);
1788 XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
1789 XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
1790 XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
1791
1792 /* Host port registers */
1793 XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
1794 XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
1795 XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
1796 return 0;
1797}
1798
1799static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
1800 struct device_node *node)
1801{
1802 struct resource res;
1803 void __iomem *regs;
1804 int ret;
1805
1806 ret = of_address_to_resource(node, 0, &res);
1807 if (ret) {
1808 dev_err(gbe_dev->dev, "Can't translate of node(%s) address\n",
1809 node->name);
1810 return ret;
1811 }
1812
1813 regs = devm_ioremap_resource(gbe_dev->dev, &res);
1814 if (IS_ERR(regs)) {
1815 dev_err(gbe_dev->dev, "Failed to map gbe register base\n");
1816 return PTR_ERR(regs);
1817 }
1818 gbe_dev->ss_regs = regs;
1819 gbe_dev->ss_version = readl(gbe_dev->ss_regs);
1820 return 0;
1821}
1822
1823static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
1824 struct device_node *node)
1825{
1826 void __iomem *regs;
1827 int i;
1828
1829 gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
1830 GBE13_NUM_HW_STAT_ENTRIES *
1831 GBE13_NUM_SLAVES * sizeof(u64),
1832 GFP_KERNEL);
1833 if (!gbe_dev->hw_stats) {
1834 dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
1835 return -ENOMEM;
1836 }
1837
1838 regs = gbe_dev->ss_regs;
1839 gbe_dev->sgmii_port_regs = regs + GBE13_SGMII_MODULE_OFFSET;
1840 gbe_dev->sgmii_port34_regs = regs + GBE13_SGMII34_MODULE_OFFSET;
1841 gbe_dev->switch_regs = regs + GBE13_SWITCH_MODULE_OFFSET;
1842 gbe_dev->host_port_regs = regs + GBE13_HOST_PORT_OFFSET;
1843
1844 for (i = 0; i < GBE13_NUM_HW_STATS_MOD; i++)
1845 gbe_dev->hw_stats_regs[i] = regs + GBE13_HW_STATS_OFFSET +
1846 (GBE_HW_STATS_REG_MAP_SZ * i);
1847
1848 gbe_dev->ale_reg = regs + GBE13_ALE_OFFSET;
1849 gbe_dev->ale_ports = GBE13_NUM_ALE_PORTS;
1850 gbe_dev->host_port = GBE13_HOST_PORT_NUM;
1851 gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
1852 gbe_dev->et_stats = gbe13_et_stats;
1853 gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
1854
1855 /* Subsystem registers */
1856 GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
1857
1858 /* Switch module registers */
1859 GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
1860 GBE_SET_REG_OFS(gbe_dev, switch_regs, control);
1861 GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset);
1862 GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
1863 GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
1864 GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
1865
1866 /* Host port registers */
1867 GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
1868 GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
1869 return 0;
1870}
1871
1872static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
1873 struct device_node *node, void **inst_priv)
1874{
1875 struct device_node *interfaces, *interface;
1876 struct device_node *secondary_ports;
1877 struct cpsw_ale_params ale_params;
1878 struct gbe_priv *gbe_dev;
1879 u32 slave_num;
1880 int ret = 0;
1881
1882 if (!node) {
1883 dev_err(dev, "device tree info unavailable\n");
1884 return -ENODEV;
1885 }
1886
1887 gbe_dev = devm_kzalloc(dev, sizeof(struct gbe_priv), GFP_KERNEL);
1888 if (!gbe_dev)
1889 return -ENOMEM;
1890
1891 gbe_dev->dev = dev;
1892 gbe_dev->netcp_device = netcp_device;
1893 gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE;
1894
1895 /* init the hw stats lock */
1896 spin_lock_init(&gbe_dev->hw_stats_lock);
1897
1898 if (of_find_property(node, "enable-ale", NULL)) {
1899 gbe_dev->enable_ale = true;
1900 dev_info(dev, "ALE enabled\n");
1901 } else {
1902 gbe_dev->enable_ale = false;
1903 dev_dbg(dev, "ALE bypass enabled*\n");
1904 }
1905
1906 ret = of_property_read_u32(node, "tx-queue",
1907 &gbe_dev->tx_queue_id);
1908 if (ret < 0) {
1909 dev_err(dev, "missing tx_queue parameter\n");
1910 gbe_dev->tx_queue_id = GBE_TX_QUEUE;
1911 }
1912
1913 ret = of_property_read_string(node, "tx-channel",
1914 &gbe_dev->dma_chan_name);
1915 if (ret < 0) {
1916 dev_err(dev, "missing \"tx-channel\" parameter\n");
1917 ret = -ENODEV;
1918 goto quit;
1919 }
1920
1921 if (!strcmp(node->name, "gbe")) {
1922 ret = get_gbe_resource_version(gbe_dev, node);
1923 if (ret)
1924 goto quit;
1925
1926 ret = set_gbe_ethss14_priv(gbe_dev, node);
1927 if (ret)
1928 goto quit;
1929 } else if (!strcmp(node->name, "xgbe")) {
1930 ret = set_xgbe_ethss10_priv(gbe_dev, node);
1931 if (ret)
1932 goto quit;
1933 ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
1934 gbe_dev->ss_regs);
1935 if (ret)
1936 goto quit;
1937 } else {
1938 dev_err(dev, "unknown GBE node(%s)\n", node->name);
1939 ret = -ENODEV;
1940 goto quit;
1941 }
1942
1943 interfaces = of_get_child_by_name(node, "interfaces");
1944 if (!interfaces)
1945 dev_err(dev, "could not find interfaces\n");
1946
1947 ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
1948 gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
1949 if (ret)
1950 goto quit;
1951
1952 ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
1953 if (ret)
1954 goto quit;
1955
1956 /* Create network interfaces */
1957 INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
1958 for_each_child_of_node(interfaces, interface) {
1959 ret = of_property_read_u32(interface, "slave-port", &slave_num);
1960 if (ret) {
1961 dev_err(dev, "missing slave-port parameter, skipping interface configuration for %s\n",
1962 interface->name);
1963 continue;
1964 }
1965 gbe_dev->num_slaves++;
1966 }
1967
1968 if (!gbe_dev->num_slaves)
1969 dev_warn(dev, "No network interface configured\n");
1970
1971 /* Initialize Secondary slave ports */
1972 secondary_ports = of_get_child_by_name(node, "secondary-slave-ports");
1973 INIT_LIST_HEAD(&gbe_dev->secondary_slaves);
1974 if (secondary_ports)
1975 init_secondary_ports(gbe_dev, secondary_ports);
1976 of_node_put(secondary_ports);
1977
1978 if (!gbe_dev->num_slaves) {
1979 dev_err(dev, "No network interface or secondary ports configured\n");
1980 ret = -ENODEV;
1981 goto quit;
1982 }
1983
1984 memset(&ale_params, 0, sizeof(ale_params));
1985 ale_params.dev = gbe_dev->dev;
1986 ale_params.ale_regs = gbe_dev->ale_reg;
1987 ale_params.ale_ageout = GBE_DEFAULT_ALE_AGEOUT;
1988 ale_params.ale_entries = gbe_dev->ale_entries;
1989 ale_params.ale_ports = gbe_dev->ale_ports;
1990
1991 gbe_dev->ale = cpsw_ale_create(&ale_params);
1992 if (!gbe_dev->ale) {
1993 dev_err(gbe_dev->dev, "error initializing ale engine\n");
1994 ret = -ENODEV;
1995 goto quit;
1996 } else {
1997 dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
1998 }
1999
2000 /* initialize host port */
2001 gbe_init_host_port(gbe_dev);
2002
2003 init_timer(&gbe_dev->timer);
2004 gbe_dev->timer.data = (unsigned long)gbe_dev;
2005 gbe_dev->timer.function = netcp_ethss_timer;
2006 gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL;
2007 add_timer(&gbe_dev->timer);
2008 *inst_priv = gbe_dev;
2009 return 0;
2010
2011quit:
2012 if (gbe_dev->hw_stats)
2013 devm_kfree(dev, gbe_dev->hw_stats);
2014 cpsw_ale_destroy(gbe_dev->ale);
2015 if (gbe_dev->ss_regs)
2016 devm_iounmap(dev, gbe_dev->ss_regs);
2017 of_node_put(interfaces);
2018 devm_kfree(dev, gbe_dev);
2019 return ret;
2020}
2021
2022static int gbe_attach(void *inst_priv, struct net_device *ndev,
2023 struct device_node *node, void **intf_priv)
2024{
2025 struct gbe_priv *gbe_dev = inst_priv;
2026 struct gbe_intf *gbe_intf;
2027 int ret;
2028
2029 if (!node) {
2030 dev_err(gbe_dev->dev, "interface node not available\n");
2031 return -ENODEV;
2032 }
2033
2034 gbe_intf = devm_kzalloc(gbe_dev->dev, sizeof(*gbe_intf), GFP_KERNEL);
2035 if (!gbe_intf)
2036 return -ENOMEM;
2037
2038 gbe_intf->ndev = ndev;
2039 gbe_intf->dev = gbe_dev->dev;
2040 gbe_intf->gbe_dev = gbe_dev;
2041
2042 gbe_intf->slave = devm_kzalloc(gbe_dev->dev,
2043 sizeof(*gbe_intf->slave),
2044 GFP_KERNEL);
2045 if (!gbe_intf->slave) {
2046 ret = -ENOMEM;
2047 goto fail;
2048 }
2049
2050 if (init_slave(gbe_dev, gbe_intf->slave, node)) {
2051 ret = -ENODEV;
2052 goto fail;
2053 }
2054
2055 gbe_intf->tx_pipe = gbe_dev->tx_pipe;
2056 ndev->ethtool_ops = &keystone_ethtool_ops;
2057 list_add_tail(&gbe_intf->gbe_intf_list, &gbe_dev->gbe_intf_head);
2058 *intf_priv = gbe_intf;
2059 return 0;
2060
2061fail:
2062 if (gbe_intf->slave)
2063 devm_kfree(gbe_dev->dev, gbe_intf->slave);
2064 if (gbe_intf)
2065 devm_kfree(gbe_dev->dev, gbe_intf);
2066 return ret;
2067}
2068
2069static int gbe_release(void *intf_priv)
2070{
2071 struct gbe_intf *gbe_intf = intf_priv;
2072
2073 gbe_intf->ndev->ethtool_ops = NULL;
2074 list_del(&gbe_intf->gbe_intf_list);
2075 devm_kfree(gbe_intf->dev, gbe_intf->slave);
2076 devm_kfree(gbe_intf->dev, gbe_intf);
2077 return 0;
2078}
2079
2080static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
2081{
2082 struct gbe_priv *gbe_dev = inst_priv;
2083
2084 del_timer_sync(&gbe_dev->timer);
2085 cpsw_ale_stop(gbe_dev->ale);
2086 cpsw_ale_destroy(gbe_dev->ale);
2087 netcp_txpipe_close(&gbe_dev->tx_pipe);
2088 free_secondary_ports(gbe_dev);
2089
2090 if (!list_empty(&gbe_dev->gbe_intf_head))
2091 dev_alert(gbe_dev->dev, "unreleased ethss interfaces present\n");
2092
2093 devm_kfree(gbe_dev->dev, gbe_dev->hw_stats);
2094 devm_iounmap(gbe_dev->dev, gbe_dev->ss_regs);
2095 memset(gbe_dev, 0x00, sizeof(*gbe_dev));
2096 devm_kfree(gbe_dev->dev, gbe_dev);
2097 return 0;
2098}
2099
2100static struct netcp_module gbe_module = {
2101 .name = GBE_MODULE_NAME,
2102 .owner = THIS_MODULE,
2103 .primary = true,
2104 .probe = gbe_probe,
2105 .open = gbe_open,
2106 .close = gbe_close,
2107 .remove = gbe_remove,
2108 .attach = gbe_attach,
2109 .release = gbe_release,
2110 .add_addr = gbe_add_addr,
2111 .del_addr = gbe_del_addr,
2112 .add_vid = gbe_add_vid,
2113 .del_vid = gbe_del_vid,
2114 .ioctl = gbe_ioctl,
2115};
2116
2117static struct netcp_module xgbe_module = {
2118 .name = XGBE_MODULE_NAME,
2119 .owner = THIS_MODULE,
2120 .primary = true,
2121 .probe = gbe_probe,
2122 .open = gbe_open,
2123 .close = gbe_close,
2124 .remove = gbe_remove,
2125 .attach = gbe_attach,
2126 .release = gbe_release,
2127 .add_addr = gbe_add_addr,
2128 .del_addr = gbe_del_addr,
2129 .add_vid = gbe_add_vid,
2130 .del_vid = gbe_del_vid,
2131 .ioctl = gbe_ioctl,
2132};
2133
2134static int __init keystone_gbe_init(void)
2135{
2136 int ret;
2137
2138 ret = netcp_register_module(&gbe_module);
2139 if (ret)
2140 return ret;
2141
2142 ret = netcp_register_module(&xgbe_module);
2143 if (ret)
2144 return ret;
2145
2146 return 0;
2147}
2148module_init(keystone_gbe_init);
2149
2150static void __exit keystone_gbe_exit(void)
2151{
2152 netcp_unregister_module(&gbe_module);
2153 netcp_unregister_module(&xgbe_module);
2154}
2155module_exit(keystone_gbe_exit);
2156
2157MODULE_LICENSE("GPL v2");
2158MODULE_DESCRIPTION("TI NETCP ETHSS driver for Keystone SOCs");
2159MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
diff --git a/drivers/net/ethernet/ti/netcp_sgmii.c b/drivers/net/ethernet/ti/netcp_sgmii.c
new file mode 100644
index 000000000000..dbeb14266e2f
--- /dev/null
+++ b/drivers/net/ethernet/ti/netcp_sgmii.c
@@ -0,0 +1,131 @@
1/*
2 * SGMII module initialisation
3 *
4 * Copyright (C) 2014 Texas Instruments Incorporated
5 * Authors: Sandeep Nair <sandeep_n@ti.com>
6 * Sandeep Paulraj <s-paulraj@ti.com>
7 * Wingman Kwok <w-kwok2@ti.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation version 2.
12 *
13 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
14 * kind, whether express or implied; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19#include "netcp.h"
20
21#define SGMII_REG_STATUS_LOCK BIT(4)
22#define SGMII_REG_STATUS_LINK BIT(0)
23#define SGMII_REG_STATUS_AUTONEG BIT(2)
24#define SGMII_REG_CONTROL_AUTONEG BIT(0)
25
26#define SGMII23_OFFSET(x) ((x - 2) * 0x100)
27#define SGMII_OFFSET(x) ((x <= 1) ? (x * 0x100) : (SGMII23_OFFSET(x)))
28
29/* SGMII registers */
30#define SGMII_SRESET_REG(x) (SGMII_OFFSET(x) + 0x004)
31#define SGMII_CTL_REG(x) (SGMII_OFFSET(x) + 0x010)
32#define SGMII_STATUS_REG(x) (SGMII_OFFSET(x) + 0x014)
33#define SGMII_MRADV_REG(x) (SGMII_OFFSET(x) + 0x018)
34
35static void sgmii_write_reg(void __iomem *base, int reg, u32 val)
36{
37 writel(val, base + reg);
38}
39
40static u32 sgmii_read_reg(void __iomem *base, int reg)
41{
42 return readl(base + reg);
43}
44
45static void sgmii_write_reg_bit(void __iomem *base, int reg, u32 val)
46{
47 writel((readl(base + reg) | val), base + reg);
48}
49
50/* port is 0 based */
51int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port)
52{
53 /* Soft reset */
54 sgmii_write_reg_bit(sgmii_ofs, SGMII_SRESET_REG(port), 0x1);
55 while (sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port)) != 0x0)
56 ;
57 return 0;
58}
59
60int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port)
61{
62 u32 status = 0, link = 0;
63
64 status = sgmii_read_reg(sgmii_ofs, SGMII_STATUS_REG(port));
65 if ((status & SGMII_REG_STATUS_LINK) != 0)
66 link = 1;
67 return link;
68}
69
70int netcp_sgmii_config(void __iomem *sgmii_ofs, int port, u32 interface)
71{
72 unsigned int i, status, mask;
73 u32 mr_adv_ability;
74 u32 control;
75
76 switch (interface) {
77 case SGMII_LINK_MAC_MAC_AUTONEG:
78 mr_adv_ability = 0x9801;
79 control = 0x21;
80 break;
81
82 case SGMII_LINK_MAC_PHY:
83 case SGMII_LINK_MAC_PHY_NO_MDIO:
84 mr_adv_ability = 1;
85 control = 1;
86 break;
87
88 case SGMII_LINK_MAC_MAC_FORCED:
89 mr_adv_ability = 0x9801;
90 control = 0x20;
91 break;
92
93 case SGMII_LINK_MAC_FIBER:
94 mr_adv_ability = 0x20;
95 control = 0x1;
96 break;
97
98 default:
99 WARN_ONCE(1, "Invalid sgmii interface: %d\n", interface);
100 return -EINVAL;
101 }
102
103 sgmii_write_reg(sgmii_ofs, SGMII_CTL_REG(port), 0);
104
105 /* Wait for the SerDes pll to lock */
106 for (i = 0; i < 1000; i++) {
107 usleep_range(1000, 2000);
108 status = sgmii_read_reg(sgmii_ofs, SGMII_STATUS_REG(port));
109 if ((status & SGMII_REG_STATUS_LOCK) != 0)
110 break;
111 }
112
113 if ((status & SGMII_REG_STATUS_LOCK) == 0)
114 pr_err("serdes PLL not locked\n");
115
116 sgmii_write_reg(sgmii_ofs, SGMII_MRADV_REG(port), mr_adv_ability);
117 sgmii_write_reg(sgmii_ofs, SGMII_CTL_REG(port), control);
118
119 mask = SGMII_REG_STATUS_LINK;
120 if (control & SGMII_REG_CONTROL_AUTONEG)
121 mask |= SGMII_REG_STATUS_AUTONEG;
122
123 for (i = 0; i < 1000; i++) {
124 usleep_range(200, 500);
125 status = sgmii_read_reg(sgmii_ofs, SGMII_STATUS_REG(port));
126 if ((status & mask) == mask)
127 break;
128 }
129
130 return 0;
131}
diff --git a/drivers/net/ethernet/ti/netcp_xgbepcsr.c b/drivers/net/ethernet/ti/netcp_xgbepcsr.c
new file mode 100644
index 000000000000..33571acc52b6
--- /dev/null
+++ b/drivers/net/ethernet/ti/netcp_xgbepcsr.c
@@ -0,0 +1,501 @@
1/*
2 * XGE PCSR module initialisation
3 *
4 * Copyright (C) 2014 Texas Instruments Incorporated
5 * Authors: Sandeep Nair <sandeep_n@ti.com>
6 * WingMan Kwok <w-kwok2@ti.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation version 2.
11 *
12 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
13 * kind, whether express or implied; without even the implied warranty
14 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17#include "netcp.h"
18
19/* XGBE registers */
20#define XGBE_CTRL_OFFSET 0x0c
21#define XGBE_SGMII_1_OFFSET 0x0114
22#define XGBE_SGMII_2_OFFSET 0x0214
23
24/* PCS-R registers */
25#define PCSR_CPU_CTRL_OFFSET 0x1fd0
26#define POR_EN BIT(29)
27
28#define reg_rmw(addr, value, mask) \
29 writel(((readl(addr) & (~(mask))) | \
30 (value & (mask))), (addr))
31
32/* bit mask of width w at offset s */
33#define MASK_WID_SH(w, s) (((1 << w) - 1) << s)
34
35/* shift value v to offset s */
36#define VAL_SH(v, s) (v << s)
37
38#define PHY_A(serdes) 0
39
40struct serdes_cfg {
41 u32 ofs;
42 u32 val;
43 u32 mask;
44};
45
46static struct serdes_cfg cfg_phyb_1p25g_156p25mhz_cmu0[] = {
47 {0x0000, 0x00800002, 0x00ff00ff},
48 {0x0014, 0x00003838, 0x0000ffff},
49 {0x0060, 0x1c44e438, 0xffffffff},
50 {0x0064, 0x00c18400, 0x00ffffff},
51 {0x0068, 0x17078200, 0xffffff00},
52 {0x006c, 0x00000014, 0x000000ff},
53 {0x0078, 0x0000c000, 0x0000ff00},
54 {0x0000, 0x00000003, 0x000000ff},
55};
56
57static struct serdes_cfg cfg_phyb_10p3125g_156p25mhz_cmu1[] = {
58 {0x0c00, 0x00030002, 0x00ff00ff},
59 {0x0c14, 0x00005252, 0x0000ffff},
60 {0x0c28, 0x80000000, 0xff000000},
61 {0x0c2c, 0x000000f6, 0x000000ff},
62 {0x0c3c, 0x04000405, 0xff00ffff},
63 {0x0c40, 0xc0800000, 0xffff0000},
64 {0x0c44, 0x5a202062, 0xffffffff},
65 {0x0c48, 0x40040424, 0xffffffff},
66 {0x0c4c, 0x00004002, 0x0000ffff},
67 {0x0c50, 0x19001c00, 0xff00ff00},
68 {0x0c54, 0x00002100, 0x0000ff00},
69 {0x0c58, 0x00000060, 0x000000ff},
70 {0x0c60, 0x80131e7c, 0xffffffff},
71 {0x0c64, 0x8400cb02, 0xff00ffff},
72 {0x0c68, 0x17078200, 0xffffff00},
73 {0x0c6c, 0x00000016, 0x000000ff},
74 {0x0c74, 0x00000400, 0x0000ff00},
75 {0x0c78, 0x0000c000, 0x0000ff00},
76 {0x0c00, 0x00000003, 0x000000ff},
77};
78
79static struct serdes_cfg cfg_phyb_10p3125g_16bit_lane[] = {
80 {0x0204, 0x00000080, 0x000000ff},
81 {0x0208, 0x0000920d, 0x0000ffff},
82 {0x0204, 0xfc000000, 0xff000000},
83 {0x0208, 0x00009104, 0x0000ffff},
84 {0x0210, 0x1a000000, 0xff000000},
85 {0x0214, 0x00006b58, 0x00ffffff},
86 {0x0218, 0x75800084, 0xffff00ff},
87 {0x022c, 0x00300000, 0x00ff0000},
88 {0x0230, 0x00003800, 0x0000ff00},
89 {0x024c, 0x008f0000, 0x00ff0000},
90 {0x0250, 0x30000000, 0xff000000},
91 {0x0260, 0x00000002, 0x000000ff},
92 {0x0264, 0x00000057, 0x000000ff},
93 {0x0268, 0x00575700, 0x00ffff00},
94 {0x0278, 0xff000000, 0xff000000},
95 {0x0280, 0x00500050, 0x00ff00ff},
96 {0x0284, 0x00001f15, 0x0000ffff},
97 {0x028c, 0x00006f00, 0x0000ff00},
98 {0x0294, 0x00000000, 0xffffff00},
99 {0x0298, 0x00002640, 0xff00ffff},
100 {0x029c, 0x00000003, 0x000000ff},
101 {0x02a4, 0x00000f13, 0x0000ffff},
102 {0x02a8, 0x0001b600, 0x00ffff00},
103 {0x0380, 0x00000030, 0x000000ff},
104 {0x03c0, 0x00000200, 0x0000ff00},
105 {0x03cc, 0x00000018, 0x000000ff},
106 {0x03cc, 0x00000000, 0x000000ff},
107};
108
109static struct serdes_cfg cfg_phyb_10p3125g_comlane[] = {
110 {0x0a00, 0x00000800, 0x0000ff00},
111 {0x0a84, 0x00000000, 0x000000ff},
112 {0x0a8c, 0x00130000, 0x00ff0000},
113 {0x0a90, 0x77a00000, 0xffff0000},
114 {0x0a94, 0x00007777, 0x0000ffff},
115 {0x0b08, 0x000f0000, 0xffff0000},
116 {0x0b0c, 0x000f0000, 0x00ffffff},
117 {0x0b10, 0xbe000000, 0xff000000},
118 {0x0b14, 0x000000ff, 0x000000ff},
119 {0x0b18, 0x00000014, 0x000000ff},
120 {0x0b5c, 0x981b0000, 0xffff0000},
121 {0x0b64, 0x00001100, 0x0000ff00},
122 {0x0b78, 0x00000c00, 0x0000ff00},
123 {0x0abc, 0xff000000, 0xff000000},
124 {0x0ac0, 0x0000008b, 0x000000ff},
125};
126
127static struct serdes_cfg cfg_cm_c1_c2[] = {
128 {0x0208, 0x00000000, 0x00000f00},
129 {0x0208, 0x00000000, 0x0000001f},
130 {0x0204, 0x00000000, 0x00040000},
131 {0x0208, 0x000000a0, 0x000000e0},
132};
133
134static void netcp_xgbe_serdes_cmu_init(void __iomem *serdes_regs)
135{
136 int i;
137
138 /* cmu0 setup */
139 for (i = 0; i < ARRAY_SIZE(cfg_phyb_1p25g_156p25mhz_cmu0); i++) {
140 reg_rmw(serdes_regs + cfg_phyb_1p25g_156p25mhz_cmu0[i].ofs,
141 cfg_phyb_1p25g_156p25mhz_cmu0[i].val,
142 cfg_phyb_1p25g_156p25mhz_cmu0[i].mask);
143 }
144
145 /* cmu1 setup */
146 for (i = 0; i < ARRAY_SIZE(cfg_phyb_10p3125g_156p25mhz_cmu1); i++) {
147 reg_rmw(serdes_regs + cfg_phyb_10p3125g_156p25mhz_cmu1[i].ofs,
148 cfg_phyb_10p3125g_156p25mhz_cmu1[i].val,
149 cfg_phyb_10p3125g_156p25mhz_cmu1[i].mask);
150 }
151}
152
153/* lane is 0 based */
154static void netcp_xgbe_serdes_lane_config(
155 void __iomem *serdes_regs, int lane)
156{
157 int i;
158
159 /* lane setup */
160 for (i = 0; i < ARRAY_SIZE(cfg_phyb_10p3125g_16bit_lane); i++) {
161 reg_rmw(serdes_regs +
162 cfg_phyb_10p3125g_16bit_lane[i].ofs +
163 (0x200 * lane),
164 cfg_phyb_10p3125g_16bit_lane[i].val,
165 cfg_phyb_10p3125g_16bit_lane[i].mask);
166 }
167
168 /* disable auto negotiation*/
169 reg_rmw(serdes_regs + (0x200 * lane) + 0x0380,
170 0x00000000, 0x00000010);
171
172 /* disable link training */
173 reg_rmw(serdes_regs + (0x200 * lane) + 0x03c0,
174 0x00000000, 0x00000200);
175}
176
177static void netcp_xgbe_serdes_com_enable(void __iomem *serdes_regs)
178{
179 int i;
180
181 for (i = 0; i < ARRAY_SIZE(cfg_phyb_10p3125g_comlane); i++) {
182 reg_rmw(serdes_regs + cfg_phyb_10p3125g_comlane[i].ofs,
183 cfg_phyb_10p3125g_comlane[i].val,
184 cfg_phyb_10p3125g_comlane[i].mask);
185 }
186}
187
188static void netcp_xgbe_serdes_lane_enable(
189 void __iomem *serdes_regs, int lane)
190{
191 /* Set Lane Control Rate */
192 writel(0xe0e9e038, serdes_regs + 0x1fe0 + (4 * lane));
193}
194
195static void netcp_xgbe_serdes_phyb_rst_clr(void __iomem *serdes_regs)
196{
197 reg_rmw(serdes_regs + 0x0a00, 0x0000001f, 0x000000ff);
198}
199
200static void netcp_xgbe_serdes_pll_disable(void __iomem *serdes_regs)
201{
202 writel(0x88000000, serdes_regs + 0x1ff4);
203}
204
205static void netcp_xgbe_serdes_pll_enable(void __iomem *serdes_regs)
206{
207 netcp_xgbe_serdes_phyb_rst_clr(serdes_regs);
208 writel(0xee000000, serdes_regs + 0x1ff4);
209}
210
211static int netcp_xgbe_wait_pll_locked(void __iomem *sw_regs)
212{
213 unsigned long timeout;
214 int ret = 0;
215 u32 val_1, val_0;
216
217 timeout = jiffies + msecs_to_jiffies(500);
218 do {
219 val_0 = (readl(sw_regs + XGBE_SGMII_1_OFFSET) & BIT(4));
220 val_1 = (readl(sw_regs + XGBE_SGMII_2_OFFSET) & BIT(4));
221
222 if (val_1 && val_0)
223 return 0;
224
225 if (time_after(jiffies, timeout)) {
226 ret = -ETIMEDOUT;
227 break;
228 }
229
230 cpu_relax();
231 } while (true);
232
233 pr_err("XGBE serdes not locked: time out.\n");
234 return ret;
235}
236
237static void netcp_xgbe_serdes_enable_xgmii_port(void __iomem *sw_regs)
238{
239 writel(0x03, sw_regs + XGBE_CTRL_OFFSET);
240}
241
242static u32 netcp_xgbe_serdes_read_tbus_val(void __iomem *serdes_regs)
243{
244 u32 tmp;
245
246 if (PHY_A(serdes_regs)) {
247 tmp = (readl(serdes_regs + 0x0ec) >> 24) & 0x0ff;
248 tmp |= ((readl(serdes_regs + 0x0fc) >> 16) & 0x00f00);
249 } else {
250 tmp = (readl(serdes_regs + 0x0f8) >> 16) & 0x0fff;
251 }
252
253 return tmp;
254}
255
256static void netcp_xgbe_serdes_write_tbus_addr(void __iomem *serdes_regs,
257 int select, int ofs)
258{
259 if (PHY_A(serdes_regs)) {
260 reg_rmw(serdes_regs + 0x0008, ((select << 5) + ofs) << 24,
261 ~0x00ffffff);
262 return;
263 }
264
265 /* For 2 lane Phy-B, lane0 is actually lane1 */
266 switch (select) {
267 case 1:
268 select = 2;
269 break;
270 case 2:
271 select = 3;
272 break;
273 default:
274 return;
275 }
276
277 reg_rmw(serdes_regs + 0x00fc, ((select << 8) + ofs) << 16, ~0xf800ffff);
278}
279
280static u32 netcp_xgbe_serdes_read_select_tbus(void __iomem *serdes_regs,
281 int select, int ofs)
282{
283 /* Set tbus address */
284 netcp_xgbe_serdes_write_tbus_addr(serdes_regs, select, ofs);
285 /* Get TBUS Value */
286 return netcp_xgbe_serdes_read_tbus_val(serdes_regs);
287}
288
289static void netcp_xgbe_serdes_reset_cdr(void __iomem *serdes_regs,
290 void __iomem *sig_detect_reg, int lane)
291{
292 u32 tmp, dlpf, tbus;
293
294 /*Get the DLPF values */
295 tmp = netcp_xgbe_serdes_read_select_tbus(
296 serdes_regs, lane + 1, 5);
297
298 dlpf = tmp >> 2;
299
300 if (dlpf < 400 || dlpf > 700) {
301 reg_rmw(sig_detect_reg, VAL_SH(2, 1), MASK_WID_SH(2, 1));
302 mdelay(1);
303 reg_rmw(sig_detect_reg, VAL_SH(0, 1), MASK_WID_SH(2, 1));
304 } else {
305 tbus = netcp_xgbe_serdes_read_select_tbus(serdes_regs, lane +
306 1, 0xe);
307
308 pr_debug("XGBE: CDR centered, DLPF: %4d,%d,%d.\n",
309 tmp >> 2, tmp & 3, (tbus >> 2) & 3);
310 }
311}
312
313/* Call every 100 ms */
314static int netcp_xgbe_check_link_status(void __iomem *serdes_regs,
315 void __iomem *sw_regs, u32 lanes,
316 u32 *current_state, u32 *lane_down)
317{
318 void __iomem *pcsr_base = sw_regs + 0x0600;
319 void __iomem *sig_detect_reg;
320 u32 pcsr_rx_stat, blk_lock, blk_errs;
321 int loss, i, status = 1;
322
323 for (i = 0; i < lanes; i++) {
324 /* Get the Loss bit */
325 loss = readl(serdes_regs + 0x1fc0 + 0x20 + (i * 0x04)) & 0x1;
326
327 /* Get Block Errors and Block Lock bits */
328 pcsr_rx_stat = readl(pcsr_base + 0x0c + (i * 0x80));
329 blk_lock = (pcsr_rx_stat >> 30) & 0x1;
330 blk_errs = (pcsr_rx_stat >> 16) & 0x0ff;
331
332 /* Get Signal Detect Overlay Address */
333 sig_detect_reg = serdes_regs + (i * 0x200) + 0x200 + 0x04;
334
335 /* If Block errors maxed out, attempt recovery! */
336 if (blk_errs == 0x0ff)
337 blk_lock = 0;
338
339 switch (current_state[i]) {
340 case 0:
341 /* if good link lock the signal detect ON! */
342 if (!loss && blk_lock) {
343 pr_debug("XGBE PCSR Linked Lane: %d\n", i);
344 reg_rmw(sig_detect_reg, VAL_SH(3, 1),
345 MASK_WID_SH(2, 1));
346 current_state[i] = 1;
347 } else if (!blk_lock) {
348 /* if no lock, then reset CDR */
349 pr_debug("XGBE PCSR Recover Lane: %d\n", i);
350 netcp_xgbe_serdes_reset_cdr(serdes_regs,
351 sig_detect_reg, i);
352 }
353 break;
354
355 case 1:
356 if (!blk_lock) {
357 /* Link Lost? */
358 lane_down[i] = 1;
359 current_state[i] = 2;
360 }
361 break;
362
363 case 2:
364 if (blk_lock)
365 /* Nope just noise */
366 current_state[i] = 1;
367 else {
368 /* Lost the block lock, reset CDR if it is
369 * not centered and go back to sync state
370 */
371 netcp_xgbe_serdes_reset_cdr(serdes_regs,
372 sig_detect_reg, i);
373 current_state[i] = 0;
374 }
375 break;
376
377 default:
378 pr_err("XGBE: unknown current_state[%d] %d\n",
379 i, current_state[i]);
380 break;
381 }
382
383 if (blk_errs > 0) {
384 /* Reset the Error counts! */
385 reg_rmw(pcsr_base + 0x08 + (i * 0x80), VAL_SH(0x19, 0),
386 MASK_WID_SH(8, 0));
387
388 reg_rmw(pcsr_base + 0x08 + (i * 0x80), VAL_SH(0x00, 0),
389 MASK_WID_SH(8, 0));
390 }
391
392 status &= (current_state[i] == 1);
393 }
394
395 return status;
396}
397
398static int netcp_xgbe_serdes_check_lane(void __iomem *serdes_regs,
399 void __iomem *sw_regs)
400{
401 u32 current_state[2] = {0, 0};
402 int retries = 0, link_up;
403 u32 lane_down[2];
404
405 do {
406 lane_down[0] = 0;
407 lane_down[1] = 0;
408
409 link_up = netcp_xgbe_check_link_status(serdes_regs, sw_regs, 2,
410 current_state,
411 lane_down);
412
413 /* if we did not get link up then wait 100ms before calling
414 * it again
415 */
416 if (link_up)
417 break;
418
419 if (lane_down[0])
420 pr_debug("XGBE: detected link down on lane 0\n");
421
422 if (lane_down[1])
423 pr_debug("XGBE: detected link down on lane 1\n");
424
425 if (++retries > 1) {
426 pr_debug("XGBE: timeout waiting for serdes link up\n");
427 return -ETIMEDOUT;
428 }
429 mdelay(100);
430 } while (!link_up);
431
432 pr_debug("XGBE: PCSR link is up\n");
433 return 0;
434}
435
436static void netcp_xgbe_serdes_setup_cm_c1_c2(void __iomem *serdes_regs,
437 int lane, int cm, int c1, int c2)
438{
439 int i;
440
441 for (i = 0; i < ARRAY_SIZE(cfg_cm_c1_c2); i++) {
442 reg_rmw(serdes_regs + cfg_cm_c1_c2[i].ofs + (0x200 * lane),
443 cfg_cm_c1_c2[i].val,
444 cfg_cm_c1_c2[i].mask);
445 }
446}
447
448static void netcp_xgbe_reset_serdes(void __iomem *serdes_regs)
449{
450 /* Toggle the POR_EN bit in CONFIG.CPU_CTRL */
451 /* enable POR_EN bit */
452 reg_rmw(serdes_regs + PCSR_CPU_CTRL_OFFSET, POR_EN, POR_EN);
453 usleep_range(10, 100);
454
455 /* disable POR_EN bit */
456 reg_rmw(serdes_regs + PCSR_CPU_CTRL_OFFSET, 0, POR_EN);
457 usleep_range(10, 100);
458}
459
460static int netcp_xgbe_serdes_config(void __iomem *serdes_regs,
461 void __iomem *sw_regs)
462{
463 u32 ret, i;
464
465 netcp_xgbe_serdes_pll_disable(serdes_regs);
466 netcp_xgbe_serdes_cmu_init(serdes_regs);
467
468 for (i = 0; i < 2; i++)
469 netcp_xgbe_serdes_lane_config(serdes_regs, i);
470
471 netcp_xgbe_serdes_com_enable(serdes_regs);
472 /* This is EVM + RTM-BOC specific */
473 for (i = 0; i < 2; i++)
474 netcp_xgbe_serdes_setup_cm_c1_c2(serdes_regs, i, 0, 0, 5);
475
476 netcp_xgbe_serdes_pll_enable(serdes_regs);
477 for (i = 0; i < 2; i++)
478 netcp_xgbe_serdes_lane_enable(serdes_regs, i);
479
480 /* SB PLL Status Poll */
481 ret = netcp_xgbe_wait_pll_locked(sw_regs);
482 if (ret)
483 return ret;
484
485 netcp_xgbe_serdes_enable_xgmii_port(sw_regs);
486 netcp_xgbe_serdes_check_lane(serdes_regs, sw_regs);
487 return ret;
488}
489
490int netcp_xgbe_serdes_init(void __iomem *serdes_regs, void __iomem *xgbe_regs)
491{
492 u32 val;
493
494 /* read COMLANE bits 4:0 */
495 val = readl(serdes_regs + 0xa00);
496 if (val & 0x1f) {
497 pr_debug("XGBE: serdes already in operation - reset\n");
498 netcp_xgbe_reset_serdes(serdes_regs);
499 }
500 return netcp_xgbe_serdes_config(serdes_regs, xgbe_regs);
501}
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index f2ff0074aac9..691ec936e88d 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -2540,7 +2540,7 @@ static void tlan_phy_power_down(struct net_device *dev)
 	 * This is abitrary. It is intended to make sure the
 	 * transceiver settles.
 	 */
-	tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_PUP);
+	tlan_set_timer(dev, msecs_to_jiffies(50), TLAN_TIMER_PHY_PUP);
 
 }
 
@@ -2561,7 +2561,7 @@ static void tlan_phy_power_up(struct net_device *dev)
 	 * transceiver. The TLAN docs say both 50 ms and
 	 * 500 ms, so do the longer, just in case.
 	 */
-	tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_RESET);
+	tlan_set_timer(dev, msecs_to_jiffies(500), TLAN_TIMER_PHY_RESET);
 
 }
 
@@ -2593,7 +2593,7 @@ static void tlan_phy_reset(struct net_device *dev)
 	 * I don't remember why I wait this long.
 	 * I've changed this to 50ms, as it seems long enough.
 	 */
-	tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_START_LINK);
+	tlan_set_timer(dev, msecs_to_jiffies(50), TLAN_TIMER_PHY_START_LINK);
 
 }
 
@@ -2658,7 +2658,7 @@ static void tlan_phy_start_link(struct net_device *dev)
 		data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
 			| TLAN_NET_CFG_PHY_EN;
 		tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
-		tlan_set_timer(dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN);
+		tlan_set_timer(dev, msecs_to_jiffies(40), TLAN_TIMER_PHY_PDOWN);
 		return;
 	} else if (priv->phy_num == 0) {
 		control = 0;
@@ -2725,7 +2725,7 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev)
 	    (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) &&
 	    (priv->phy_num != 0)) {
 		priv->phy_num = 0;
-		tlan_set_timer(dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN);
+		tlan_set_timer(dev, msecs_to_jiffies(400), TLAN_TIMER_PHY_PDOWN);
 		return;
 	}
 
@@ -2744,7 +2744,7 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev)
 
 	/* Wait for 100 ms. No reason in partiticular.
 	 */
-	tlan_set_timer(dev, (HZ/10), TLAN_TIMER_FINISH_RESET);
+	tlan_set_timer(dev, msecs_to_jiffies(100), TLAN_TIMER_FINISH_RESET);
 
 }
 
@@ -2796,7 +2796,7 @@ static void tlan_phy_monitor(unsigned long data)
 			/* set to external PHY */
 			priv->phy_num = 1;
 			/* restart autonegotiation */
-			tlan_set_timer(dev, 4 * HZ / 10,
+			tlan_set_timer(dev, msecs_to_jiffies(400),
 				       TLAN_TIMER_PHY_PDOWN);
 			return;
 		}
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index a191afc23b56..17e276651601 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1326,7 +1326,8 @@ static void rhine_check_media(struct net_device *dev, unsigned int init_media)
 	struct rhine_private *rp = netdev_priv(dev);
 	void __iomem *ioaddr = rp->base;
 
-	mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
+	if (!rp->mii_if.force_media)
+		mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
 
 	if (rp->mii_if.full_duplex)
 	    iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
@@ -1781,8 +1782,8 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 	rp->tx_ring[entry].desc_length =
 		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
 
-	if (unlikely(vlan_tx_tag_present(skb))) {
-		u16 vid_pcp = vlan_tx_tag_get(skb);
+	if (unlikely(skb_vlan_tag_present(skb))) {
+		u16 vid_pcp = skb_vlan_tag_get(skb);
 
 		/* drop CFI/DEI bit, register needs VID and PCP */
 		vid_pcp = (vid_pcp & VLAN_VID_MASK) |
@@ -1803,7 +1804,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 
 	/* Non-x86 Todo: explicitly flush cache lines here. */
 
-	if (vlan_tx_tag_present(skb))
+	if (skb_vlan_tag_present(skb))
 		/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
 		BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
 
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 282f83a63b67..c20206f83cc1 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2611,8 +2611,8 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
 
 	td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
 
-	if (vlan_tx_tag_present(skb)) {
-		td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb));
+	if (skb_vlan_tag_present(skb)) {
+		td_ptr->tdesc1.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
 		td_ptr->tdesc1.TCR |= TCR0_VETAG;
 	}
 